author	Abhijeet Dharmapurikar <adharmap@codeaurora.org>	2016-01-22 16:34:58 -0800
committer	Rohit Vaswani <rvaswani@codeaurora.org>	2016-03-01 12:22:31 -0800
commit	77dd35597191c40dd5900543d1f497b8e40a70b5 (patch)
tree	fe541bea9b56d4ea57f0780d5af93cd7be68f684
parent	6aa370ab429dd0709b1fe1ef39c281ba019bc652 (diff)
qpnp: Add snapshot of some qpnp, regulator and charger drivers
This snapshot is taken as of msm-3.18 commit 9da4ddc (Merge "clk: msm:
clock-gcc: Associate gfx rail voting with gfx3d branch").

Change-Id: Idd2f467f1f1863a156d1757589dfe78158f0e43f
Signed-off-by: Abhijeet Dharmapurikar <adharmap@codeaurora.org>
-rw-r--r--	Documentation/devicetree/bindings/gpio/qpnp-pin.txt	224
-rw-r--r--	Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt	104
-rw-r--r--	Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt	181
-rw-r--r--	Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt	180
-rw-r--r--	Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt	90
-rw-r--r--	Documentation/devicetree/bindings/leds/leds-qpnp.txt	358
-rw-r--r--	Documentation/devicetree/bindings/platform/msm/gpio-usbdetect.txt	26
-rw-r--r--	Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt	183
-rw-r--r--	Documentation/devicetree/bindings/rtc/qpnp-rtc.txt	64
-rw-r--r--	Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt	68
-rw-r--r--	drivers/gpio/Kconfig	20
-rw-r--r--	drivers/gpio/Makefile	1
-rw-r--r--	drivers/gpio/qpnp-pin.c	1679
-rw-r--r--	drivers/hwmon/Kconfig	20
-rw-r--r--	drivers/hwmon/Makefile	2
-rw-r--r--	drivers/hwmon/qpnp-adc-common.c	1993
-rw-r--r--	drivers/hwmon/qpnp-adc-current.c	1654
-rw-r--r--	drivers/hwmon/qpnp-adc-voltage.c	2807
-rw-r--r--	drivers/leds/Kconfig	30
-rw-r--r--	drivers/leds/Makefile	3
-rw-r--r--	drivers/leds/leds-qpnp-flash.c	2653
-rw-r--r--	drivers/leds/leds-qpnp-wled.c	1743
-rw-r--r--	drivers/leds/leds-qpnp.c	4260
-rw-r--r--	drivers/of/Kconfig	5
-rw-r--r--	drivers/of/Makefile	1
-rw-r--r--	drivers/of/of_batterydata.c	469
-rw-r--r--	drivers/platform/Kconfig	2
-rw-r--r--	drivers/platform/Makefile	2
-rw-r--r--	drivers/platform/msm/Kconfig	48
-rw-r--r--	drivers/platform/msm/Makefile	8
-rw-r--r--	drivers/platform/msm/gpio-usbdetect.c	146
-rw-r--r--	drivers/platform/msm/qpnp-coincell.c	266
-rw-r--r--	drivers/platform/msm/qpnp-haptic.c	2342
-rw-r--r--	drivers/platform/msm/qpnp-power-on.c	2324
-rw-r--r--	drivers/platform/msm/qpnp-revid.c	245
-rw-r--r--	drivers/power/Kconfig	59
-rw-r--r--	drivers/power/Makefile	7
-rw-r--r--	drivers/power/batterydata-lib.c	493
-rw-r--r--	drivers/power/bcl_peripheral.c	1153
-rw-r--r--	drivers/power/msm_bcl.c	374
-rw-r--r--	drivers/power/pmic-voter.c	266
-rw-r--r--	drivers/power/pmic-voter.h	41
-rw-r--r--	drivers/power/power_supply_sysfs.c	48
-rw-r--r--	drivers/power/qcom/Kconfig	66
-rw-r--r--	drivers/power/qcom/Makefile	5
-rw-r--r--	drivers/power/qcom/apm.c	985
-rw-r--r--	drivers/power/qpnp-fg.c	6721
-rw-r--r--	drivers/power/qpnp-smbcharger.c	8221
-rw-r--r--	drivers/power/smb1351-charger.c	3268
-rw-r--r--	drivers/power/smb135x-charger.c	4516
-rw-r--r--	drivers/pwm/Kconfig	10
-rw-r--r--	drivers/pwm/Makefile	2
-rw-r--r--	drivers/pwm/pwm-qpnp.c	2126
-rw-r--r--	drivers/regulator/Kconfig	152
-rw-r--r--	drivers/regulator/Makefile	14
-rw-r--r--	drivers/regulator/cpr-regulator.c	6361
-rw-r--r--	drivers/regulator/cpr2-gfx-regulator.c	2451
-rw-r--r--	drivers/regulator/cpr3-hmss-regulator.c	1730
-rw-r--r--	drivers/regulator/cpr3-mmss-regulator.c	1039
-rw-r--r--	drivers/regulator/cpr3-regulator.c	4641
-rw-r--r--	drivers/regulator/cpr3-regulator.h	786
-rw-r--r--	drivers/regulator/cpr3-util.c	1505
-rw-r--r--	drivers/regulator/cpr4-apss-regulator.c	966
-rw-r--r--	drivers/regulator/kryo-regulator.c	1106
-rw-r--r--	drivers/regulator/mem-acc-regulator.c	1390
-rw-r--r--	drivers/regulator/proxy-consumer.c	231
-rw-r--r--	drivers/regulator/qpnp-labibb-regulator.c	2494
-rw-r--r--	drivers/regulator/qpnp-regulator.c	2170
-rw-r--r--	drivers/regulator/rpm-smd-regulator.c	1945
-rw-r--r--	drivers/regulator/spm-regulator.c	1076
-rw-r--r--	drivers/regulator/stub-regulator.c	304
-rw-r--r--	drivers/rtc/Kconfig	9
-rw-r--r--	drivers/rtc/Makefile	1
-rw-r--r--	drivers/rtc/qpnp-rtc.c	714
-rw-r--r--	drivers/thermal/Kconfig	27
-rw-r--r--	drivers/thermal/Makefile	2
-rw-r--r--	drivers/thermal/qpnp-temp-alarm.c	725
-rw-r--r--	include/linux/batterydata-lib.h	218
-rw-r--r--	include/linux/leds-qpnp-wled.h	22
-rw-r--r--	include/linux/msm_bcl.h	104
-rw-r--r--	include/linux/of_batterydata.h	67
-rw-r--r--	include/linux/power/qcom/apm.h	48
-rw-r--r--	include/linux/power_supply.h	69
-rw-r--r--	include/linux/qpnp/pin.h	226
-rw-r--r--	include/linux/qpnp/power-on.h	96
-rw-r--r--	include/linux/qpnp/qpnp-adc.h	2268
-rw-r--r--	include/linux/qpnp/qpnp-haptic.h	23
-rw-r--r--	include/linux/qpnp/qpnp-revid.h	176
-rw-r--r--	include/linux/regulator/kryo-regulator.h	32
-rw-r--r--	include/linux/regulator/proxy-consumer.h	41
-rw-r--r--	include/linux/regulator/qpnp-regulator.h	197
-rw-r--r--	include/linux/regulator/rpm-smd-regulator.h	132
-rw-r--r--	include/linux/regulator/spm-regulator.h	25
-rw-r--r--	include/soc/qcom/spm.h	148
-rw-r--r--	include/trace/trace_thermal.h	399
95 files changed, 88689 insertions, 3 deletions
diff --git a/Documentation/devicetree/bindings/gpio/qpnp-pin.txt b/Documentation/devicetree/bindings/gpio/qpnp-pin.txt
new file mode 100644
index 000000000000..1a1fd454b095
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/qpnp-pin.txt
@@ -0,0 +1,224 @@
+* msm-qpnp-pin
+
+msm-qpnp-pin is a GPIO chip driver for the MSM SPMI implementation.
+It creates a spmi_device for every spmi-dev-container block of device_nodes.
+The device_nodes contained within specify the PMIC pin numbers associated
+with each gpio chip; the driver maps these to Linux GPIO numbers.
+
+[PMIC GPIO Device Declarations]
+
+-Root Node-
+
+Required properties :
+ - spmi-dev-container : Used to specify the following child nodes as part of the
+ same SPMI device.
+ - gpio-controller : Specify as gpio-controller. All child nodes will belong to
+ this gpio_chip.
+ - #gpio-cells: We encode a PMIC pin number and a 32-bit flag field to
+ specify the gpio configuration. This must be set to '2'.
+ - #address-cells: Specify one address field. This must be set to '1'.
+ - #size-cells: Specify one size-cell. This must be set to '1'.
+ - compatible = "qcom,qpnp-pin" : Specify driver matching for this driver.
+ - label: String giving the name for the gpio_chip device. This name
+ should be unique on the system and portray the specifics of the device.
+
+-Child Nodes-
+
+Required properties :
+ - reg : Specify the spmi offset and size for this pin device.
+ - qcom,pin-num : Specify the PMIC pin number for this device.
+
+Optional configuration properties :
+ - qcom,mode: indicates whether the pin should be input, output, or
+ both for gpios. mpp pins also support bidirectional,
+ analog in, analog out and current sink.
+ QPNP_PIN_MODE_DIG_IN = 0, (GPIO/MPP)
+ QPNP_PIN_MODE_DIG_OUT = 1, (GPIO/MPP)
+ QPNP_PIN_MODE_DIG_IN_OUT = 2, (GPIO/MPP)
+ QPNP_PIN_MODE_ANA_PASS_THRU = 3, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_MODE_BIDIR = 3, (MPP)
+ QPNP_PIN_MODE_AIN = 4, (MPP)
+ QPNP_PIN_MODE_AOUT = 5, (MPP)
+ QPNP_PIN_MODE_SINK = 6 (MPP)
+
+ - qcom,output-type: indicates gpio should be configured as CMOS or open
+ drain.
+ QPNP_PIN_OUT_BUF_CMOS = 0, (GPIO)
+ QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS = 1, (GPIO)
+ QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS = 2 (GPIO)
+ QPNP_PIN_OUT_BUF_NO_DRIVE = 3, (GPIO_LV/GPIO_MV)
+
+ - qcom,invert: Invert the signal of the gpio line -
+ QPNP_PIN_INVERT_DISABLE = 0 (GPIO/MPP)
+ QPNP_PIN_INVERT_ENABLE = 1 (GPIO/MPP)
+
+ - qcom,pull: This parameter should be programmed to different values
+ depending on whether it's GPIO or MPP.
+ For GPIO, it indicates whether a pull up or pull down
+ should be applied. If a pullup is required the
+ current strength needs to be specified.
+ Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+ boost are supported. This value should be one of
+ the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+ this configuration if the GPIO is not set to input or
+ output open-drain mode.
+ QPNP_PIN_PULL_UP_30 = 0, (GPIO)
+ QPNP_PIN_PULL_UP_1P5 = 1, (GPIO)
+ QPNP_PIN_PULL_UP_31P5 = 2, (GPIO)
+ QPNP_PIN_PULL_UP_1P5_30 = 3, (GPIO)
+ QPNP_PIN_PULL_DN = 4, (GPIO)
+ QPNP_PIN_PULL_NO = 5 (GPIO)
+
+ For MPP, it indicates whether a pullup should be
+			applied in bidirectional mode only. The hardware
+ ignores the configuration when operating in other modes.
+ This value should be one of the QPNP_PIN_MPP_PULL_*.
+
+ QPNP_PIN_MPP_PULL_UP_0P6KOHM = 0, (MPP)
+ QPNP_PIN_MPP_PULL_UP_OPEN = 1 (MPP)
+ QPNP_PIN_MPP_PULL_UP_10KOHM = 2, (MPP)
+ QPNP_PIN_MPP_PULL_UP_30KOHM = 3, (MPP)
+
+ - qcom,vin-sel: specifies the voltage level when the output is set to 1.
+			For an input gpio, it specifies the voltage level at which
+ the input is interpreted as a logical 1.
+ QPNP_PIN_VIN0 = 0, (GPIO/MPP/GPIO_LV/GPIO_MV)
+ QPNP_PIN_VIN1 = 1, (GPIO/MPP/GPIO_MV)
+ QPNP_PIN_VIN2 = 2, (GPIO/MPP)
+ QPNP_PIN_VIN3 = 3, (GPIO/MPP)
+ QPNP_PIN_VIN4 = 4, (GPIO/MPP)
+ QPNP_PIN_VIN5 = 5, (GPIO/MPP)
+ QPNP_PIN_VIN6 = 6, (GPIO/MPP)
+ QPNP_PIN_VIN7 = 7 (GPIO/MPP)
+
+ - qcom,out-strength: the amount of current supplied for an output gpio.
+ QPNP_PIN_OUT_STRENGTH_LOW = 1 (GPIO)
+ QPNP_PIN_OUT_STRENGTH_MED = 2, (GPIO)
+ QPNP_PIN_OUT_STRENGTH_HIGH = 3, (GPIO)
+
+ - qcom,dtest-sel:	Route the pin internally to a DTEST line.
+ QPNP_PIN_DIG_IN_CTL_DTEST1 = 1 (GPIO/MPP)
+ QPNP_PIN_DIG_IN_CTL_DTEST2 = 2, (GPIO/MPP)
+ QPNP_PIN_DIG_IN_CTL_DTEST3 = 3, (GPIO/MPP)
+ QPNP_PIN_DIG_IN_CTL_DTEST4 = 4, (GPIO/MPP)
+
+ - qcom,src-sel: select a function for the pin. Certain pins
+ can be paired (shorted) with each other. Some gpio pins
+ can act as alternate functions.
+ In the context of gpio, this acts as a source select.
+ For mpps, this is an enable select.
+ QPNP_PIN_SEL_FUNC_CONSTANT = 0, (GPIO/MPP)
+ QPNP_PIN_SEL_FUNC_PAIRED = 1, (GPIO/MPP)
+ QPNP_PIN_SEL_FUNC_1 = 2, (GPIO/MPP)
+ QPNP_PIN_SEL_FUNC_2 = 3, (GPIO/MPP)
+ QPNP_PIN_SEL_DTEST1 = 4, (GPIO/MPP)
+ QPNP_PIN_SEL_DTEST2 = 5, (GPIO/MPP)
+ QPNP_PIN_SEL_DTEST3 = 6, (GPIO/MPP)
+ QPNP_PIN_SEL_DTEST4 = 7 (GPIO/MPP)
+
+ Below are the source-select values for GPIO_LV/MV.
+ QPNP_PIN_LV_MV_SEL_FUNC_CONSTANT = 0, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_LV_MV_SEL_FUNC_PAIRED = 1, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_LV_MV_SEL_FUNC_1 = 2, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_LV_MV_SEL_FUNC_2 = 3, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_LV_MV_SEL_FUNC_3 = 4, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_LV_MV_SEL_FUNC_4 = 5, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_LV_MV_SEL_DTEST1 = 6 (GPIO_LV/GPIO_MV)
+ QPNP_PIN_LV_MV_SEL_DTEST2 = 7, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_LV_MV_SEL_DTEST3 = 8, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_LV_MV_SEL_DTEST4 = 9, (GPIO_LV/GPIO_MV)
+
+ - qcom,master-en: 1 = Enable features within the
+ pin block based on configurations. (GPIO/MPP)
+ 0 = Completely disable the block and
+ let the pin float with high impedance
+ regardless of other settings. (GPIO/MPP)
+ - qcom,aout-ref: set the analog output reference.
+
+ QPNP_PIN_AOUT_1V25 = 0, (MPP)
+ QPNP_PIN_AOUT_0V625 = 1, (MPP)
+ QPNP_PIN_AOUT_0V3125 = 2, (MPP)
+ QPNP_PIN_AOUT_MPP = 3, (MPP)
+ QPNP_PIN_AOUT_ABUS1 = 4, (MPP)
+ QPNP_PIN_AOUT_ABUS2 = 5, (MPP)
+ QPNP_PIN_AOUT_ABUS3 = 6, (MPP)
+ QPNP_PIN_AOUT_ABUS4 = 7 (MPP)
+
+ - qcom,ain-route: Set the destination for analog input.
+ QPNP_PIN_AIN_AMUX_CH5 = 0, (MPP)
+ QPNP_PIN_AIN_AMUX_CH6 = 1, (MPP)
+ QPNP_PIN_AIN_AMUX_CH7 = 2, (MPP)
+ QPNP_PIN_AIN_AMUX_CH8 = 3, (MPP)
+ QPNP_PIN_AIN_AMUX_ABUS1 = 4, (MPP)
+ QPNP_PIN_AIN_AMUX_ABUS2 = 5, (MPP)
+ QPNP_PIN_AIN_AMUX_ABUS3 = 6, (MPP)
+ QPNP_PIN_AIN_AMUX_ABUS4 = 7 (MPP)
+
+ - qcom,cs-out:	Set the amount of current to sink, in mA.
+ QPNP_PIN_CS_OUT_5MA = 0, (MPP)
+ QPNP_PIN_CS_OUT_10MA = 1, (MPP)
+ QPNP_PIN_CS_OUT_15MA = 2, (MPP)
+ QPNP_PIN_CS_OUT_20MA = 3, (MPP)
+ QPNP_PIN_CS_OUT_25MA = 4, (MPP)
+ QPNP_PIN_CS_OUT_30MA = 5, (MPP)
+ QPNP_PIN_CS_OUT_35MA = 6, (MPP)
+ QPNP_PIN_CS_OUT_40MA = 7 (MPP)
+
+ - qcom,apass-sel:	Set the ATEST channel to route the signal to.
+ QPNP_PIN_APASS_SEL_ATEST1 = 0, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_APASS_SEL_ATEST2 = 1, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_APASS_SEL_ATEST3 = 2, (GPIO_LV/GPIO_MV)
+ QPNP_PIN_APASS_SEL_ATEST4 = 3, (GPIO_LV/GPIO_MV)
+
+*Note: If any of the configuration properties are not specified, then the
+ qpnp-pin driver will not modify that respective configuration in
+ hardware.
+
+[PMIC GPIO clients]
+
+Required properties :
+ - gpios : Contains 3 fields of the form <&gpio_controller pmic_pin_num flags>
+
+[Example]
+
+qpnp: qcom,spmi@fc4c0000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ qcom,pm8941@0 {
+ spmi-slave-container;
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pm8941_gpios: gpios {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-pin";
+ gpio-controller;
+ #gpio-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ gpio@c000 {
+ reg = <0xc000 0x100>;
+ qcom,pin-num = <62>;
+ };
+
+ gpio@c100 {
+ reg = <0xc100 0x100>;
+ qcom,pin-num = <20>;
+			qcom,src-sel = <2>;
+ qcom,pull = <5>;
+ };
+ };
+
+ qcom,testgpio@1000 {
+ compatible = "qcom,qpnp-testgpio";
+ reg = <0x1000 0x1000>;
+ gpios = <&pm8941_gpios 62 0x0 &pm8941_gpios 20 0x1>;
+ };
+ };
+ };
+};
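+
+The fragment below is an illustrative sketch of a child node configured as a
+CMOS digital output (the register offset and pin number are placeholders, not
+taken from a shipped board file); the numeric values map to the enums listed
+above:
+
+	gpio@c200 {
+		reg = <0xc200 0x100>;
+		qcom,pin-num = <3>;
+		qcom,mode = <1>;		/* QPNP_PIN_MODE_DIG_OUT */
+		qcom,output-type = <0>;		/* QPNP_PIN_OUT_BUF_CMOS */
+		qcom,vin-sel = <0>;		/* QPNP_PIN_VIN0 */
+		qcom,out-strength = <3>;	/* QPNP_PIN_OUT_STRENGTH_HIGH */
+		qcom,src-sel = <0>;		/* constant function */
+		qcom,master-en = <1>;
+	};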
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
new file mode 100644
index 000000000000..9450b5df6d21
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-current.txt
@@ -0,0 +1,104 @@
+Qualcomm's QPNP PMIC current ADC driver
+
+The QPNP PMIC current ADC (IADC) provides an interface for clients to read
+current. A 16-bit ADC is used for current measurements. The IADC contains
+multiple peripherals; the scope of this driver is to provide an interface
+for the USR peripheral of the IADC.
+
+IADC node
+
+Required properties:
+- compatible : should be "qcom,qpnp-iadc" for Current ADC driver.
+- reg : offset and length of the PMIC Arbiter register map.
+- reg-names : resource names used for the physical base address of the PMIC IADC
+ peripheral, the SMBB_BAT_IF_TRIM_CNST_RDS register.
+ Should be "iadc-base" for the PMIC IADC peripheral base register.
+ Should be "batt-id-trim-cnst-rds" for reading the
+ SMBB_BAT_IF_TRIM_CNST_RDS register.
+- #address-cells : Must be one.
+- #size-cells : Must be zero.
+- interrupts : The USR bank peripheral IADC interrupt.
+- interrupt-names : Should contain "eoc-int-en-set".
+- qcom,adc-bit-resolution : Bit resolution of the ADC.
+- qcom,adc-vdd-reference : Voltage reference used by the ADC.
+
+Optional properties:
+- qcom,rsense : Use this property when external rsense should be used
+ for current calculation and specify the units in nano-ohms.
+- qcom,iadc-poll-eoc: Use polling instead of interrupts for End of Conversion completion.
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+ PMIC type and revision for applying the appropriate temperature
+ compensation parameters.
+- qcom,use-default-rds-trim : Add this property to select which conditions are checked when
+			reading the SMBB_BAT_IF_CNST_RDS register, the IADC_RDS trim register and the
+			manufacturer type. Refer to the driver for the conditions applied to each type.
+			0 : Select TypeA - read the IADC and SMBB trim registers and
+			    apply the default RSENSE if the conditions are met.
+			1 : Select TypeB - read the IADC and SMBB trim registers and the
+			    manufacturer type and apply the default RSENSE if the conditions are met.
+			2 : Select TypeC - read the IADC and SMBB trim registers and
+			    apply the default RSENSE if the conditions are met.
+
+Channel node
+NOTE: At least one Channel node is required.
+
+Client required property:
+- qcom,<consumer name>-iadc : The phandle to the corresponding iadc device.
+ The consumer name passed to the driver when calling
+ qpnp_get_iadc() is used to associate the client
+ with the corresponding device.
+
+Required properties:
+- label : Channel name used for sysfs entry.
+- reg : AMUX channel number.
+- qcom,channel-num : Channel number associated to the AMUX input.
+- qcom,decimation : Sampling rate to use for the individual channel measurement.
+ Select from the following unsigned int.
+ 0 : 512
+ 1 : 1K
+ 2 : 2K
+ 3 : 4K
+- qcom,fast-avg-setup : Average number of samples to be used for measurement. Fast averaging
+ provides the option to obtain a single measurement from the ADC that
+ is an average of multiple samples. The value selected is 2^(value)
+ Select from the following unsigned int.
+ 0 : 1
+ 1 : 2
+ 2 : 4
+ 3 : 8
+ 4 : 16
+ 5 : 32
+ 6 : 64
+ 7 : 128
+ 8 : 256
+- qcom,iadc-vadc : Corresponding phandle of the VADC device to read the die_temperature and set
+ simultaneous voltage and current conversion requests.
+
+Example:
+ /* Main Node */
+ qcom,iadc@3200 {
+ compatible = "qcom,qpnp-iadc";
+ reg = <0x3200 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0 0x36 0>;
+ interrupt-names = "eoc-int-en-set";
+ qcom,adc-bit-resolution = <16>;
+ qcom,adc-vdd-reference = <1800>;
+ qcom,rsense = <1500>;
+ qcom,iadc-vadc = <&pm8941_vadc>;
+
+ /* Channel Node */
+		chan@0 {
+ label = "rsense";
+ reg = <0>;
+ qcom,decimation = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+ };
+
+Client device example:
+/* Add to the clients node that needs the IADC */
+client_node {
+ qcom,client-iadc = <&pm8941_iadc>;
+};
diff --git a/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
new file mode 100644
index 000000000000..dd0d75d9c304
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwmon/qpnp-adc-voltage.txt
@@ -0,0 +1,181 @@
+Qualcomm's QPNP PMIC Voltage ADC Arbiter
+
+The QPNP PMIC Voltage ADC (VADC) provides an interface for clients to read
+voltage. A 15-bit ADC is used for voltage measurements. The VADC contains
+multiple peripherals; the scope of this driver is to provide an interface
+for the USR peripheral of the VADC.
+
+VADC node
+
+Required properties:
+- compatible : should be "qcom,qpnp-vadc" for Voltage ADC driver and
+ "qcom,qpnp-vadc-hc" for VADC_HC voltage ADC driver.
+- reg : offset and length of the PMIC Arbiter register map.
+- #address-cells : Must be one.
+- #size-cells : Must be zero.
+- interrupts : The USR bank peripheral VADC interrupt.
+- interrupt-names : Should contain "eoc-int-en-set" for EOC,
+ "high-thr-en-set" for high threshold interrupts and
+ "low-thr-en-set" for low threshold interrupts. High and low threshold
+ interrupts are to be enabled if VADC_USR needs to support recurring measurement.
+- qcom,adc-bit-resolution : Bit resolution of the ADC.
+- qcom,adc-vdd-reference : Voltage reference used by the ADC.
+
+Channel nodes
+NOTE: At least one Channel node is required.
+
+Optional properties:
+- qcom,vadc-poll-eoc: Use polling instead of interrupts for End of Conversion completion.
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+ PMIC type and revision for applying the appropriate temperature
+ compensation parameters.
+- qcom,vadc-meas-int-mode : Enable VADC_USR to handle requests to perform recurring measurements
+			for any one supported channel along with supporting single conversion
+			requests.
+- qcom,vadc-recalib-check: Add this property to check if recalibration is required due to inaccuracy.
+- qcom,vadc-thermal-node : If present, a thermal node is created and the channel is registered as
+			part of the thermal sysfs. This allows clients to use the thermal framework
+			to set temperature thresholds and receive notifications when the temperature
+			crosses a set threshold, to read the temperature, and to enable/set the trip
+			types supported by the thermal framework.
+- hkadc_ldo-supply : Add this property if VADC needs to perform a Software Vote for the HKADC.
+- hkadc_ok-supply : Add this property if the VADC needs to perform a Software vote for the HKADC VREG_OK.
+
+Client required property:
+- qcom,<consumer name>-vadc : The phandle to the corresponding vadc device.
+ The consumer name passed to the driver when calling
+ qpnp_get_vadc() is used to associate the client
+ with the corresponding device.
+
+Required properties:
+- label : Channel name used for sysfs entry.
+- reg : AMUX channel number.
+- qcom,decimation : Sampling rate to use for the individual channel measurement.
+ Select from following unsigned int.
+ 0 : 512
+ 1 : 1K
+ 2 : 2K
+ 3 : 4K
+- qcom,pre-div-channel-scaling : Pre-div used for the channel before the signal
+ is being measured. Some of the AMUX channels
+ support dividing the signal from a predetermined
+ ratio. The configuration for this node is to know
+ the pre-determined ratio and use it for post scaling.
+				Select from the following unsigned int.
+ 0 : {1, 1}
+ 1 : {1, 3}
+ 2 : {1, 4}
+ 3 : {1, 6}
+ 4 : {1, 20}
+ 5 : {1, 8}
+ 6 : {10, 81}
+ 7 : {1, 10}
+- qcom,calibration-type : Reference voltage to use for channel calibration.
+				Channel calibration is dependent on the channel.
+ Certain channels like XO_THERM, BATT_THERM use ratiometric
+ calibration. Most other channels fall under absolute calibration.
+ Select from the following strings.
+ "absolute" : Uses the 625mv and 1.25V reference channels.
+ "ratiometric" : Uses the reference Voltage/GND for calibration.
+- qcom,scale-function : Scaling function used to convert raw ADC code to units specific to
+ a given channel.
+ Select from the following unsigned int.
+ 0 : Default scaling to convert raw adc code to voltage.
+ 1 : Conversion to temperature based on btm parameters.
+ 2 : Returns result in degC for 100k pull-up.
+ 3 : Returns current across 0.1 ohm resistor.
+				4 : Returns XO thermistor voltage in degrees Centigrade.
+ 5 : Returns result in degC for 150k pull-up.
+				9 : Conversion to temperature based on the -15 to 55 degC allowable
+					battery charging temperature setting for btm parameters.
+- qcom,hw-settle-time : Settling period for the channel before ADC read.
+ Select from the following unsigned int.
+ 0 : 0us
+ 1 : 100us
+ 2 : 200us
+ 3 : 300us
+ 4 : 400us
+ 5 : 500us
+ 6 : 600us
+ 7 : 700us
+ 8 : 800us
+ 9 : 900us
+ 0xa : 1ms
+ 0xb : 2ms
+ 0xc : 4ms
+ 0xd : 6ms
+ 0xe : 8ms
+ 0xf : 10ms
+- qcom,fast-avg-setup : Average number of samples to be used for measurement. Fast averaging
+ provides the option to obtain a single measurement from the ADC that
+ is an average of multiple samples. The value selected is 2^(value)
+ Select from the following unsigned int.
+ 0 : 1
+ 1 : 2
+ 2 : 4
+ 3 : 8
+ 4 : 16
+ 5 : 32
+ 6 : 64
+ 7 : 128
+ 8 : 256
+
+Example:
+ /* Main Node */
+ qcom,vadc@3100 {
+ compatible = "qcom,qpnp-vadc";
+ reg = <0x3100 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <0x0 0x31 0x0>;
+ interrupt-names = "eoc-int-en-set";
+ qcom,adc-bit-resolution = <15>;
+ qcom,adc-vdd-reference = <1800>;
+
+ /* Channel Node */
+ chan@0 {
+ label = "usb_in";
+ reg = <0>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <4>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
+ };
+
+Client device example:
+/* Add to the clients node that needs the VADC channel A/D */
+client_node {
+ qcom,client-vadc = <&pm8941_vadc>;
+};
+
+/* Clients have an option of measuring an analog signal through an MPP.
+ MPP block is not part of the VADC block but is an individual PMIC
+ block that has an option to support clients to configure an MPP as
+ an analog input which can be routed through one of the VADC pre-mux
+ inputs. Here is an example of how to configure an MPP as an analog
+ input */
+
+/* Configure MPP4 as an Analog input to AMUX8 and read from channel 0x23 */
+/* MPP DT configuration in the platform DT file*/
+ mpp@a300 { /* MPP 4 */
+ qcom,mode = <4>; /* AIN input */
+ qcom,invert = <1>; /* Enable MPP */
+ qcom,ain-route = <3>; /* AMUX 8 */
+ qcom,master-en = <1>;
+ qcom,src-sel = <0>; /* Function constant */
+ };
+
+/* VADC Channel configuration */
+ chan@23 {
+ label = "mpp4_div3";
+ reg = <0x23>;
+ qcom,decimation = <0>;
+ qcom,pre-div-channel-scaling = <1>;
+ qcom,calibration-type = "absolute";
+ qcom,scale-function = <0>;
+ qcom,hw-settle-time = <0>;
+ qcom,fast-avg-setup = <0>;
+ };
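+
+/* The channels above use "absolute" calibration. A battery thermistor style
+   channel would typically use "ratiometric" calibration with the btm scale
+   function. The channel number, label and settle time below are illustrative
+   placeholders only, not values from a shipped board file. */
+	chan@30 {
+		label = "batt_therm";
+		reg = <0x30>;
+		qcom,decimation = <0>;
+		qcom,pre-div-channel-scaling = <0>;
+		qcom,calibration-type = "ratiometric";
+		qcom,scale-function = <1>;	/* btm temperature conversion */
+		qcom,hw-settle-time = <0xb>;	/* 2ms */
+		qcom,fast-avg-setup = <0>;
+	};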
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt
new file mode 100644
index 000000000000..ed1ddf597016
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash.txt
@@ -0,0 +1,180 @@
+Qualcomm Technologies PNP Flash LED
+
+The QPNP (Qualcomm Technologies Plug N Play) Flash LED (Light
+Emitting Diode) driver is used to provide illumination to the
+camera sensor when the ambient light is too dim to capture a good
+picture. It can also be used for flashlight/torch applications.
+It is part of the PMIC on Qualcomm Technologies reference platforms.
+The PMIC is connected to the host processor via the SPMI bus.
+
+Required properties:
+- compatible : should be "qcom,qpnp-flash-led"
+- reg : base address and size for flash LED modules
+
+Optional properties:
+- qcom,headroom : headroom to use. Values should be 250, 300,
+ 400 and 500 in mV.
+- qcom,startup-dly : delay before flashing after the flash is executed.
+			Values should be 10, 32, 64, and 128 in us.
+- qcom,clamp-curr : current to clamp at when voltage droop happens.
+ Values are in integer from 0 to 1000 inclusive,
+ indicating 0 to 1000 mA.
+- qcom,self-check-enabled : boolean type. self fault check enablement
+- qcom,thermal-derate-enabled : boolean type. derate enablement when module
+ temperature reaches threshold
+- qcom,thermal-derate-threshold : thermal threshold for derate. Values
+ should be 95, 105, 115, 125 in C.
+- qcom,thermal-derate-rate : derate rate when module temperature
+ reaches threshold. Values should be
+ "1_PERCENT", "1P25_PERCENT", "2_PERCENT",
+ "2P5_PERCENT", "5_PERCENT" in string.
+- qcom,current-ramp-enabled : boolean type. stepped current ramp enablement
+- qcom,ramp-up-step : current ramp up rate. Values should be
+ "0P2US", "0P4US", "0P8US", "1P6US", "3P3US",
+ "6P7US", "13P5US", "27US".
+- qcom,ramp-dn-step : current ramp down rate. Values should be
+ "0P2US", "0P4US", "0P8US", "1P6US", "3P3US",
+ "6P7US", "13P5US", "27US".
+- qcom,vph-pwr-droop-enabled : boolean type. VPH power droop enablement. Enablement
+ allows current clamp when phone power drops below
+ pre-determined threshold
+- qcom,vph-pwr-droop-threshold : VPH power threshold for module to clamp current.
+ Values are 2500 - 3200 in mV with 100 mV steps.
+- qcom,vph-pwr-droop-debounce-time : debounce time for module to confirm a voltage
+ droop is happening. Values are 0, 10, 32, 64
+ in us.
+- qcom,pmic-charger-support : Boolean type. This tells if flash utilizes charger boost
+ support
+- qcom,headroom-sense-ch0-enabled: Boolean type. This configures headroom sensing enablement
+ for LED channel 0
+- qcom,headroom-sense-ch1-enabled: Boolean type. This configures headroom sensing enablement
+ for LED channel 1
+- qcom,power-detect-enabled : Boolean type. This enables driver to get maximum flash LED
+ current at current battery level to avoid intensity clamp
+ when battery voltage is low
+- qcom,otst2-moduled-enabled : Boolean type. This enables driver to enable MASK to support
+ OTST2 connection.
+- qcom,follow-otst2-rb-disabled : Boolean type. This allows driver to reset/deset module.
+ By default, driver resets module. This entry allows driver to
+ bypass reset module sequence.
+- qcom,die-current-derate-enabled: Boolean type. This enables driver to get maximum flash LED
+ current, based on PMIC die temperature threshold to
+ avoid significant current derate from hardware. This property
+ is not needed if PMIC is older than PMI8994v2.0.
+- qcom,die-temp-vadc : VADC channel source for flash LED. This property is not
+ needed if PMIC is older than PMI8994v2.0.
+- qcom,die-temp-threshold : Integer type array for PMIC die temperature threshold.
+ Array should have at least one value. Values should be in
+				Celsius. This property is not needed if PMIC is older than
+ PMI8994v2.0.
+- qcom,die-temp-derate-current : Integer type array for PMIC die temperature derate
+ current. Array should have at least one value. Values
+ should be in mA. This property is not needed if PMIC is older
+ than PMI8994v2.0.
+
+Required properties inside child node. A child node contains settings for each individual LED.
+Each LED hardware needs a node for itself and a switch node to control brightness.
+For the purpose of turning on/off LED and better regulator control, "led:switch" node
+is introduced. "led:switch" acquires several existing properties from other nodes for
+operational simplification. For backward compatibility purpose, switch node can be optional:
+- label : type of led that will be used, either "flash" or "torch".
+- qcom,led-name : name of the LED. Accepted values are "led:flash_0",
+ "led:flash_1", "led:torch_0", "led:torch_1"
+- qcom,default-led-trigger : trigger for the camera flash and torch. Accepted values are
+			"flash0_trigger", "flash1_trigger", "torch0_trigger", "torch1_trigger"
+- qcom,id : enumerated ID for each physical LED. Accepted values are "0",
+ "1", etc..
+- qcom,max-current : maximum current allowed on this LED. Valid values should be
+ integer from 0 to 1000 inclusive, indicating 0 to 1000 mA.
+- qcom,pmic-revid : PMIC revision id source. This property is needed for PMI8996
+ revision check.
+
+Optional properties inside child node:
+- qcom,current : default current intensity for LED. Accepted values should be
+			integer from 0 to 1000 inclusive, indicating 0 to 1000 mA.
+- qcom,duration : Duration for flash LED. When duration time expires, hardware will turn off
+ flash LED. Values should be from 10 ms to 1280 ms with 10 ms incremental
+ step. Not applicable to torch. It is required for LED:SWITCH node to handle
+ LED used as flash.
+- reg<n> : reg<n> (<n> represents a number, e.g. 0, 1, 2, ...) property adds support for
+ multiple power sources. It includes two properties regulator-name and max-voltage.
+ Required property inside regulator node:
+ - regulator-name : This denotes this node is a regulator node and which
+ regulator to use.
+ Optional property inside regulator node:
+	 - max-voltage : This specifies the max voltage of the regulator. Some switch
+				or boost regulators do not need this property.
+
+Example:
+ qcom,leds@d300 {
+ compatible = "qcom,qpnp-flash-led";
+ status = "okay";
+ reg = <0xd300 0x100>;
+ label = "flash";
+ qcom,headroom = <500>;
+ qcom,startup-dly = <128>;
+ qcom,clamp-curr = <200>;
+ qcom,pmic-charger-support;
+ qcom,self-check-enabled;
+ qcom,thermal-derate-enabled;
+ qcom,thermal-derate-threshold = <80>;
+ qcom,thermal-derate-rate = "4_PERCENT";
+ qcom,current-ramp-enabled;
+ qcom,ramp_up_step = "27US";
+ qcom,ramp_dn_step = "27US";
+ qcom,vph-pwr-droop-enabled;
+ qcom,vph-pwr-droop-threshold = <3200>;
+ qcom,vph-pwr-droop-debounce-time = <10>;
+ qcom,headroom-sense-ch0-enabled;
+ qcom,headroom-sense-ch1-enabled;
+ qcom,die-current-derate-enabled;
+ qcom,die-temp-vadc = <&pmi8994_vadc>;
+ qcom,die-temp-threshold = <85 80 75 70 65>;
+ qcom,die-temp-derate-current = <400 800 1200 1600 2000>;
+ qcom,pmic-revid = <&pmi8994_revid>;
+
+ pm8226_flash0: qcom,flash_0 {
+ label = "flash";
+ qcom,led-name = "led:flash_0";
+ qcom,default-led-trigger =
+ "flash0_trigger";
+ qcom,max-current = <1000>;
+ qcom,id = <0>;
+ qcom,duration = <1280>;
+ qcom,current = <625>;
+ };
+
+ pm8226_torch: qcom,torch_0 {
+ label = "torch";
+ qcom,led-name = "led:torch_0";
+ qcom,default-led-trigger =
+ "torch0_trigger";
+ boost-supply = <&pm8226_chg_boost>;
+ qcom,max-current = <200>;
+ qcom,id = <0>;
+ qcom,current = <120>;
+ reg0 {
+ regulator-name =
+ "pm8226_chg_boost";
+ max-voltage = <3600000>;
+ };
+ };
+
+ pm8226_switch: qcom,switch {
+			label = "switch";
+ qcom,led-name = "led:switch";
+ qcom,default-led-trigger =
+ "switch_trigger";
+ qcom,id = <2>;
+ qcom,current = <625>;
+ qcom,duration = <1280>;
+ qcom,max-current = <1000>;
+ reg0 {
+ regulator-name =
+ "pm8226_chg_boost";
+ max-voltage = <3600000>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
new file mode 100644
index 000000000000..5df3f06763a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-wled.txt
@@ -0,0 +1,90 @@
+Qualcomm Technologies QPNP WLED
+
+QPNP (Qualcomm Technologies Plug N Play) WLED (White Light
+Emitting Diode) driver is used for controlling display
+backlight that is part of PMIC on Qualcomm Technologies
+reference platforms. The PMIC is connected to the host
+processor via SPMI bus.
+
+Required properties:
+- compatible : should be "qcom,qpnp-wled"
+- reg : base address and size for wled modules
+- reg-names : names associated with base addresses. It
+ should be "qpnp-wled-ctrl-base", "qpnp-wled-sink-base",
+ "qpnp-wled-ibb-base", "qpnp-wled-lab-base".
+
+Optional properties for WLED:
+ - interrupts : Specifies the interrupts associated with WLED. The available
+ interrupts are over voltage protection(ovp) and short circuit(sc).
+ The values for ovp and sc are <0x3 0xd8 0x1> and <0x3 0xd8 0x2>.
+ - interrupt-names : Specify the interrupt names associated with interrupts. Must be
+ one of "ovp-irq" or "sc-irq"
+- linux,name : name of the wled. default is "wled".
+- linux,default-trigger : trigger for the backlight. default is NONE.
+- qcom,fdbk-output : string feedback current output for wled module. The accepted values
+ are "wled1", "wled2", "wled3", "wled4" and "auto". default is "auto".
+- qcom,vref-mv : maximum reference voltage in mv. default is 350.
+- qcom,switch-freq-khz : switch frequency in khz. default is 800.
+- qcom,ovp-mv : over voltage protection value in mv. default is 17800.
+- qcom,ilim-ma : maximum current limiter in ma. default is 980.
+- qcom,boost-duty-ns : maximum boost duty cycle in ns. default is 104.
+- qcom,mod-freq-khz : modulation frequency in khz. default is 9600.
+- qcom,dim-mode : dimming mode. supporting dimming modes are "analog",
+ "digital", and "hybrid". default is "hybrid".
+- qcom,hyb-thres : threshold value when used in hybrid mode. It represents the
+			percentage of brightness at which the dimming mode is switched
+			from "digital" to "analog". The default value is 6.25%. As a
+			floating point value cannot be represented directly, the value is
+			multiplied by 100, so the default is 625.
+- qcom,sync-dly-us : delay for current sync in us. default is 400.
+- qcom,fs-curr-ua : maximum full scale current in ua. default is 25000.
+- qcom,en-9b-dim-res : boolean, specify if 9-bit dim resolution is needed. Otherwise 12-bit is used.
+- qcom,en-phase-stag : boolean, specify if phase staggering is needed.
+- qcom,en-cabc : boolean, specify if cabc (content adaptive backlight control) is needed.
+- qcom,disp-type-amoled : specify if the display is amoled
+- qcom,led-strings-list : The wled module has four strings of leds numbered from 0 to 3. Each string of leds
+			is operated individually. Specify the list of strings used by the device;
+			any combination of led strings can be used. The default value is [00 01 02 03].
+- qcom,en-ext-pfet-sc-pro : Specify if external pfet short circuit protection is needed
+- qcom,cons-sync-write-delay-us : Specify in 'us' the duration of delay between two consecutive writes to
+ SYNC register.
+
+Optional properties if 'qcom,disp-type-amoled' is mentioned in DT:
+- qcom,loop-ea-gm : control the gm for gm stage in control loop. default is 3.
+- qcom,loop-comp-res-kohm : control to select the compensation resistor in kohm. default is 320.
+- qcom,vref-psm-mv : reference psm voltage in mv. default for amoled is 450.
+- qcom,sc-deb-cycles : debounce time for short circuit detection
+- qcom,avdd-trim-steps-from-center : The number of steps to trim the OVP threshold voltage. The possible values can be between -7 to 8.
+
+Example:
+ qcom,leds@d800 {
+ compatible = "qcom,qpnp-wled";
+ reg = <0xd800 0x100>,
+ <0xd900 0x100>,
+ <0xdc00 0x100>,
+ <0xde00 0x100>;
+ reg-names = "qpnp-wled-ctrl-base",
+ "qpnp-wled-sink-base",
+ "qpnp-wled-ibb-base",
+ "qpnp-wled-lab-base";
+ interrupts = <0x3 0xd8 0x2>;
+ interrupt-names = "sc-irq";
+ status = "okay";
+ linux,name = "wled";
+ linux,default-trigger = "bkl-trigger";
+ qcom,fdbk-output = "auto";
+ qcom,vref-mv = <350>;
+ qcom,switch-freq-khz = <800>;
+ qcom,ovp-mv = <29500>;
+ qcom,ilim-ma = <980>;
+ qcom,boost-duty-ns = <26>;
+ qcom,mod-freq-khz = <9600>;
+ qcom,dim-mode = "hybrid";
+ qcom,dim-method = "linear";
+ qcom,hyb-thres = <625>;
+ qcom,sync-dly-us = <800>;
+ qcom,fs-curr-ua = <16000>;
+ qcom,en-phase-stag;
+ qcom,led-strings-list = [00 01 02 03];
+ qcom,en-ext-pfet-sc-pro;
+ };
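+
+If 'qcom,disp-type-amoled' is specified, the AMOLED-specific properties listed
+above can be added inside the same node. The following fragment is an
+illustrative sketch only (the values shown are the documented defaults or
+placeholders, not taken from a shipped board file):
+
+	qcom,disp-type-amoled;
+	qcom,loop-ea-gm = <3>;
+	qcom,loop-comp-res-kohm = <320>;
+	qcom,vref-psm-mv = <450>;
+	qcom,sc-deb-cycles = <4>;
+	qcom,avdd-trim-steps-from-center = <0>;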
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp.txt b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
new file mode 100644
index 000000000000..4564bfff3996
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp.txt
@@ -0,0 +1,358 @@
+Qualcomm QPNP Leds
+
+QPNP (Qualcomm Plug N Play) LEDs driver is used for
+controlling LEDs that are part of PMIC on Qualcomm reference
+platforms. The PMIC is connected to the host processor via the
+SPMI bus. This driver supports various LED modules such as
+Keypad backlight, WLED (white LED), RGB LED and flash LED.
+
+Each LED module is represented as a node of "leds-qpnp". This
+node will further contain the type of LED supported and its
+properties. At least one child node is required for each LED
+module. Each must have the required properties below, in addition
+to the properties for the LED type, WLED, Flash, RGB and MPP.
+
+Required properties for each child node, WLED, Flash and RGB:
+- compatible : should be "qcom,leds-qpnp"
+- qcom,id : must be one of values supported in enum qpnp_led
+- label : type of led that will be used, ie "wled"
+- qcom,max-current : maximum current that the LED can sustain in mA
+- linux,name : name of the led that is used in led framework
+
+Optional properties for each child node, WLED, Flash, MPP, RGB and KPDBL:
+- qcom,in-order-command-processing : specify if user space requests leds in order
+
+WLED is primarily used as display backlight. Display subsystem uses
+LED triggers for WLED to control the brightness as needed.
+
+Optional properties for WLED:
+- qcom,num-strings: number of wled strings to be configured
+- qcom,num-physical-strings: number of physical wled strings supported
+- qcom,ovp-val: over voltage protection threshold,
+ follows enum wled_ovp_threshold
+- qcom,boost-curr-lim: boost current limit, follows enum wled_current_bost_limit
+- qcom,ctrl-delay-us: delay in activation of led
+- qcom,dig-mod-gen-en: digital module generator
+- qcom,cs-out-en: current sink output enable
+- qcom,op-fdbck: selection of output as feedback for the boost, 00 = automatic selection, 01 = select LED1 output, 02 = select LED2 output, 03 = select LED3 output
+- qcom,cp-select: high pole capacitance
+- linux,default-trigger: trigger the led from external modules such as display
+- qcom,default-state: default state of the led, should be "on" or "off"
+
+Flash is used primarily as a camera or video flash.
+
+Optional properties for flash:
+- qcom,headroom: headroom to use. Values should be 0, 1, 2, 3 for 250mV, 300mV, 400mV and 500mV
+- qcom,duration: duration of the flash and torch, 10ms - 1280ms for flash and 2s - 33s for torch
+- qcom,clamp-curr: current to clamp at, mA
+- qcom,startup-dly: delay before flashing after the flash is executed. Values should be 0, 1, 2, 3 for 10us, 32us, 64us, and 128us
+- qcom,safety-timer: include for safety timer use, otherwise the watchdog timer will be used
+- linux,default-trigger: trigger the led from external modules such as display
+- qcom,default-state: default state of the led, should be "on" or "off"
+- qcom,torch-enable: set the flash led to torch mode functionality and trigger the software workaround for torch if the hardware does not support it
+- qcom,sw_vreg_ok: Specify if software strobe is used to inform the readiness of flash module to fire the flash LED when there is no smbb support
+- qcom,no-smbb-support: Specify if smbb boost is not required and there is a single regulator for both flash and torch.
+- flash-boost-supply: SMBB regulator for LED flash mode
+- torch-boost-supply: SMBB regulator for LED torch mode
+- flash-wa-supply: SMBB regulator for flash workarounds.
+
+RGB Led is a tri-colored led, Red, Blue & Green.
+
+Required properties for RGB led:
+- qcom,mode: mode the led should operate in, options "pwm" and "lpg". "manual" mode is not supported for RGB led.
+
+Required properties for PWM mode only:
+- pwms: Use the phandle of pwm device
+- qcom,pwm-us: time the pwm device will modulate at (us)
+
+Required properties for LPG mode only:
+- pwms: Use the phandle of pwm device
+- qcom,pwm-us: time the pwm device will modulate at (us)
+- qcom,duty-pcts: array of values for duty cycle to go through
+- qcom,start-idx: starting point duty-pcts array
+
+Optional properties for LPG mode only:
+- qcom,pause-lo: pause at low end of cycle
+- qcom,pause-hi: pause at high end of cycle
+- qcom,ramp-step-ms: step between each cycle (ms)
+- qcom,lut-flags: flags to be used in lut configuration
+
+Optional properties for RGB led:
+- linux,default-trigger: trigger the led from external modules such as display
+- qcom,default-state: default state of the led, should be "on" or "off"
+- qcom,turn-off-delay-ms: delay in millisecond for turning off the led when its default-state is "on". Value is being ignored in case default-state is "off".
+- qcom,use-blink: Use blink sysfs entry for switching into lpg mode. For optimal use, set default mode to pwm. All required lpg parameters must be supplied.
+
+MPP LED is an LED controlled through a Multi Purpose Pin.
+
+Optional properties for MPP LED:
+- linux,default-trigger: trigger the led from external modules such as display
+- qcom,default-state: default state of the led, should be "on" or "off"
+- qcom,source-sel: select power source, default 1 (enabled)
+- qcom,mode-ctrl: select operation mode, default 0x60 = Mode Sink
+- qcom,mode: mode the led should operate in, options "pwm", "lpg" and "manual"
+- qcom,vin-ctrl: select input source, supported values are 0 to 3
+- qcom,use-blink: Use blink sysfs entry for switching into lpg mode. For optimal use, set default mode to pwm. All required lpg parameters must be supplied.
+- qcom,min-brightness - Lowest possible brightness supported on this LED other than 0.
+- qcom,current-setting: default current value for wled used as button backlight in mA
+- mpp-power-supply: regulator support for MPP LED
+- qcom,mpp-power-max-voltage - maximum voltage for MPP LED regulator. This should not be specified when no regulator is in use.
+- qcom,mpp-power-min-voltage - minimum voltage for MPP LED regulator. This should not be specified when no regulator is in use.
+
+Required properties for PWM mode only:
+- pwms: Use the phandle of pwm device
+- qcom,pwm-us: time the pwm device will modulate at (us)
+
+Required properties for LPG mode only:
+- pwms: Use the phandle of pwm device
+- qcom,pwm-us: time the pwm device will modulate at (us)
+- qcom,duty-pcts: array of values for duty cycle to go through
+- qcom,start-idx: starting point duty-pcts array
+
+Optional properties for LPG mode only:
+- qcom,pause-lo: pause at low end of cycle
+- qcom,pause-hi: pause at high end of cycle
+- qcom,ramp-step-ms: step between each cycle (ms)
+- qcom,lut-flags: flags to be used in lut configuration
+
+Keypad backlight is a backlight source for buttons. It supports four rows
+and the required rows are enabled by specifying values in the properties.
+
+Required properties for keypad backlight:
+- qcom,mode: mode the led should operate in, options "pwm" and "lpg". "manual" mode is not supported for keypad backlight.
+- qcom,row-id: specify the id of the row. Supported values are 0 to 3.
+
+Optional properties for keypad backlight:
+- qcom,row-src-vbst: select source for rows. Specify for vbst and ignore it
+ for vph_pwr.
+- qcom,row-src-en: specify to enable row source
+- qcom,always-on: specify if the module has to be always on
+- qcom,use-blink: Use blink sysfs entry for switching into lpg mode. For optimal use, set default mode to pwm. All required lpg parameters must be supplied.
+
+Required properties for PWM mode only:
+- pwms: Use the phandle of pwm device
+- qcom,pwm-us: time the pwm device will modulate at (us)
+
+Required properties for LPG mode only:
+- pwms: Use the phandle of pwm device
+- qcom,pwm-us: time the pwm device will modulate at (us)
+- qcom,duty-pcts: array of values for duty cycle to go through
+- qcom,start-idx: starting point duty-pcts array
+
+Optional properties for LPG mode only:
+- qcom,pause-lo: pause at low end of cycle
+- qcom,pause-hi: pause at high end of cycle
+- qcom,ramp-step-ms: step between each cycle (ms)
+- qcom,lut-flags: flags to be used in lut configuration
+
+GPIO LED is an LED controlled through a PMIC GPIO.
+
+Optional properties for GPIO LED:
+- linux,default-trigger: trigger the led from external modules such as charging
+- qcom,default-state: default state of the led, should be "on" or "off"
+- qcom,turn-off-delay-ms: delay in millisecond for turning off the led when its default-state is "on". Value is being ignored in case default-state is "off".
+- qcom,source-sel: select power source, default 1 (enabled)
+- qcom,mode-ctrl: select operation mode, default 0x60 = Mode Sink
+- qcom,vin-ctrl: select input source, supported values are 0 to 7
+
+Example:
+
+ qcom,leds@a100 {
+ status = "okay";
+ qcom,led_mpp_2 {
+ label = "mpp";
+ linux,name = "button-backlight";
+ linux,default-trigger = "hr-trigger";
+ qcom,default-state = "off";
+ qcom,current-setting = <20>;
+ qcom,max-current = <40>;
+ qcom,id = <6>;
+ qcom,source-sel = <1>;
+ qcom,mode-ctrl = <0x61>;
+ qcom,mode = "manual";
+ };
+ };
+
+ qcom,leds@a200 {
+ status = "okay";
+ qcom,led_mpp_3 {
+ label = "mpp";
+ linux,name = "wled-backlight";
+			linux,default-trigger = "none";
+ qcom,default-state = "on";
+ qcom,max-current = <40>;
+ qcom,id = <6>;
+ qcom,source-sel = <1>;
+ qcom,mode-ctrl = <0x10>;
+ qcom,vin-ctrl = <0x03>;
+ qcom,min-brightness = <20>;
+ };
+ };
+
+ qcom,leds@a300 {
+ status = "okay";
+ qcom,led_mpp_pwm {
+ label = "mpp";
+ linux,name = "green";
+ linux,default-trigger = "none";
+ qcom,default-state = "off";
+ qcom,max-current = <40>;
+ qcom,current-setting = <5>;
+ qcom,id = <6>;
+ qcom,mode = "pwm";
+ qcom,source-sel = <8>;
+ qcom,mode-ctrl = <0x60>;
+ pwms = <&pm8941_pwm_1 0 0>;
+ qcom,pwm-us = <1000>;
+ };
+ };
+
+ qcom,leds@d000 {
+ status = "okay";
+ qcom,rgb_pwm {
+ label = "rgb";
+ linux,name = "led:rgb_red";
+ qcom,mode = "pwm";
+ qcom,pwm-us = <1000>;
+ pwms = <&pm8941_pwm_7 0 0>;
+ qcom,max-current = <12>;
+ qcom,default-state = "off";
+ qcom,id = <3>;
+ linux,default-trigger =
+ "battery-charging";
+ };
+ qcom,rgb_lpg {
+ label = "rgb";
+ linux,name = "led:rgb_green";
+ qcom,mode = "lpg";
+ pwms = <&pm8941_pwm_6 0 0>;
+ qcom,pwm-us = <1000>;
+ qcom,duty-ms = <20>;
+ qcom,start-idx = <1>;
+ qcom,idx-len = <10>;
+ qcom,duty-pcts = [00 19 32 4B 64
+ 64 4B 32 19 00];
+ qcom,max-current = <12>;
+ qcom,default-state = "off";
+ qcom,id = <3>;
+ linux,default-trigger =
+ "battery-charging";
+ };
+
+ qcom,rgb_blink {
+ label = "rgb";
+ linux,name = "led:rgb_blue";
+ qcom,mode = "pwm";
+ pwms = <&pm8941_pwm_5 0 0>;
+ qcom,start-idx = <1>;
+ qcom,idx-len = <10>;
+ qcom,duty-pcts = [00 19 32 4B 64
+ 64 4B 32 19 00];
+ qcom,lut-flags = <3>;
+ qcom,pause-lo = <0>;
+ qcom,pause-hi = <0>;
+ qcom,ramp-step-ms = <255>;
+ qcom,max-current = <12>;
+ qcom,default-state = "on";
+ qcom,turn-off-delay-ms = <500>;
+ qcom,id = <5>;
+ linux,default-trigger = "none";
+ qcom,pwm-us = <1000>;
+ qcom,use-blink;
+ };
+ };
+
+ qcom,leds@d300 {
+ compatible = "qcom,leds-qpnp";
+ status = "okay";
+ flash-boost-supply = <&pm8941_chg_boost>;
+ torch-boost-supply = <&pm8941_boost>;
+ qcom,flash_0 {
+ qcom,max-current = <1000>;
+ qcom,default-state = "off";
+ qcom,headroom = <0>;
+ qcom,duration = <200>;
+ qcom,clamp-curr = <200>;
+ qcom,startup-dly = <1>;
+ qcom,safety-timer;
+ label = "flash";
+ linux,default-trigger =
+ "flash0_trigger";
+ linux,name = "led:flash_0";
+ qcom,current = <625>;
+ qcom,id = <1>;
+ qcom,no-torch-module;
+ };
+ };
+
+ qcom,leds@d800 {
+ compatible = "qcom,leds-qpnp";
+ status = "okay";
+ qcom,wled_0 {
+			linux,default-trigger = "bkl-trigger";
+ label = "wled";
+ qcom,cs-out-en;
+ qcom,op-fdbck = <1>;
+			qcom,default-state = "off";
+ qcom,max-current = <25>;
+ qcom,ctrl-delay-us = <0>;
+ qcom,boost-curr-lim = <3>;
+ qcom,cp-sel = <0>;
+ qcom,switch-freq = <2>;
+ qcom,ovp-val = <2>;
+ qcom,num-strings = <1>;
+ qcom,id = <0>;
+ linux,name = "led:wled_backlight";
+ };
+ };
+
+ qcom,leds@e200 {
+ status = "okay";
+
+ qcom,kpdbl1 {
+ label = "kpdbl";
+ linux,name = "kpdbl-pwm-1";
+ qcom,mode = <0>;
+ pwms = <&pm8941_pwm_9 0 0>;
+ qcom,pwm-us = <1000>;
+ qcom,id = <7>;
+ qcom,max-current = <20>;
+ qcom,row-id = <0>;
+ qcom,row-src-en;
+ qcom,always-on;
+ };
+
+ qcom,kpdbl2 {
+ label = "kpdbl";
+ linux,name = "kpdbl-lut-2";
+ qcom,mode = <1>;
+ pwms = <&pm8941_pwm_10 0 0>;
+ qcom,pwm-us = <1000>;
+ qcom,start-idx = <1>;
+ qcom,duty-pcts = [00 00 00 00 64
+ 64 00 00 00 00];
+ qcom,id = <7>;
+ qcom,max-current = <20>;
+ qcom,row-id = <1>;
+ qcom,row-src-en;
+ };
+
+ };
+
+ qcom,leds@c900 {
+ compatible = "qcom,leds-qpnp";
+ reg = <0xc900 0x100>;
+ status = "okay";
+ qcom,led_gpio_10 {
+ label = "gpio";
+ linux,name = "led:notification";
+ qcom,max-current = <40>;
+ qcom,id = <8>;
+ linux,default-trigger = "notification";
+ qcom,default-state = "on";
+ qcom,turn-off-delay-ms = <1000>;
+ qcom,source-sel = <1>;
+ qcom,mode-ctrl = <0x10>;
+ qcom,vin-ctrl = <0x02>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/platform/msm/gpio-usbdetect.txt b/Documentation/devicetree/bindings/platform/msm/gpio-usbdetect.txt
new file mode 100644
index 000000000000..6ade25c32526
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/gpio-usbdetect.txt
@@ -0,0 +1,26 @@
+GPIO USB VBUS Detection
+
+Discrete USB VBUS detection circuitry can be connected to the AP or PMICs.
+Such circuits can be used to detect when a USB cable is connected to
+an upstream port such as a standard host or a wall charger by detecting
+the presence of VBUS voltage. The GPIO can be configured to trigger an
+interrupt, allowing the software driver to in turn notify the USB
+subsystem using the power_supply framework.
+
+Required Properties:
+ - compatible: must be "qcom,gpio-usbdetect"
+ - interrupts: an interrupt triggered by the output of the detection circuit
+ - interrupt-names: must be "vbus_det_irq"
+
+Optional Properties:
+ - vin-supply: phandle to a regulator that powers this circuit, if needed
+
+Example:
+
+ usb_detect {
+ compatible = "qcom,gpio-usbdetect";
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x0 0xCA 0x0>; /* PMA8084 GPIO 11 */
+ interrupt-names = "vbus_det_irq";
+ vin-supply = <&vbus_det_reg>;
+ };
diff --git a/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt b/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt
new file mode 100644
index 000000000000..6644673eef85
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/qpnp-power-on.txt
@@ -0,0 +1,183 @@
+Qualcomm QPNP power-on
+
+qpnp-power-on is a driver which supports the power-on (PON)
+peripheral on Qualcomm PMICs. The supported functionality includes
+power on/off reason, key press/release detection, PMIC reset configurations
+and other PON specific features. The PON module supports multiple physical
+power-on (KPDPWR_N, CBLPWR) and reset (KPDPWR_N, RESIN, KPDPWR+RESIN) sources.
+This peripheral is connected to the host processor via the SPMI interface.
+
+Required properties:
+- compatible: Must be "qcom,qpnp-power-on"
+- reg: Specifies the SPMI address and size for this PON (power-on) peripheral
+- interrupts: Specifies the interrupt associated with PON.
+- interrupt-names: Specify the interrupt names associated with interrupts. Must be
+ one of "kpdpwr", "kpdpwr-bark", "resin", "resin-bark", "cblpwr",
+ "kpdpwr-resin-bark". Bark interrupts are associated with system
+ reset configuration to allow default reset configuration to be
+ activated. If system reset configuration is not supported then
+ bark interrupts are nops. Additionally, the "pmic-wd-bark"
+ interrupt can be added if the system needs to handle PMIC
+			watchdog barks.
+
+Optional properties:
+- qcom,pon-dbc-delay		The debounce delay for the power-key interrupt,
+				specified in us. The value ranges from 2 seconds
+				down to 1/64 of a second. Possible values (in seconds) are -
+				- 2, 1, 1/2, 1/4, 1/8, 1/16, 1/32, 1/64
+				- An intermediate value is rounded down to the
+				  nearest valid value (e.g. 1/64 s corresponds to
+				  15625 us, as used in the example below).
+- qcom,pon_1 ...pon_n These represent the child nodes which describe
+ the properties (reset, key) for each of the pon
+				reset sources. All the child nodes are optional.
+ If none of them is specified, the driver fails
+ to register.
+- qcom,system-reset Specifies that this PON peripheral can be used
+ to reset the system. This property can only be
+ used by one device on the system. It is an error
+ to include it more than once.
+- qcom,s3-debounce The debounce delay for stage3 reset trigger in
+ secs. The values range from 0 to 128.
+- qcom,s3-src The source for stage 3 reset. It can be one of
+ "kpdpwr", "resin", "kpdpwr-or-resin" or
+ "kpdpwr-and-resin". The default value is
+ "kpdpwr-and-resin".
+- qcom,uvlo-panic		If this property is present, the device will panic during reboot if the reboot is due to under-voltage lockout
+- qcom,clear-warm-reset Specifies that the WARM_RESET reason registers
+ need to be cleared for this target. The property
+ is used for the targets which have a hardware feature
+ to catch resets which aren't triggered by the MSM.
+ In such cases clearing WARM_REASON registers across
+ MSM resets keeps the registers in good state.
+- qcom,secondary-pon-reset Boolean property which indicates that the PON
+ peripheral is a secondary PON device which needs
+ to be configured during reset in addition to the
+ primary PON device that is configured for system
+ reset through qcom,system-reset property.
+ This should not be defined along with the
+ qcom,system-reset property.
+- qcom,store-hard-reset-reason Boolean property which if set will store the hardware
+ reset reason to SOFT_RB_SPARE register of the core PMIC
+ PON peripheral.
+- qcom,warm-reset-poweroff-type Poweroff type required to be configured on
+ PS_HOLD reset control register when the system
+ goes for warm reset. If this property is not
+ specified, then the default type, warm reset
+ will be configured to PS_HOLD reset control
+ register.
+- qcom,hard-reset-poweroff-type Same description as qcom,warm-reset-poweroff-type
+ but this applies for the system hard reset case.
+- qcom,shutdown-poweroff-type Same description as qcom,warm-reset-poweroff-type
+ but this applies for the system shutdown case.
+
+
+All the below properties are in the sub-node section (properties of the child
+node).
+
+Sub-nodes (if defined) should belong to either a PON configuration or a
+regulator configuration.
+
+Regulator sub-node required properties:
+- regulator-name Regulator name for the PON regulator that
+ is being configured.
+- qcom,pon-spare-reg-addr Register offset from the base address of the PON
+ peripheral that needs to be configured for the
+ regulator being controlled.
+- qcom,pon-spare-reg-bit Bit position in the specified register that
+ needs to be configured for the regulator being
+ controlled.
+
+PON sub-node required properties:
+- qcom,pon-type The type of PON/RESET source. The driver
+ currently supports KPDPWR(0), RESIN(1) and
+ CBLPWR(2) pon/reset sources.
+
+PON sub-node optional properties:
+- qcom,pull-up The initial state of the reset pin under
+ consideration.
+ 0 = No pull-up
+ 1 = pull-up enabled
+ This property is set to '0' if not specified.
+- qcom,support-reset Indicates if this PON source supports
+ reset functionality.
+ 0 = Not supported
+ 1 = Supported
+ If this property is not defined, then do not modify S2 reset
+ values.
+- qcom,use-bark Specify if this pon type needs to handle bark irq
+- linux,code The input key-code associated with the reset source.
+ The reset source in its default configuration can be
+ used to support standard keys.
+
+The below mentioned properties are required only when qcom,support-reset DT property is defined
+and is set to 1.
+
+- qcom,s1-timer The debounce timer for the BARK interrupt for
+ that reset source. Value is specified in ms.
+ Supported values are -
+ - 0, 32, 56, 80, 128, 184, 272, 408, 608, 904
+ 1352, 2048, 3072, 4480, 6720, 10256
+- qcom,s2-timer The debounce timer for the S2 reset specified
+ in ms. On the expiry of this timer, the PMIC
+ executes the reset sequence. Supported values -
+ - 0, 10, 50, 100, 250, 500, 1000, 2000
+- qcom,s2-type The type of reset associated with this source.
+ The supported resets are -
+ SOFT(0), WARM(1), SHUTDOWN(4), HARD(7)
+
+Example:
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ interrupts = <0x0 0x8 0x0>,
+ <0x0 0x8 0x1>,
+ <0x0 0x8 0x4>,
+ <0x0 0x8 0x5>;
+ interrupt-names = "kpdpwr", "resin",
+ "resin-bark", "kpdpwr-resin-bark";
+ qcom,pon-dbc-delay = <15625>;
+ qcom,system-reset;
+ qcom,s3-debounce = <32>;
+ qcom,s3-src = "resin";
+ qcom,clear-warm-reset;
+ qcom,store-hard-reset-reason;
+
+ qcom,pon_1 {
+ qcom,pon-type = <0>;
+ qcom,pull-up = <1>;
+ linux,code = <116>;
+ };
+
+ qcom,pon_2 {
+ qcom,pon-type = <1>;
+ qcom,support-reset = <1>;
+ qcom,pull-up = <1>;
+ qcom,s1-timer = <0>;
+ qcom,s2-timer = <2000>;
+ qcom,s2-type = <1>;
+ linux,code = <114>;
+ qcom,use-bark;
+ };
+
+ qcom,pon_3 {
+ qcom,pon-type = <3>;
+ qcom,support-reset = <1>;
+ qcom,s1-timer = <6720>;
+ qcom,s2-timer = <2000>;
+ qcom,s2-type = <7>;
+ qcom,pull-up = <1>;
+ qcom,use-bark;
+ };
+ };
+ qcom,power-on@800 {
+ compatible = "qcom,qpnp-power-on";
+ reg = <0x800 0x100>;
+ qcom,secondary-pon-reset;
+ qcom,hard-reset-poweroff-type = <PON_POWER_OFF_SHUTDOWN>;
+
+ pon_perph_reg: qcom,pon_perph_reg {
+ regulator-name = "pon_spare_reg";
+ qcom,pon-spare-reg-addr = <0x8c>;
+ qcom,pon-spare-reg-bit = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rtc/qpnp-rtc.txt b/Documentation/devicetree/bindings/rtc/qpnp-rtc.txt
new file mode 100644
index 000000000000..156141fef71c
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/qpnp-rtc.txt
@@ -0,0 +1,64 @@
+* msm-qpnp-rtc
+
+msm-qpnp-rtc is an RTC driver that supports the 32-bit RTC housed inside the
+PMIC. The driver utilizes the MSM SPMI interface to communicate with the RTC
+module. The RTC device is divided into two sub-peripherals: one controls the
+basic RTC and the other controls the alarm.
+
+[PMIC RTC Device Declarations]
+
+-Root Node-
+
+Required properties :
+ - compatible: Must be "qcom,qpnp-rtc"
+ - #address-cells: The number of cells dedicated to represent an address
+ This must be set to '1'.
+ - #size-cells: The number of cells dedicated to represent address
+ space range of a peripheral. This must be set to '1'.
+ - spmi-dev-container: This specifies that all the device nodes specified
+ within this node should have their resources
+ coalesced into a single spmi_device.
+
+Optional properties:
+ - qcom,qpnp-rtc-write: This property enables/disables rtc write
+					operation. If not mentioned, the rtc
+					driver keeps
+ rtc writes disabled.
+ 0 = Disable rtc writes.
+ 1 = Enable rtc writes.
+ - qcom,qpnp-rtc-alarm-pwrup: This property enables/disables feature of
+ powering up phone (from power down state)
+ through alarm interrupt.
+					If not mentioned, the rtc driver will
+					disable the feature of powering up the
+					phone through the alarm.
+ 0 = Disable powering up of phone through
+ alarm interrupt.
+ 1 = Enable powering up of phone through
+ alarm interrupt.
+
+-Child Nodes-
+
+Required properties :
+	- reg :			Specifies the SPMI offset and size for the device.
+ - interrupts: Specifies alarm interrupt, only for rtc_alarm
+ sub-peripheral.
+
+Example:
+ qcom,pm8941_rtc {
+ spmi-dev-container;
+ compatible = "qcom,qpnp-rtc";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ qcom,qpnp-rtc-write = <0>;
+ qcom,qpnp-rtc-alarm-pwrup = <0>;
+
+ qcom,pm8941_rtc_rw@6000 {
+ reg = <0x6000 0x100>;
+ };
+
+ qcom,pm8941_rtc_alarm@6100 {
+ reg = <0x6100 0x100>;
+ interrupts = <0x0 0x61 0x1>;
+ };
+ };
+
+
diff --git a/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
new file mode 100644
index 000000000000..1c426922032f
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qpnp-temp-alarm.txt
@@ -0,0 +1,68 @@
+Qualcomm QPNP Temperature Alarm
+
+QPNP temperature alarm peripherals are found inside of Qualcomm PMIC chips that
+utilize the MSM SPMI implementation. These peripherals provide an interrupt
+signal and status register to identify high PMIC die temperature.
+
+Required properties:
+- compatible: Must be "qcom,qpnp-temp-alarm".
+- reg: Specifies the SPMI address and size for this temperature
+ alarm device.
+- interrupts: PMIC temperature alarm interrupt
+- label: A string used as a descriptive name for this thermal device.
+ This name should be 19 characters or less.
+
+Required structure:
+- A qcom,qpnp-temp-alarm node must be a child of an SPMI node that has specified
+ the spmi-slave-container property
+
+Optional properties:
+- qcom,channel-num:	VADC channel number associated with the PMIC DIE_TEMP thermistor.
+ If no channel is specified, then the die temperature
+ must be estimated based on the over temperature stage.
+- qcom,threshold-set: Integer value which specifies which set of threshold
+ temperatures to use for the over temperature stages.
+ Possible values (x = {stage 1 threshold temperature,
+ stage 2 threshold temperature,
+ stage 3 threshold temperature}):
+ 0 = {105 C, 125 C, 145 C}
+ 1 = {110 C, 130 C, 150 C}
+ 2 = {115 C, 135 C, 155 C}
+ 3 = {120 C, 140 C, 160 C}
+- qcom,allow-override: Boolean which controls the ability of software to
+ override shutdowns. If present, then software is
+ allowed to override automatic PMIC hardware stage 2 and
+ stage 3 over temperature shutdowns. Otherwise, software
+ is not allowed to override automatic shutdown.
+- qcom,default-temp:	Specifies the default temperature in millicelsius to use
+ if no ADC channel is present to read the real time
+ temperature.
+- qcom,temp_alarm-vadc: Corresponding VADC device's phandle.
+
+Note, if a given optional qcom,* binding is not present, then the default
+hardware state for that feature will be maintained.
+
+Example:
+&spmi_bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+
+ qcom,pm8941@0 {
+ spmi-slave-container;
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qcom,temp-alarm@2400 {
+ compatible = "qcom,qpnp-temp-alarm";
+ reg = <0x2400 0x100>;
+ interrupts = <0x0 0x24 0x0>;
+ label = "pm8941_tz";
+ qcom,channel-num = <8>;
+ qcom,threshold-set = <0>;
+ qcom,temp_alarm-vadc = <&pm8941_vadc>;
+ };
+ };
+};
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b18bea08ff25..5931c1afded4 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -337,6 +337,26 @@ config GPIO_PXA
help
Say yes here to support the PXA GPIO device
+config GPIO_QPNP_PIN
+ depends on SPMI || MSM_SPMI
+ depends on OF_SPMI
+ depends on MSM_QPNP_INT
+ tristate "Qualcomm QPNP gpio support"
+ help
+ Say 'y' here to include support for the Qualcomm QPNP gpio
+ driver. This driver supports Device Tree and allows a
+ device_node to be registered as a gpio-controller. It
+ does not handle gpio interrupts directly. That work is handled
+ by CONFIG_MSM_QPNP_INT.
+
+config GPIO_QPNP_PIN_DEBUG
+ depends on GPIO_QPNP_PIN
+ depends on DEBUG_FS
+ bool "Qualcomm QPNP GPIO debug support"
+ help
+ Say 'y' here to include debug support for the Qualcomm
+ QPNP gpio driver.
+
config GPIO_RCAR
tristate "Renesas R-Car GPIO"
depends on ARCH_SHMOBILE || COMPILE_TEST
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 986dbd838cea..6d9d5e5d62ac 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_QPNP_PIN) += qpnp-pin.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
new file mode 100644
index 000000000000..6f797190d732
--- /dev/null
+++ b/drivers/gpio/qpnp-pin.c
@@ -0,0 +1,1679 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/qpnp/pin.h>
+
+#define Q_REG_ADDR(q_spec, reg_index) \
+ ((q_spec)->offset + reg_index)
+
+#define Q_REG_STATUS1 0x8
+#define Q_REG_STATUS1_VAL_MASK 0x1
+#define Q_REG_STATUS1_GPIO_EN_REV0_MASK 0x2
+#define Q_REG_STATUS1_GPIO_EN_MASK 0x80
+#define Q_REG_STATUS1_MPP_EN_MASK 0x80
+
+#define Q_NUM_CTL_REGS 0xD
+
+/* revision registers base address offsets */
+#define Q_REG_DIG_MINOR_REV 0x0
+#define Q_REG_DIG_MAJOR_REV 0x1
+#define Q_REG_ANA_MINOR_REV 0x2
+
+/* type registers base address offsets */
+#define Q_REG_TYPE 0x4
+#define Q_REG_SUBTYPE 0x5
+
+/* gpio peripheral type and subtype values */
+#define Q_GPIO_TYPE 0x10
+#define Q_GPIO_SUBTYPE_GPIO_4CH 0x1
+#define Q_GPIO_SUBTYPE_GPIOC_4CH 0x5
+#define Q_GPIO_SUBTYPE_GPIO_8CH 0x9
+#define Q_GPIO_SUBTYPE_GPIOC_8CH 0xD
+#define Q_GPIO_SUBTYPE_GPIO_LV 0x10
+#define Q_GPIO_SUBTYPE_GPIO_MV 0x11
+
+/* mpp peripheral type and subtype values */
+#define Q_MPP_TYPE 0x11
+#define Q_MPP_SUBTYPE_4CH_NO_ANA_OUT 0x3
+#define Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT 0x4
+#define Q_MPP_SUBTYPE_4CH_NO_SINK 0x5
+#define Q_MPP_SUBTYPE_ULT_4CH_NO_SINK 0x6
+#define Q_MPP_SUBTYPE_4CH_FULL_FUNC 0x7
+#define Q_MPP_SUBTYPE_8CH_FULL_FUNC 0xF
+
+/* control register base address offsets */
+#define Q_REG_MODE_CTL 0x40
+#define Q_REG_DIG_VIN_CTL 0x41
+#define Q_REG_DIG_PULL_CTL 0x42
+#define Q_REG_DIG_IN_CTL 0x43
+#define Q_REG_DIG_OUT_SRC_CTL 0x44
+#define Q_REG_DIG_OUT_CTL 0x45
+#define Q_REG_EN_CTL 0x46
+#define Q_REG_AOUT_CTL 0x48
+#define Q_REG_AIN_CTL 0x4A
+#define Q_REG_APASS_SEL_CTL 0x4A
+#define Q_REG_SINK_CTL 0x4C
+
+/* control register regs array indices */
+#define Q_REG_I_MODE_CTL 0
+#define Q_REG_I_DIG_VIN_CTL 1
+#define Q_REG_I_DIG_PULL_CTL 2
+#define Q_REG_I_DIG_IN_CTL 3
+#define Q_REG_I_DIG_OUT_SRC_CTL 4
+#define Q_REG_I_DIG_OUT_CTL 5
+#define Q_REG_I_EN_CTL 6
+#define Q_REG_I_AOUT_CTL 8
+#define Q_REG_I_APASS_SEL_CTL 10
+#define Q_REG_I_AIN_CTL 10
+#define Q_REG_I_SINK_CTL 12
+
+/* control reg: mode */
+#define Q_REG_OUT_INVERT_SHIFT 0
+#define Q_REG_OUT_INVERT_MASK 0x1
+#define Q_REG_SRC_SEL_SHIFT 1
+#define Q_REG_SRC_SEL_MASK 0xE
+#define Q_REG_MODE_SEL_SHIFT 4
+#define Q_REG_MODE_SEL_MASK 0x70
+#define Q_REG_LV_MV_MODE_SEL_SHIFT 0
+#define Q_REG_LV_MV_MODE_SEL_MASK 0x3
+
+/* control reg: dig_out_src (GPIO LV/MV only) */
+#define Q_REG_DIG_OUT_SRC_SRC_SEL_SHIFT 0
+#define Q_REG_DIG_OUT_SRC_SRC_SEL_MASK 0xF
+#define Q_REG_DIG_OUT_SRC_INVERT_SHIFT 7
+#define Q_REG_DIG_OUT_SRC_INVERT_MASK 0x80
+
+/* control reg: dig_vin */
+#define Q_REG_VIN_SHIFT 0
+#define Q_REG_VIN_MASK 0x7
+
+/* control reg: dig_pull */
+#define Q_REG_PULL_SHIFT 0
+#define Q_REG_PULL_MASK 0x7
+
+/* control reg: dig_out */
+#define Q_REG_OUT_STRENGTH_SHIFT 0
+#define Q_REG_OUT_STRENGTH_MASK 0x3
+#define Q_REG_OUT_TYPE_SHIFT 4
+#define Q_REG_OUT_TYPE_MASK 0x30
+
+/* control reg: dig_in_ctl */
+#define Q_REG_DTEST_SEL_SHIFT 0
+#define Q_REG_DTEST_SEL_MASK 0xF
+#define Q_REG_LV_MV_DTEST_SEL_CFG_SHIFT 0
+#define Q_REG_LV_MV_DTEST_SEL_CFG_MASK 0x7
+#define Q_REG_LV_MV_DTEST_SEL_EN_SHIFT 7
+#define Q_REG_LV_MV_DTEST_SEL_EN_MASK 0x80
+
+/* control reg: en */
+#define Q_REG_MASTER_EN_SHIFT 7
+#define Q_REG_MASTER_EN_MASK 0x80
+
+/* control reg: ana_out */
+#define Q_REG_AOUT_REF_SHIFT 0
+#define Q_REG_AOUT_REF_MASK 0x7
+
+/* control reg: ana_in */
+#define Q_REG_AIN_ROUTE_SHIFT 0
+#define Q_REG_AIN_ROUTE_MASK 0x7
+
+/* control reg: sink */
+#define Q_REG_CS_OUT_SHIFT 0
+#define Q_REG_CS_OUT_MASK 0x7
+
+/* control ref: apass_sel */
+#define Q_REG_APASS_SEL_SHIFT 0
+#define Q_REG_APASS_SEL_MASK 0x3
+
+enum qpnp_pin_param_type {
+ Q_PIN_CFG_MODE,
+ Q_PIN_CFG_OUTPUT_TYPE,
+ Q_PIN_CFG_INVERT,
+ Q_PIN_CFG_PULL,
+ Q_PIN_CFG_VIN_SEL,
+ Q_PIN_CFG_OUT_STRENGTH,
+ Q_PIN_CFG_SRC_SEL,
+ Q_PIN_CFG_MASTER_EN,
+ Q_PIN_CFG_AOUT_REF,
+ Q_PIN_CFG_AIN_ROUTE,
+ Q_PIN_CFG_CS_OUT,
+ Q_PIN_CFG_APASS_SEL,
+ Q_PIN_CFG_DTEST_SEL,
+ Q_PIN_CFG_INVALID,
+};
+
+#define Q_NUM_PARAMS Q_PIN_CFG_INVALID
+
+/* param error checking */
+#define QPNP_PIN_GPIO_MODE_INVALID 3
+#define QPNP_PIN_GPIO_LV_MV_MODE_INVALID 4
+#define QPNP_PIN_MPP_MODE_INVALID 7
+#define QPNP_PIN_INVERT_INVALID 2
+#define QPNP_PIN_OUT_BUF_INVALID 3
+#define QPNP_PIN_GPIO_LV_MV_OUT_BUF_INVALID 4
+#define QPNP_PIN_VIN_4CH_INVALID 5
+#define QPNP_PIN_VIN_8CH_INVALID 8
+#define QPNP_PIN_GPIO_LV_VIN_INVALID 1
+#define QPNP_PIN_GPIO_MV_VIN_INVALID 2
+#define QPNP_PIN_GPIO_PULL_INVALID 6
+#define QPNP_PIN_MPP_PULL_INVALID 4
+#define QPNP_PIN_OUT_STRENGTH_INVALID 4
+#define QPNP_PIN_SRC_INVALID 8
+#define QPNP_PIN_GPIO_LV_MV_SRC_INVALID 16
+#define QPNP_PIN_MASTER_INVALID 2
+#define QPNP_PIN_AOUT_REF_INVALID 8
+#define QPNP_PIN_AIN_ROUTE_INVALID 8
+#define QPNP_PIN_CS_OUT_INVALID 8
+#define QPNP_PIN_APASS_SEL_INVALID 4
+#define QPNP_PIN_DTEST_SEL_INVALID 4
+
+struct qpnp_pin_spec {
+ uint8_t slave; /* 0-15 */
+ uint16_t offset; /* 0-255 */
+ uint32_t gpio_chip_idx; /* offset from gpio_chip base */
+ uint32_t pmic_pin; /* PMIC pin number */
+ int irq; /* logical IRQ number */
+ u8 regs[Q_NUM_CTL_REGS]; /* Control regs */
+ u8 num_ctl_regs; /* usable number on this pin */
+ u8 type; /* peripheral type */
+ u8 subtype; /* peripheral subtype */
+ u8 dig_major_rev;
+ struct device_node *node;
+ enum qpnp_pin_param_type params[Q_NUM_PARAMS];
+ struct qpnp_pin_chip *q_chip;
+};
+
+struct qpnp_pin_chip {
+ struct gpio_chip gpio_chip;
+ struct spmi_device *spmi;
+ struct qpnp_pin_spec **pmic_pins;
+ struct qpnp_pin_spec **chip_gpios;
+ uint32_t pmic_pin_lowest;
+ uint32_t pmic_pin_highest;
+ struct device_node *int_ctrl;
+ struct list_head chip_list;
+ struct dentry *dfs_dir;
+ bool chip_registered;
+};
+
+static LIST_HEAD(qpnp_pin_chips);
+static DEFINE_MUTEX(qpnp_pin_chips_lock);
+
+static inline void qpnp_pmic_pin_set_spec(struct qpnp_pin_chip *q_chip,
+ uint32_t pmic_pin,
+ struct qpnp_pin_spec *spec)
+{
+ q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest] = spec;
+}
+
+static inline struct qpnp_pin_spec *qpnp_pmic_pin_get_spec(
+ struct qpnp_pin_chip *q_chip,
+ uint32_t pmic_pin)
+{
+ if (pmic_pin < q_chip->pmic_pin_lowest ||
+ pmic_pin > q_chip->pmic_pin_highest)
+ return NULL;
+
+ return q_chip->pmic_pins[pmic_pin - q_chip->pmic_pin_lowest];
+}
+
+static inline struct qpnp_pin_spec *qpnp_chip_gpio_get_spec(
+ struct qpnp_pin_chip *q_chip,
+ uint32_t chip_gpio)
+{
+ if (chip_gpio > q_chip->gpio_chip.ngpio)
+ return NULL;
+
+ return q_chip->chip_gpios[chip_gpio];
+}
+
+static inline void qpnp_chip_gpio_set_spec(struct qpnp_pin_chip *q_chip,
+ uint32_t chip_gpio,
+ struct qpnp_pin_spec *spec)
+{
+ q_chip->chip_gpios[chip_gpio] = spec;
+}
+
+static bool is_gpio_lv_mv(struct qpnp_pin_spec *q_spec)
+{
+ if ((q_spec->type == Q_GPIO_TYPE) &&
+ (q_spec->subtype == Q_GPIO_SUBTYPE_GPIO_LV ||
+ q_spec->subtype == Q_GPIO_SUBTYPE_GPIO_MV))
+ return true;
+
+ return false;
+}
+
+/*
+ * Determines whether a specified param's configuration is correct.
+ * This check is two tier. First a check is done whether the hardware
+ * supports this param and value requested. The second check validates
+ * that the configuration is correct, given the fact that the hardware
+ * supports it.
+ *
+ * Returns
+ * -ENXIO if the hardware does not support this param.
+ * -EINVAL if the hardware does support this param, but the
+ * requested value is outside the supported range.
+ */
+static int qpnp_pin_check_config(enum qpnp_pin_param_type idx,
+ struct qpnp_pin_spec *q_spec, uint32_t val)
+{
+ u8 subtype = q_spec->subtype;
+
+ switch (idx) {
+ case Q_PIN_CFG_MODE:
+ if (q_spec->type == Q_GPIO_TYPE) {
+ if (is_gpio_lv_mv(q_spec)) {
+ if (val >= QPNP_PIN_GPIO_LV_MV_MODE_INVALID)
+ return -EINVAL;
+ } else if (val >= QPNP_PIN_GPIO_MODE_INVALID) {
+ return -EINVAL;
+ }
+ } else if (q_spec->type == Q_MPP_TYPE) {
+ if (val >= QPNP_PIN_MPP_MODE_INVALID)
+ return -EINVAL;
+ if ((subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT ||
+ subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_SINK) &&
+ (val == QPNP_PIN_MODE_BIDIR))
+ return -ENXIO;
+ }
+ break;
+ case Q_PIN_CFG_OUTPUT_TYPE:
+ if (q_spec->type != Q_GPIO_TYPE)
+ return -ENXIO;
+ if ((val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS ||
+ val == QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS) &&
+ (subtype == Q_GPIO_SUBTYPE_GPIOC_4CH ||
+ (subtype == Q_GPIO_SUBTYPE_GPIOC_8CH)))
+ return -EINVAL;
+ else if (is_gpio_lv_mv(q_spec) &&
+ val >= QPNP_PIN_GPIO_LV_MV_OUT_BUF_INVALID)
+ return -EINVAL;
+ else if (val >= QPNP_PIN_OUT_BUF_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_INVERT:
+ if (val >= QPNP_PIN_INVERT_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_PULL:
+ if (q_spec->type == Q_GPIO_TYPE &&
+ val >= QPNP_PIN_GPIO_PULL_INVALID)
+ return -EINVAL;
+ if (q_spec->type == Q_MPP_TYPE) {
+ if (val >= QPNP_PIN_MPP_PULL_INVALID)
+ return -EINVAL;
+ if (subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT ||
+ subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_SINK)
+ return -ENXIO;
+ }
+ break;
+ case Q_PIN_CFG_VIN_SEL:
+ if (is_gpio_lv_mv(q_spec)) {
+ if (subtype == Q_GPIO_SUBTYPE_GPIO_LV) {
+ if (val >= QPNP_PIN_GPIO_LV_VIN_INVALID)
+ return -EINVAL;
+ } else {
+ if (val >= QPNP_PIN_GPIO_MV_VIN_INVALID)
+ return -EINVAL;
+ }
+ } else if (val >= QPNP_PIN_VIN_8CH_INVALID) {
+ return -EINVAL;
+ } else if (val >= QPNP_PIN_VIN_4CH_INVALID) {
+ if (q_spec->type == Q_GPIO_TYPE &&
+ (subtype == Q_GPIO_SUBTYPE_GPIO_4CH ||
+ subtype == Q_GPIO_SUBTYPE_GPIOC_4CH))
+ return -EINVAL;
+ if (q_spec->type == Q_MPP_TYPE &&
+ (subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT ||
+ subtype == Q_MPP_SUBTYPE_4CH_NO_SINK ||
+ subtype == Q_MPP_SUBTYPE_4CH_FULL_FUNC ||
+ subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT ||
+ subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_SINK))
+ return -EINVAL;
+ }
+ break;
+ case Q_PIN_CFG_OUT_STRENGTH:
+ if (q_spec->type != Q_GPIO_TYPE)
+ return -ENXIO;
+ if (val >= QPNP_PIN_OUT_STRENGTH_INVALID ||
+ val == 0)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_SRC_SEL:
+ if (q_spec->type == Q_MPP_TYPE &&
+ (val == QPNP_PIN_SEL_FUNC_1 ||
+ val == QPNP_PIN_SEL_FUNC_2))
+ return -EINVAL;
+ if (is_gpio_lv_mv(q_spec)) {
+ if (val >= QPNP_PIN_GPIO_LV_MV_SRC_INVALID)
+ return -EINVAL;
+ } else if (val >= QPNP_PIN_SRC_INVALID) {
+ return -EINVAL;
+ }
+ break;
+ case Q_PIN_CFG_MASTER_EN:
+ if (val >= QPNP_PIN_MASTER_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_AOUT_REF:
+ if (q_spec->type != Q_MPP_TYPE)
+ return -ENXIO;
+ if (subtype == Q_MPP_SUBTYPE_4CH_NO_ANA_OUT ||
+ subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT)
+ return -ENXIO;
+ if (val >= QPNP_PIN_AOUT_REF_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_AIN_ROUTE:
+ if (q_spec->type != Q_MPP_TYPE)
+ return -ENXIO;
+ if (val >= QPNP_PIN_AIN_ROUTE_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_CS_OUT:
+ if (q_spec->type != Q_MPP_TYPE)
+ return -ENXIO;
+ if (subtype == Q_MPP_SUBTYPE_4CH_NO_SINK ||
+ subtype == Q_MPP_SUBTYPE_ULT_4CH_NO_SINK)
+ return -ENXIO;
+ if (val >= QPNP_PIN_CS_OUT_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_APASS_SEL:
+ if (!is_gpio_lv_mv(q_spec))
+ return -ENXIO;
+ if (val >= QPNP_PIN_APASS_SEL_INVALID)
+ return -EINVAL;
+ break;
+ case Q_PIN_CFG_DTEST_SEL:
+		if (!val || val > QPNP_PIN_DTEST_SEL_INVALID)
+ return -EINVAL;
+ break;
+ default:
+ pr_err("invalid param type %u specified\n", idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define Q_CHK_INVALID(idx, q_spec, val) \
+ (qpnp_pin_check_config(idx, q_spec, val) == -EINVAL)
+
+static int qpnp_pin_check_constraints(struct qpnp_pin_spec *q_spec,
+ struct qpnp_pin_cfg *param)
+{
+ int pin = q_spec->pmic_pin;
+ const char *name;
+
+ name = (q_spec->type == Q_GPIO_TYPE) ? "gpio" : "mpp";
+
+ if (Q_CHK_INVALID(Q_PIN_CFG_MODE, q_spec, param->mode))
+ pr_err("invalid direction value %d for %s %d\n",
+ param->mode, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_INVERT, q_spec, param->invert))
+ pr_err("invalid invert polarity value %d for %s %d\n",
+ param->invert, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_SRC_SEL, q_spec, param->src_sel))
+ pr_err("invalid source select value %d for %s %d\n",
+ param->src_sel, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_OUT_STRENGTH,
+ q_spec, param->out_strength))
+ pr_err("invalid out strength value %d for %s %d\n",
+ param->out_strength, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_OUTPUT_TYPE,
+ q_spec, param->output_type))
+ pr_err("invalid out type value %d for %s %d\n",
+ param->output_type, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+ pr_err("invalid vin select %d value for %s %d\n",
+ param->vin_sel, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_PULL, q_spec, param->pull))
+ pr_err("invalid pull value %d for pin %s %d\n",
+ param->pull, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+ pr_err("invalid master_en value %d for %s %d\n",
+ param->master_en, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+ pr_err("invalid aout_reg value %d for %s %d\n",
+ param->aout_ref, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+ pr_err("invalid ain_route value %d for %s %d\n",
+ param->ain_route, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+ pr_err("invalid cs_out value %d for %s %d\n",
+ param->cs_out, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_APASS_SEL, q_spec, param->apass_sel))
+ pr_err("invalid apass_sel value %d for %s %d\n",
+ param->apass_sel, name, pin);
+ else if (Q_CHK_INVALID(Q_PIN_CFG_DTEST_SEL, q_spec, param->dtest_sel))
+ pr_err("invalid dtest_sel value %d for %s %d\n",
+ param->dtest_sel, name, pin);
+ else
+ return 0;
+
+ return -EINVAL;
+}
+
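+/*
+ * Helpers for manipulating bit fields within the cached copies of the
+ * control registers held in q_spec->regs[].  q_reg_get() returns the
+ * masked field shifted down to bit 0, q_reg_set() ORs a value into the
+ * field, and q_reg_clr_set() clears the field before writing it.
+ */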
+static inline u8 q_reg_get(u8 *reg, int shift, int mask)
+{
+ return (*reg & mask) >> shift;
+}
+
+static inline void q_reg_set(u8 *reg, int shift, int mask, int value)
+{
+ *reg |= (value << shift) & mask;
+}
+
+static inline void q_reg_clr_set(u8 *reg, int shift, int mask, int value)
+{
+ *reg &= ~mask;
+ *reg |= (value << shift) & mask;
+}
+
+/*
+ * Calculate the minimum number of registers that must be read / written
+ * in order to satisfy the full feature set of the given pin.
+ */
+static int qpnp_pin_ctl_regs_init(struct qpnp_pin_spec *q_spec)
+{
+ if (q_spec->type == Q_GPIO_TYPE) {
+ if (is_gpio_lv_mv(q_spec))
+ q_spec->num_ctl_regs = 11;
+ else
+ q_spec->num_ctl_regs = 7;
+ } else if (q_spec->type == Q_MPP_TYPE) {
+ switch (q_spec->subtype) {
+ case Q_MPP_SUBTYPE_4CH_NO_SINK:
+ case Q_MPP_SUBTYPE_ULT_4CH_NO_SINK:
+ q_spec->num_ctl_regs = 12;
+ break;
+ case Q_MPP_SUBTYPE_4CH_NO_ANA_OUT:
+ case Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT:
+ case Q_MPP_SUBTYPE_4CH_FULL_FUNC:
+ case Q_MPP_SUBTYPE_8CH_FULL_FUNC:
+ q_spec->num_ctl_regs = 13;
+ break;
+ default:
+ pr_err("Invalid MPP subtype 0x%x\n", q_spec->subtype);
+ return -EINVAL;
+ }
+ } else {
+ pr_err("Invalid type 0x%x\n", q_spec->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
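+/*
+ * Read the pin's control registers from hardware into the local shadow
+ * buffer (q_spec->regs[]), transferring at most 8 bytes per SPMI access.
+ */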
+static int qpnp_pin_read_regs(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec)
+{
+ int bytes_left = q_spec->num_ctl_regs;
+ int rc;
+ char *buf_p = &q_spec->regs[0];
+ u16 reg_addr = Q_REG_ADDR(q_spec, Q_REG_MODE_CTL);
+
+ while (bytes_left > 0) {
+ rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
+ reg_addr, buf_p, bytes_left < 8 ? bytes_left : 8);
+ if (rc)
+ return rc;
+ bytes_left -= 8;
+ buf_p += 8;
+ reg_addr += 8;
+ }
+ return 0;
+}
+
+static int qpnp_pin_write_regs(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec)
+{
+ int bytes_left = q_spec->num_ctl_regs;
+ int rc;
+ char *buf_p = &q_spec->regs[0];
+ u16 reg_addr = Q_REG_ADDR(q_spec, Q_REG_MODE_CTL);
+
+ while (bytes_left > 0) {
+ rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+ reg_addr, buf_p, bytes_left < 8 ? bytes_left : 8);
+ if (rc)
+ return rc;
+ bytes_left -= 8;
+ buf_p += 8;
+ reg_addr += 8;
+ }
+ return 0;
+}
+
+static int qpnp_pin_cache_regs(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec)
+{
+ int rc;
+ struct device *dev = &q_chip->spmi->dev;
+
+ rc = qpnp_pin_read_regs(q_chip, q_spec);
+ if (rc)
+ dev_err(dev, "%s: unable to read control regs\n", __func__);
+
+ return rc;
+}
+
+#define Q_HAVE_HW_SP(idx, q_spec, val) \
+ (qpnp_pin_check_config(idx, q_spec, val) == 0)
+
+static int _qpnp_pin_config(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec,
+ struct qpnp_pin_cfg *param)
+{
+ struct device *dev = &q_chip->spmi->dev;
+ int rc;
+ u8 shift, mask, *reg;
+
+ rc = qpnp_pin_check_constraints(q_spec, param);
+ if (rc)
+ goto gpio_cfg;
+
+ /* set mode */
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_MODE, q_spec, param->mode)) {
+ if (is_gpio_lv_mv(q_spec)) {
+ shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+ mask = Q_REG_LV_MV_MODE_SEL_MASK;
+ } else {
+ shift = Q_REG_MODE_SEL_SHIFT;
+ mask = Q_REG_MODE_SEL_MASK;
+ }
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+ shift, mask, param->mode);
+ }
+
+ /* output specific configuration */
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_INVERT, q_spec, param->invert)) {
+ if (is_gpio_lv_mv(q_spec)) {
+ shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+ mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+ reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+ } else {
+ shift = Q_REG_OUT_INVERT_SHIFT;
+ mask = Q_REG_OUT_INVERT_MASK;
+ reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+ }
+ q_reg_clr_set(reg, shift, mask, param->invert);
+ }
+
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_SRC_SEL, q_spec, param->src_sel)) {
+ if (is_gpio_lv_mv(q_spec)) {
+ shift = Q_REG_DIG_OUT_SRC_SRC_SEL_SHIFT;
+ mask = Q_REG_DIG_OUT_SRC_SRC_SEL_MASK;
+ reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+ } else {
+ shift = Q_REG_SRC_SEL_SHIFT;
+ mask = Q_REG_SRC_SEL_MASK;
+ reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+ }
+ q_reg_clr_set(reg, shift, mask, param->src_sel);
+ }
+
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_OUT_STRENGTH, q_spec, param->out_strength))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+ Q_REG_OUT_STRENGTH_SHIFT, Q_REG_OUT_STRENGTH_MASK,
+ param->out_strength);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_OUTPUT_TYPE, q_spec, param->output_type))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+ Q_REG_OUT_TYPE_SHIFT, Q_REG_OUT_TYPE_MASK,
+ param->output_type);
+
+ /* input config */
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_DTEST_SEL, q_spec, param->dtest_sel)) {
+ if (is_gpio_lv_mv(q_spec)) {
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+ Q_REG_LV_MV_DTEST_SEL_CFG_SHIFT,
+ Q_REG_LV_MV_DTEST_SEL_CFG_MASK,
+ param->dtest_sel - 1);
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+ Q_REG_LV_MV_DTEST_SEL_EN_SHIFT,
+ Q_REG_LV_MV_DTEST_SEL_EN_MASK, 0x1);
+ } else {
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+ Q_REG_DTEST_SEL_SHIFT,
+ Q_REG_DTEST_SEL_MASK,
+ BIT(param->dtest_sel - 1));
+ }
+ }
+
+ /* config applicable for both input / output */
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_VIN_SEL, q_spec, param->vin_sel))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+ Q_REG_VIN_SHIFT, Q_REG_VIN_MASK,
+ param->vin_sel);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_PULL, q_spec, param->pull))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
+ Q_REG_PULL_SHIFT, Q_REG_PULL_MASK,
+ param->pull);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_MASTER_EN, q_spec, param->master_en))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_EN_CTL],
+ Q_REG_MASTER_EN_SHIFT, Q_REG_MASTER_EN_MASK,
+ param->master_en);
+
+ /* mpp specific config */
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_AOUT_REF, q_spec, param->aout_ref))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_AOUT_CTL],
+ Q_REG_AOUT_REF_SHIFT, Q_REG_AOUT_REF_MASK,
+ param->aout_ref);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_AIN_ROUTE, q_spec, param->ain_route))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_AIN_CTL],
+ Q_REG_AIN_ROUTE_SHIFT, Q_REG_AIN_ROUTE_MASK,
+ param->ain_route);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_CS_OUT, q_spec, param->cs_out))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_SINK_CTL],
+ Q_REG_CS_OUT_SHIFT, Q_REG_CS_OUT_MASK,
+ param->cs_out);
+ if (Q_HAVE_HW_SP(Q_PIN_CFG_APASS_SEL, q_spec, param->apass_sel))
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_APASS_SEL_CTL],
+ Q_REG_APASS_SEL_SHIFT, Q_REG_APASS_SEL_MASK,
+ param->apass_sel);
+
+ rc = qpnp_pin_write_regs(q_chip, q_spec);
+ if (rc) {
+ dev_err(&q_chip->spmi->dev, "%s: unable to write master enable\n",
+ __func__);
+ goto gpio_cfg;
+ }
+
+ return 0;
+
+gpio_cfg:
+ dev_err(dev, "%s: unable to set default config for pmic pin %d\n",
+ __func__, q_spec->pmic_pin);
+
+ return rc;
+}
+
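+/*
+ * qpnp_pin_config() - apply a configuration to a pin identified by its
+ * Linux gpio number
+ * @gpio: gpio number within the gpiolib numbering space
+ * @param: desired pin configuration
+ *
+ * Walks the list of registered qpnp chips to find the chip owning @gpio and
+ * writes the requested configuration to that pin.  Returns 0 on success or
+ * a negative errno on failure.
+ */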
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param)
+{
+ int rc, chip_offset;
+ struct qpnp_pin_chip *q_chip;
+ struct qpnp_pin_spec *q_spec = NULL;
+ struct gpio_chip *gpio_chip;
+
+ if (param == NULL)
+ return -EINVAL;
+
+ mutex_lock(&qpnp_pin_chips_lock);
+ list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+ gpio_chip = &q_chip->gpio_chip;
+ if (gpio >= gpio_chip->base
+ && gpio < gpio_chip->base + gpio_chip->ngpio) {
+ chip_offset = gpio - gpio_chip->base;
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, chip_offset);
+ if (WARN_ON(!q_spec)) {
+ mutex_unlock(&qpnp_pin_chips_lock);
+ return -ENODEV;
+ }
+ break;
+ }
+ }
+ mutex_unlock(&qpnp_pin_chips_lock);
+
+ if (!q_spec)
+ return -ENODEV;
+
+ rc = _qpnp_pin_config(q_chip, q_spec, param);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_pin_config);
+
+#define Q_MAX_CHIP_NAME 128
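+/*
+ * qpnp_pin_map() - translate a PMIC pin number to its Linux gpio number
+ * @name: gpio_chip label identifying the qpnp chip
+ * @pmic_pin: PMIC pin number as used in the device tree
+ *
+ * Returns the corresponding gpiolib gpio number on success, -ENODEV if the
+ * lookup table entry is missing, or -EINVAL if no matching chip or pin
+ * range is found.
+ */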
+int qpnp_pin_map(const char *name, uint32_t pmic_pin)
+{
+ struct qpnp_pin_chip *q_chip;
+ struct qpnp_pin_spec *q_spec = NULL;
+
+ mutex_lock(&qpnp_pin_chips_lock);
+ list_for_each_entry(q_chip, &qpnp_pin_chips, chip_list) {
+ if (strncmp(q_chip->gpio_chip.label, name,
+ Q_MAX_CHIP_NAME) != 0)
+ continue;
+ if (q_chip->pmic_pin_lowest <= pmic_pin &&
+ q_chip->pmic_pin_highest >= pmic_pin) {
+ q_spec = qpnp_pmic_pin_get_spec(q_chip, pmic_pin);
+ mutex_unlock(&qpnp_pin_chips_lock);
+ if (WARN_ON(!q_spec))
+ return -ENODEV;
+ return q_chip->gpio_chip.base + q_spec->gpio_chip_idx;
+ }
+ }
+ mutex_unlock(&qpnp_pin_chips_lock);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(qpnp_pin_map);
+
+static int qpnp_pin_to_irq(struct gpio_chip *gpio_chip, unsigned offset)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+ struct of_phandle_args oirq;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (!q_spec)
+ return -EINVAL;
+
+ /* if we have mapped this pin previously return the virq */
+ if (q_spec->irq)
+ return q_spec->irq;
+
+ /* call into irq_domain to get irq mapping */
+ oirq.np = q_chip->int_ctrl;
+ oirq.args[0] = q_chip->spmi->sid;
+ oirq.args[1] = (q_spec->offset >> 8) & 0xFF;
+ oirq.args[2] = 0;
+ oirq.args_count = 3;
+
+ q_spec->irq = irq_create_of_mapping(&oirq);
+ if (!q_spec->irq) {
+ dev_err(&q_chip->spmi->dev, "%s: invalid irq for gpio %u\n",
+ __func__, q_spec->pmic_pin);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return q_spec->irq;
+}
+
+static int qpnp_pin_get(struct gpio_chip *gpio_chip, unsigned offset)
+{
+ int rc, ret_val;
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec = NULL;
+ u8 buf[1], en_mask;
+ u8 shift, mask, reg;
+
+ if (WARN_ON(!q_chip))
+ return -ENODEV;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (WARN_ON(!q_spec))
+ return -ENODEV;
+
+ /* gpio val is from RT status iff input is enabled */
+ if ((q_spec->regs[Q_REG_I_MODE_CTL] & Q_REG_MODE_SEL_MASK)
+ == QPNP_PIN_MODE_DIG_IN) {
+ rc = spmi_ext_register_readl(q_chip->spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, Q_REG_STATUS1),
+ &buf[0], 1);
+
+ if (q_spec->type == Q_GPIO_TYPE && q_spec->dig_major_rev == 0)
+ en_mask = Q_REG_STATUS1_GPIO_EN_REV0_MASK;
+ else if (q_spec->type == Q_GPIO_TYPE &&
+ q_spec->dig_major_rev > 0)
+ en_mask = Q_REG_STATUS1_GPIO_EN_MASK;
+ else /* MPP */
+ en_mask = Q_REG_STATUS1_MPP_EN_MASK;
+
+ if (!(buf[0] & en_mask))
+ return -EPERM;
+
+ return buf[0] & Q_REG_STATUS1_VAL_MASK;
+
+ } else {
+ if (is_gpio_lv_mv(q_spec)) {
+ shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+ mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+ reg = q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+ } else {
+ shift = Q_REG_OUT_INVERT_SHIFT;
+ mask = Q_REG_OUT_INVERT_MASK;
+ reg = q_spec->regs[Q_REG_I_MODE_CTL];
+ }
+
+ ret_val = (reg & mask) >> shift;
+ return ret_val;
+ }
+
+ return 0;
+}
+
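+/*
+ * Drive the output level of a pin by updating the invert bit in the cached
+ * MODE_CTL register (or DIG_OUT_SRC_CTL for LV/MV GPIO subtypes) and then
+ * writing that single register back over SPMI.
+ */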
+static int __qpnp_pin_set(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec, int value)
+{
+ int rc;
+ u8 shift, mask, *reg;
+ u16 address;
+
+ if (!q_chip || !q_spec)
+ return -EINVAL;
+
+ if (is_gpio_lv_mv(q_spec)) {
+ shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+ mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+ reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+ address = Q_REG_ADDR(q_spec, Q_REG_DIG_OUT_SRC_CTL);
+ } else {
+ shift = Q_REG_OUT_INVERT_SHIFT;
+ mask = Q_REG_OUT_INVERT_MASK;
+ reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+ address = Q_REG_ADDR(q_spec, Q_REG_MODE_CTL);
+ }
+
+ q_reg_clr_set(reg, shift, mask, !!value);
+
+ rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+ address, reg, 1);
+ if (rc)
+ dev_err(&q_chip->spmi->dev, "%s: spmi write failed\n",
+ __func__);
+ return rc;
+}
+
+
+static void qpnp_pin_set(struct gpio_chip *gpio_chip,
+ unsigned offset, int value)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+
+ if (WARN_ON(!q_chip))
+ return;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (WARN_ON(!q_spec))
+ return;
+
+ __qpnp_pin_set(q_chip, q_spec, value);
+}
+
+static int qpnp_pin_set_mode(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec, int mode)
+{
+ int rc;
+ u8 shift, mask;
+
+ if (!q_chip || !q_spec)
+ return -EINVAL;
+
+ if (qpnp_pin_check_config(Q_PIN_CFG_MODE, q_spec, mode)) {
+ pr_err("invalid mode specification %d\n", mode);
+ return -EINVAL;
+ }
+
+ if (is_gpio_lv_mv(q_spec)) {
+ shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+ mask = Q_REG_LV_MV_MODE_SEL_MASK;
+ } else {
+ shift = Q_REG_MODE_SEL_SHIFT;
+ mask = Q_REG_MODE_SEL_MASK;
+ }
+
+ q_reg_clr_set(&q_spec->regs[Q_REG_I_MODE_CTL],
+ shift, mask, mode);
+
+ rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, Q_REG_MODE_CTL),
+ &q_spec->regs[Q_REG_I_MODE_CTL], 1);
+ return rc;
+}
+
+static int qpnp_pin_direction_input(struct gpio_chip *gpio_chip,
+ unsigned offset)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+
+ if (WARN_ON(!q_chip))
+ return -ENODEV;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (WARN_ON(!q_spec))
+ return -ENODEV;
+
+ return qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_IN);
+}
+
+static int qpnp_pin_direction_output(struct gpio_chip *gpio_chip,
+ unsigned offset,
+ int val)
+{
+ int rc;
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+
+ if (WARN_ON(!q_chip))
+ return -ENODEV;
+
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, offset);
+ if (WARN_ON(!q_spec))
+ return -ENODEV;
+
+ rc = __qpnp_pin_set(q_chip, q_spec, val);
+ if (rc)
+ return rc;
+
+ rc = qpnp_pin_set_mode(q_chip, q_spec, QPNP_PIN_MODE_DIG_OUT);
+
+ return rc;
+}
+
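+/*
+ * Translate a two-cell gpio specifier (<PMIC pin number, flags>) from the
+ * device tree into the chip-relative gpio offset used by gpiolib.
+ */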
+static int qpnp_pin_of_gpio_xlate(struct gpio_chip *gpio_chip,
+ const struct of_phandle_args *gpio_spec,
+ u32 *flags)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(gpio_chip->dev);
+ struct qpnp_pin_spec *q_spec;
+
+ if (WARN_ON(gpio_chip->of_gpio_n_cells < 2)) {
+ pr_err("of_gpio_n_cells < 2\n");
+ return -EINVAL;
+ }
+
+ q_spec = qpnp_pmic_pin_get_spec(q_chip, gpio_spec->args[0]);
+ if (!q_spec) {
+ pr_err("no such PMIC gpio %u in device topology\n",
+ gpio_spec->args[0]);
+ return -EINVAL;
+ }
+
+ if (flags)
+ *flags = gpio_spec->args[1];
+
+ return q_spec->gpio_chip_idx;
+}
+
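+/*
+ * Seed a qpnp_pin_cfg structure from the pin's current (cached) register
+ * values, override individual fields with any qcom,* properties found in
+ * the pin's device tree node, and then write the result back to hardware.
+ */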
+static int qpnp_pin_apply_config(struct qpnp_pin_chip *q_chip,
+ struct qpnp_pin_spec *q_spec)
+{
+ struct qpnp_pin_cfg param;
+ struct device_node *node = q_spec->node;
+ int rc;
+ u8 shift, mask, *reg;
+
+ if (is_gpio_lv_mv(q_spec)) {
+ shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+ mask = Q_REG_LV_MV_MODE_SEL_MASK;
+ } else {
+ shift = Q_REG_MODE_SEL_SHIFT;
+ mask = Q_REG_MODE_SEL_MASK;
+ }
+ param.mode = q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL],
+ shift, mask);
+
+ param.output_type = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+ Q_REG_OUT_TYPE_SHIFT,
+ Q_REG_OUT_TYPE_MASK);
+
+ if (is_gpio_lv_mv(q_spec)) {
+ shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+ mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+ reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+ } else {
+ shift = Q_REG_OUT_INVERT_SHIFT;
+ mask = Q_REG_OUT_INVERT_MASK;
+ reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+ }
+ param.invert = q_reg_get(reg, shift, mask);
+
+ param.pull = q_reg_get(&q_spec->regs[Q_REG_I_DIG_PULL_CTL],
+ Q_REG_PULL_SHIFT, Q_REG_PULL_MASK);
+ param.vin_sel = q_reg_get(&q_spec->regs[Q_REG_I_DIG_VIN_CTL],
+ Q_REG_VIN_SHIFT, Q_REG_VIN_MASK);
+ param.out_strength = q_reg_get(&q_spec->regs[Q_REG_I_DIG_OUT_CTL],
+ Q_REG_OUT_STRENGTH_SHIFT,
+ Q_REG_OUT_STRENGTH_MASK);
+
+ if (is_gpio_lv_mv(q_spec)) {
+ shift = Q_REG_DIG_OUT_SRC_SRC_SEL_SHIFT;
+ mask = Q_REG_DIG_OUT_SRC_SRC_SEL_MASK;
+ reg = &q_spec->regs[Q_REG_I_DIG_OUT_SRC_CTL];
+ } else {
+ shift = Q_REG_SRC_SEL_SHIFT;
+ mask = Q_REG_SRC_SEL_MASK;
+ reg = &q_spec->regs[Q_REG_I_MODE_CTL];
+ }
+ param.src_sel = q_reg_get(reg, shift, mask);
+
+ param.master_en = q_reg_get(&q_spec->regs[Q_REG_I_EN_CTL],
+ Q_REG_MASTER_EN_SHIFT,
+ Q_REG_MASTER_EN_MASK);
+ param.aout_ref = q_reg_get(&q_spec->regs[Q_REG_I_AOUT_CTL],
+ Q_REG_AOUT_REF_SHIFT,
+ Q_REG_AOUT_REF_MASK);
+ param.ain_route = q_reg_get(&q_spec->regs[Q_REG_I_AIN_CTL],
+ Q_REG_AIN_ROUTE_SHIFT,
+ Q_REG_AIN_ROUTE_MASK);
+ param.cs_out = q_reg_get(&q_spec->regs[Q_REG_I_SINK_CTL],
+ Q_REG_CS_OUT_SHIFT,
+ Q_REG_CS_OUT_MASK);
+ param.apass_sel = q_reg_get(&q_spec->regs[Q_REG_I_APASS_SEL_CTL],
+ Q_REG_APASS_SEL_SHIFT,
+ Q_REG_APASS_SEL_MASK);
+ if (is_gpio_lv_mv(q_spec)) {
+ param.dtest_sel = q_reg_get(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+ Q_REG_LV_MV_DTEST_SEL_CFG_SHIFT,
+ Q_REG_LV_MV_DTEST_SEL_CFG_MASK);
+ } else {
+ param.dtest_sel = q_reg_get(&q_spec->regs[Q_REG_I_DIG_IN_CTL],
+ Q_REG_DTEST_SEL_SHIFT,
+ Q_REG_DTEST_SEL_MASK);
+ }
+
+ of_property_read_u32(node, "qcom,mode",
+ &param.mode);
+ of_property_read_u32(node, "qcom,output-type",
+ &param.output_type);
+ of_property_read_u32(node, "qcom,invert",
+ &param.invert);
+ of_property_read_u32(node, "qcom,pull",
+ &param.pull);
+ of_property_read_u32(node, "qcom,vin-sel",
+ &param.vin_sel);
+ of_property_read_u32(node, "qcom,out-strength",
+ &param.out_strength);
+ of_property_read_u32(node, "qcom,src-sel",
+ &param.src_sel);
+ of_property_read_u32(node, "qcom,master-en",
+ &param.master_en);
+ of_property_read_u32(node, "qcom,aout-ref",
+ &param.aout_ref);
+ of_property_read_u32(node, "qcom,ain-route",
+ &param.ain_route);
+ of_property_read_u32(node, "qcom,cs-out",
+ &param.cs_out);
+ of_property_read_u32(node, "qcom,apass-sel",
+ &param.apass_sel);
+ of_property_read_u32(node, "qcom,dtest-sel",
+ &param.dtest_sel);
+
+ rc = _qpnp_pin_config(q_chip, q_spec, &param);
+
+ return rc;
+}
+
+static int qpnp_pin_free_chip(struct qpnp_pin_chip *q_chip)
+{
+ struct spmi_device *spmi = q_chip->spmi;
+ int i, rc = 0;
+
+ if (q_chip->chip_gpios)
+ for (i = 0; i < spmi->num_dev_node; i++)
+ kfree(q_chip->chip_gpios[i]);
+
+ mutex_lock(&qpnp_pin_chips_lock);
+ list_del(&q_chip->chip_list);
+ mutex_unlock(&qpnp_pin_chips_lock);
+ if (q_chip->chip_registered)
+ gpiochip_remove(&q_chip->gpio_chip);
+
+ kfree(q_chip->chip_gpios);
+ kfree(q_chip->pmic_pins);
+ kfree(q_chip);
+ return rc;
+}
+
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+struct qpnp_pin_reg {
+ uint32_t addr;
+ uint32_t idx;
+ uint32_t shift;
+ uint32_t mask;
+};
+
+static struct dentry *driver_dfs_dir;
+
+static int qpnp_pin_reg_attr(enum qpnp_pin_param_type type,
+ struct qpnp_pin_reg *cfg,
+ struct qpnp_pin_spec *q_spec)
+{
+ switch (type) {
+ case Q_PIN_CFG_MODE:
+ if (is_gpio_lv_mv(q_spec)) {
+ cfg->shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+ cfg->mask = Q_REG_LV_MV_MODE_SEL_MASK;
+ } else {
+ cfg->shift = Q_REG_MODE_SEL_SHIFT;
+ cfg->mask = Q_REG_MODE_SEL_MASK;
+ }
+ cfg->addr = Q_REG_MODE_CTL;
+ cfg->idx = Q_REG_I_MODE_CTL;
+ break;
+ case Q_PIN_CFG_OUTPUT_TYPE:
+ cfg->addr = Q_REG_DIG_OUT_CTL;
+ cfg->idx = Q_REG_I_DIG_OUT_CTL;
+ cfg->shift = Q_REG_OUT_TYPE_SHIFT;
+ cfg->mask = Q_REG_OUT_TYPE_MASK;
+ break;
+ case Q_PIN_CFG_INVERT:
+ if (is_gpio_lv_mv(q_spec)) {
+ cfg->addr = Q_REG_DIG_OUT_SRC_CTL;
+ cfg->idx = Q_REG_I_DIG_OUT_SRC_CTL;
+ cfg->shift = Q_REG_DIG_OUT_SRC_INVERT_SHIFT;
+ cfg->mask = Q_REG_DIG_OUT_SRC_INVERT_MASK;
+ } else {
+ cfg->addr = Q_REG_MODE_CTL;
+ cfg->idx = Q_REG_I_MODE_CTL;
+ cfg->shift = Q_REG_OUT_INVERT_SHIFT;
+ cfg->mask = Q_REG_OUT_INVERT_MASK;
+ }
+ break;
+ case Q_PIN_CFG_PULL:
+ cfg->addr = Q_REG_DIG_PULL_CTL;
+ cfg->idx = Q_REG_I_DIG_PULL_CTL;
+ cfg->shift = Q_REG_PULL_SHIFT;
+ cfg->mask = Q_REG_PULL_MASK;
+ break;
+ case Q_PIN_CFG_VIN_SEL:
+ cfg->addr = Q_REG_DIG_VIN_CTL;
+ cfg->idx = Q_REG_I_DIG_VIN_CTL;
+ cfg->shift = Q_REG_VIN_SHIFT;
+ cfg->mask = Q_REG_VIN_MASK;
+ break;
+ case Q_PIN_CFG_OUT_STRENGTH:
+ cfg->addr = Q_REG_DIG_OUT_CTL;
+ cfg->idx = Q_REG_I_DIG_OUT_CTL;
+ cfg->shift = Q_REG_OUT_STRENGTH_SHIFT;
+ cfg->mask = Q_REG_OUT_STRENGTH_MASK;
+ break;
+ case Q_PIN_CFG_SRC_SEL:
+ if (is_gpio_lv_mv(q_spec)) {
+ cfg->addr = Q_REG_DIG_OUT_SRC_CTL;
+ cfg->idx = Q_REG_I_DIG_OUT_SRC_CTL;
+ cfg->shift = Q_REG_DIG_OUT_SRC_SRC_SEL_SHIFT;
+ cfg->mask = Q_REG_DIG_OUT_SRC_SRC_SEL_MASK;
+ } else {
+ cfg->addr = Q_REG_MODE_CTL;
+ cfg->idx = Q_REG_I_MODE_CTL;
+ cfg->shift = Q_REG_SRC_SEL_SHIFT;
+ cfg->mask = Q_REG_SRC_SEL_MASK;
+ }
+ break;
+ case Q_PIN_CFG_MASTER_EN:
+ cfg->addr = Q_REG_EN_CTL;
+ cfg->idx = Q_REG_I_EN_CTL;
+ cfg->shift = Q_REG_MASTER_EN_SHIFT;
+ cfg->mask = Q_REG_MASTER_EN_MASK;
+ break;
+ case Q_PIN_CFG_AOUT_REF:
+ cfg->addr = Q_REG_AOUT_CTL;
+ cfg->idx = Q_REG_I_AOUT_CTL;
+ cfg->shift = Q_REG_AOUT_REF_SHIFT;
+ cfg->mask = Q_REG_AOUT_REF_MASK;
+ break;
+ case Q_PIN_CFG_AIN_ROUTE:
+ cfg->addr = Q_REG_AIN_CTL;
+ cfg->idx = Q_REG_I_AIN_CTL;
+ cfg->shift = Q_REG_AIN_ROUTE_SHIFT;
+ cfg->mask = Q_REG_AIN_ROUTE_MASK;
+ break;
+ case Q_PIN_CFG_CS_OUT:
+ cfg->addr = Q_REG_SINK_CTL;
+ cfg->idx = Q_REG_I_SINK_CTL;
+ cfg->shift = Q_REG_CS_OUT_SHIFT;
+ cfg->mask = Q_REG_CS_OUT_MASK;
+ break;
+ case Q_PIN_CFG_APASS_SEL:
+ cfg->addr = Q_REG_APASS_SEL_CTL;
+ cfg->idx = Q_REG_I_APASS_SEL_CTL;
+ cfg->shift = Q_REG_APASS_SEL_SHIFT;
+ cfg->mask = Q_REG_APASS_SEL_MASK;
+ break;
+ case Q_PIN_CFG_DTEST_SEL:
+ if (is_gpio_lv_mv(q_spec)) {
+			cfg->shift = Q_REG_LV_MV_DTEST_SEL_CFG_SHIFT;
+			cfg->mask = Q_REG_LV_MV_DTEST_SEL_CFG_MASK;
+ } else {
+ cfg->shift = Q_REG_DTEST_SEL_SHIFT;
+ cfg->mask = Q_REG_DTEST_SEL_MASK;
+ }
+ cfg->addr = Q_REG_DIG_IN_CTL;
+ cfg->idx = Q_REG_I_DIG_IN_CTL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qpnp_pin_debugfs_get(void *data, u64 *val)
+{
+ enum qpnp_pin_param_type *idx = data;
+ struct qpnp_pin_spec *q_spec;
+ struct qpnp_pin_reg cfg = {};
+ int rc;
+
+ q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+
+ rc = qpnp_pin_reg_attr(*idx, &cfg, q_spec);
+ if (rc)
+ return rc;
+
+ *val = q_reg_get(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask);
+ return 0;
+}
+
+static int qpnp_pin_debugfs_set(void *data, u64 val)
+{
+ enum qpnp_pin_param_type *idx = data;
+ struct qpnp_pin_spec *q_spec;
+ struct qpnp_pin_chip *q_chip;
+ struct qpnp_pin_reg cfg = {};
+ int rc;
+
+ q_spec = container_of(idx, struct qpnp_pin_spec, params[*idx]);
+ q_chip = q_spec->q_chip;
+
+ rc = qpnp_pin_check_config(*idx, q_spec, val);
+ if (rc)
+ return rc;
+
+ rc = qpnp_pin_reg_attr(*idx, &cfg, q_spec);
+ if (rc)
+ return rc;
+ q_reg_clr_set(&q_spec->regs[cfg.idx], cfg.shift, cfg.mask, val);
+ rc = spmi_ext_register_writel(q_chip->spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, cfg.addr),
+ &q_spec->regs[cfg.idx], 1);
+
+ return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(qpnp_pin_fops, qpnp_pin_debugfs_get,
+ qpnp_pin_debugfs_set, "%llu\n");
+
+#define DEBUGFS_BUF_SIZE 11 /* supports 2^32 in decimal */
+
+struct qpnp_pin_debugfs_args {
+ enum qpnp_pin_param_type type;
+ const char *filename;
+};
+
+static struct qpnp_pin_debugfs_args dfs_args[] = {
+ { Q_PIN_CFG_MODE, "mode" },
+ { Q_PIN_CFG_OUTPUT_TYPE, "output_type" },
+ { Q_PIN_CFG_INVERT, "invert" },
+ { Q_PIN_CFG_PULL, "pull" },
+ { Q_PIN_CFG_VIN_SEL, "vin_sel" },
+ { Q_PIN_CFG_OUT_STRENGTH, "out_strength" },
+ { Q_PIN_CFG_SRC_SEL, "src_sel" },
+ { Q_PIN_CFG_MASTER_EN, "master_en" },
+ { Q_PIN_CFG_AOUT_REF, "aout_ref" },
+ { Q_PIN_CFG_AIN_ROUTE, "ain_route" },
+ { Q_PIN_CFG_CS_OUT, "cs_out" },
+ { Q_PIN_CFG_APASS_SEL, "apass_sel" },
+ { Q_PIN_CFG_DTEST_SEL, "dtest-sel" },
+};
+
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+ struct spmi_device *spmi = q_chip->spmi;
+ struct device *dev = &spmi->dev;
+ struct qpnp_pin_spec *q_spec;
+ enum qpnp_pin_param_type *params;
+ enum qpnp_pin_param_type type;
+ char pmic_pin[DEBUGFS_BUF_SIZE];
+ const char *filename;
+ struct dentry *dfs, *dfs_io_dir;
+ int i, j, rc;
+
+ BUG_ON(Q_NUM_PARAMS != ARRAY_SIZE(dfs_args));
+
+ q_chip->dfs_dir = debugfs_create_dir(q_chip->gpio_chip.label,
+ driver_dfs_dir);
+ if (q_chip->dfs_dir == NULL) {
+ dev_err(dev, "%s: cannot register chip debugfs directory %s\n",
+ __func__, dev->of_node->name);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < spmi->num_dev_node; i++) {
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+ params = q_spec->params;
+ snprintf(pmic_pin, DEBUGFS_BUF_SIZE, "%u", q_spec->pmic_pin);
+ dfs_io_dir = debugfs_create_dir(pmic_pin, q_chip->dfs_dir);
+ if (dfs_io_dir == NULL)
+ goto dfs_err;
+
+ for (j = 0; j < Q_NUM_PARAMS; j++) {
+ type = dfs_args[j].type;
+ filename = dfs_args[j].filename;
+
+ /*
+ * Use a value of '0' to see if the pin has even basic
+ * support for a function. Do not create a file if
+ * it doesn't.
+ */
+ rc = qpnp_pin_check_config(type, q_spec, 0);
+ if (rc == -ENXIO)
+ continue;
+
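+			/*
+			 * Record the parameter's own index in params[]; the
+			 * debugfs get/set callbacks later use container_of()
+			 * on this entry to recover the owning qpnp_pin_spec.
+			 */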
+ params[type] = type;
+ dfs = debugfs_create_file(
+ filename,
+ S_IRUGO | S_IWUSR,
+ dfs_io_dir,
+ &q_spec->params[type],
+ &qpnp_pin_fops);
+ if (dfs == NULL)
+ goto dfs_err;
+ }
+ }
+ return 0;
+dfs_err:
+ dev_err(dev, "%s: cannot register debugfs for pmic gpio %u on chip %s\n",
+ __func__, q_spec->pmic_pin, dev->of_node->name);
+ debugfs_remove_recursive(q_chip->dfs_dir);
+ return -ENFILE;
+}
+#else
+static int qpnp_pin_debugfs_create(struct qpnp_pin_chip *q_chip)
+{
+ return 0;
+}
+#endif
+
+static int qpnp_pin_is_valid_pin(struct qpnp_pin_spec *q_spec)
+{
+ if (q_spec->type == Q_GPIO_TYPE)
+ switch (q_spec->subtype) {
+ case Q_GPIO_SUBTYPE_GPIO_4CH:
+ case Q_GPIO_SUBTYPE_GPIOC_4CH:
+ case Q_GPIO_SUBTYPE_GPIO_8CH:
+ case Q_GPIO_SUBTYPE_GPIOC_8CH:
+ case Q_GPIO_SUBTYPE_GPIO_LV:
+ case Q_GPIO_SUBTYPE_GPIO_MV:
+ return 1;
+ }
+ else if (q_spec->type == Q_MPP_TYPE)
+ switch (q_spec->subtype) {
+ case Q_MPP_SUBTYPE_4CH_NO_ANA_OUT:
+ case Q_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT:
+ case Q_MPP_SUBTYPE_4CH_NO_SINK:
+ case Q_MPP_SUBTYPE_ULT_4CH_NO_SINK:
+ case Q_MPP_SUBTYPE_4CH_FULL_FUNC:
+ case Q_MPP_SUBTYPE_8CH_FULL_FUNC:
+ return 1;
+ }
+
+ return 0;
+}
+
+static int qpnp_pin_probe(struct spmi_device *spmi)
+{
+ struct qpnp_pin_chip *q_chip;
+ struct qpnp_pin_spec *q_spec;
+ struct resource *res;
+ struct spmi_resource *d_node;
+ int i, rc;
+ u32 lowest_gpio = UINT_MAX, highest_gpio = 0;
+ u32 gpio;
+ char version[Q_REG_SUBTYPE - Q_REG_DIG_MAJOR_REV + 1];
+ const char *dev_name;
+
+ dev_name = spmi_get_primary_dev_name(spmi);
+ if (!dev_name) {
+ dev_err(&spmi->dev, "%s: label binding undefined for node %s\n",
+ __func__, spmi->dev.of_node->full_name);
+ return -EINVAL;
+ }
+
+ q_chip = kzalloc(sizeof(*q_chip), GFP_KERNEL);
+ if (!q_chip) {
+ dev_err(&spmi->dev, "%s: Can't allocate gpio_chip\n",
+ __func__);
+ return -ENOMEM;
+ }
+ q_chip->spmi = spmi;
+ dev_set_drvdata(&spmi->dev, q_chip);
+
+ mutex_lock(&qpnp_pin_chips_lock);
+ list_add(&q_chip->chip_list, &qpnp_pin_chips);
+ mutex_unlock(&qpnp_pin_chips_lock);
+
+ /* first scan through nodes to find the range required for allocation */
+ for (i = 0; i < spmi->num_dev_node; i++) {
+ rc = of_property_read_u32(spmi->dev_node[i].of_node,
+ "qcom,pin-num", &gpio);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: unable to get qcom,pin-num property\n",
+ __func__);
+ goto err_probe;
+ }
+
+ if (gpio < lowest_gpio)
+ lowest_gpio = gpio;
+ if (gpio > highest_gpio)
+ highest_gpio = gpio;
+ }
+
+ if (highest_gpio < lowest_gpio) {
+ dev_err(&spmi->dev, "%s: no device nodes specified in topology\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_probe;
+ } else if (lowest_gpio == 0) {
+ dev_err(&spmi->dev, "%s: 0 is not a valid PMIC GPIO\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_probe;
+ }
+
+ q_chip->pmic_pin_lowest = lowest_gpio;
+ q_chip->pmic_pin_highest = highest_gpio;
+
+ /* allocate gpio lookup tables */
+ q_chip->pmic_pins = kzalloc(sizeof(struct qpnp_pin_spec *) *
+ (highest_gpio - lowest_gpio + 1),
+ GFP_KERNEL);
+ q_chip->chip_gpios = kzalloc(sizeof(struct qpnp_pin_spec *) *
+ spmi->num_dev_node, GFP_KERNEL);
+ if (!q_chip->pmic_pins || !q_chip->chip_gpios) {
+ dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+ __func__);
+ rc = -ENOMEM;
+ goto err_probe;
+ }
+
+ /* get interrupt controller device_node */
+ q_chip->int_ctrl = of_irq_find_parent(spmi->dev.of_node);
+ if (!q_chip->int_ctrl) {
+ dev_err(&spmi->dev, "%s: Can't find interrupt parent\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_probe;
+ }
+
+ /* now scan through again and populate the lookup table */
+ for (i = 0; i < spmi->num_dev_node; i++) {
+ d_node = &spmi->dev_node[i];
+ res = spmi_get_resource(spmi, d_node, IORESOURCE_MEM, 0);
+ if (!res) {
+			dev_err(&spmi->dev, "%s: node %s has no base address definition\n",
+ __func__, d_node->of_node->full_name);
+ rc = -EINVAL;
+ goto err_probe;
+ }
+
+ rc = of_property_read_u32(d_node->of_node,
+ "qcom,pin-num", &gpio);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: unable to get qcom,pin-num property\n",
+ __func__);
+ goto err_probe;
+ }
+
+ q_spec = kzalloc(sizeof(struct qpnp_pin_spec),
+ GFP_KERNEL);
+ if (!q_spec) {
+ dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+ __func__);
+ rc = -ENOMEM;
+ goto err_probe;
+ }
+
+ q_spec->slave = spmi->sid;
+ q_spec->offset = res->start;
+ q_spec->gpio_chip_idx = i;
+ q_spec->pmic_pin = gpio;
+ q_spec->node = d_node->of_node;
+ q_spec->q_chip = q_chip;
+
+ rc = spmi_ext_register_readl(spmi->ctrl, q_spec->slave,
+ Q_REG_ADDR(q_spec, Q_REG_DIG_MAJOR_REV),
+ &version[0], ARRAY_SIZE(version));
+ if (rc) {
+ dev_err(&spmi->dev, "%s: unable to read type regs\n",
+ __func__);
+ goto err_probe;
+ }
+ q_spec->dig_major_rev = version[Q_REG_DIG_MAJOR_REV -
+ Q_REG_DIG_MAJOR_REV];
+ q_spec->type = version[Q_REG_TYPE - Q_REG_DIG_MAJOR_REV];
+ q_spec->subtype = version[Q_REG_SUBTYPE - Q_REG_DIG_MAJOR_REV];
+
+ if (!qpnp_pin_is_valid_pin(q_spec)) {
+ dev_err(&spmi->dev, "%s: invalid pin type (type=0x%x subtype=0x%x)\n",
+ __func__, q_spec->type, q_spec->subtype);
+ goto err_probe;
+ }
+
+ rc = qpnp_pin_ctl_regs_init(q_spec);
+ if (rc)
+ goto err_probe;
+
+ /* initialize lookup table params */
+ qpnp_pmic_pin_set_spec(q_chip, gpio, q_spec);
+ qpnp_chip_gpio_set_spec(q_chip, i, q_spec);
+ }
+
+ q_chip->gpio_chip.base = -1;
+ q_chip->gpio_chip.ngpio = spmi->num_dev_node;
+ q_chip->gpio_chip.label = dev_name;
+ q_chip->gpio_chip.direction_input = qpnp_pin_direction_input;
+ q_chip->gpio_chip.direction_output = qpnp_pin_direction_output;
+ q_chip->gpio_chip.to_irq = qpnp_pin_to_irq;
+ q_chip->gpio_chip.get = qpnp_pin_get;
+ q_chip->gpio_chip.set = qpnp_pin_set;
+ q_chip->gpio_chip.dev = &spmi->dev;
+ q_chip->gpio_chip.of_xlate = qpnp_pin_of_gpio_xlate;
+ q_chip->gpio_chip.of_gpio_n_cells = 2;
+ q_chip->gpio_chip.can_sleep = 0;
+
+ rc = gpiochip_add(&q_chip->gpio_chip);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: Can't add gpio chip, rc = %d\n",
+ __func__, rc);
+ goto err_probe;
+ }
+
+ q_chip->chip_registered = true;
+ /* now configure gpio config defaults if they exist */
+ for (i = 0; i < spmi->num_dev_node; i++) {
+ q_spec = qpnp_chip_gpio_get_spec(q_chip, i);
+ if (WARN_ON(!q_spec)) {
+ rc = -ENODEV;
+ goto err_probe;
+ }
+
+ rc = qpnp_pin_cache_regs(q_chip, q_spec);
+ if (rc)
+ goto err_probe;
+
+ rc = qpnp_pin_apply_config(q_chip, q_spec);
+ if (rc)
+ goto err_probe;
+ }
+
+ dev_dbg(&spmi->dev, "%s: gpio_chip registered between %d-%u\n",
+ __func__, q_chip->gpio_chip.base,
+ (q_chip->gpio_chip.base + q_chip->gpio_chip.ngpio) - 1);
+
+ rc = qpnp_pin_debugfs_create(q_chip);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: debugfs creation failed\n", __func__);
+ goto err_probe;
+ }
+
+ return 0;
+
+err_probe:
+ qpnp_pin_free_chip(q_chip);
+ return rc;
+}
+
+static int qpnp_pin_remove(struct spmi_device *spmi)
+{
+ struct qpnp_pin_chip *q_chip = dev_get_drvdata(&spmi->dev);
+
+ debugfs_remove_recursive(q_chip->dfs_dir);
+
+ return qpnp_pin_free_chip(q_chip);
+}
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-pin",
+ },
+ {}
+};
+
+static const struct spmi_device_id qpnp_pin_id[] = {
+ { "qcom,qpnp-pin", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_pin_id);
+
+static struct spmi_driver qpnp_pin_driver = {
+ .driver = {
+ .name = "qcom,qpnp-pin",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_pin_probe,
+ .remove = qpnp_pin_remove,
+ .id_table = qpnp_pin_id,
+};
+
+static int __init qpnp_pin_init(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+ driver_dfs_dir = debugfs_create_dir("qpnp_pin", NULL);
+ if (driver_dfs_dir == NULL)
+ pr_err("Cannot register top level debugfs directory\n");
+#endif
+
+ return spmi_driver_register(&qpnp_pin_driver);
+}
+
+static void __exit qpnp_pin_exit(void)
+{
+#ifdef CONFIG_GPIO_QPNP_PIN_DEBUG
+ debugfs_remove_recursive(driver_dfs_dir);
+#endif
+ spmi_driver_unregister(&qpnp_pin_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC gpio driver");
+MODULE_LICENSE("GPL v2");
+
+subsys_initcall(qpnp_pin_init);
+module_exit(qpnp_pin_exit);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 80a73bfc1a65..635a075c7667 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1200,6 +1200,26 @@ config SENSORS_PCF8591
These devices are hard to detect and rarely found on mainstream
hardware. If unsure, say N.
+config SENSORS_QPNP_ADC_VOLTAGE
+ tristate "Support for Qualcomm QPNP Voltage ADC"
+ depends on MSM_SPMI
+ help
+ This is the VADC arbiter driver for Qualcomm QPNP ADC Chip.
+
+ The driver supports reading the HKADC, XOADC through the ADC AMUX arbiter.
+ The VADC includes support for the conversion sequencer. The driver supports
+ reading the ADC through the AMUX channels for external pull-ups simultaneously.
+
+config SENSORS_QPNP_ADC_CURRENT
+ tristate "Support for Qualcomm QPNP current ADC"
+ depends on MSM_SPMI
+ help
+ This is the IADC driver for Qualcomm QPNP ADC Chip.
+
+	  The driver supports single-mode operation to read from up to seven channel
+	  configurations, which include reading the external/internal Rsense and the
+	  CSP_EX/CSN_EX pair, along with the gain and offset calibration.
+
source drivers/hwmon/pmbus/Kconfig
config SENSORS_PWM_FAN
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 12a32398fdcc..6a17b39f51fa 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -160,6 +160,8 @@ obj-$(CONFIG_SENSORS_W83L785TS) += w83l785ts.o
obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o
obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o
obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o
+obj-$(CONFIG_SENSORS_QPNP_ADC_VOLTAGE) += qpnp-adc-voltage.o qpnp-adc-common.o
+obj-$(CONFIG_SENSORS_QPNP_ADC_CURRENT) += qpnp-adc-current.o qpnp-adc-common.o
obj-$(CONFIG_PMBUS) += pmbus/
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
new file mode 100644
index 000000000000..43716ab7e3bf
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -0,0 +1,1993 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+
+#define KELVINMIL_DEGMIL 273160
+#define QPNP_VADC_LDO_VOLTAGE_MIN 1800000
+#define QPNP_VADC_LDO_VOLTAGE_MAX 1800000
+#define QPNP_VADC_OK_VOLTAGE_MIN 1000000
+#define QPNP_VADC_OK_VOLTAGE_MAX 1000000
+#define PMI_CHG_SCALE_1 -138890
+#define PMI_CHG_SCALE_2 391750000000
+#define QPNP_VADC_HC_VREF_CODE 0x4000
+
+/* Units for the temperature below (on the x axis) are in 0.1 DegC as
+ * required by the battery driver. Note that the resolution used to
+ * compute the table was DegC to millivolts. To limit the size of the
+ * table for the temperature range below, the result is linearly
+ * interpolated and provided to the battery driver in the units its
+ * framework expects, i.e. 0.1 DegC. A true resolution of 0.1 DegC
+ * would make the table below ten times larger.
+ */
+static const struct qpnp_vadc_map_pt adcmap_btm_threshold[] = {
+ {-300, 1642},
+ {-200, 1544},
+ {-100, 1414},
+ {0, 1260},
+ {10, 1244},
+ {20, 1228},
+ {30, 1212},
+ {40, 1195},
+ {50, 1179},
+ {60, 1162},
+ {70, 1146},
+ {80, 1129},
+ {90, 1113},
+ {100, 1097},
+ {110, 1080},
+ {120, 1064},
+ {130, 1048},
+ {140, 1032},
+ {150, 1016},
+ {160, 1000},
+ {170, 985},
+ {180, 969},
+ {190, 954},
+ {200, 939},
+ {210, 924},
+ {220, 909},
+ {230, 894},
+ {240, 880},
+ {250, 866},
+ {260, 852},
+ {270, 838},
+ {280, 824},
+ {290, 811},
+ {300, 798},
+ {310, 785},
+ {320, 773},
+ {330, 760},
+ {340, 748},
+ {350, 736},
+ {360, 725},
+ {370, 713},
+ {380, 702},
+ {390, 691},
+ {400, 681},
+ {410, 670},
+ {420, 660},
+ {430, 650},
+ {440, 640},
+ {450, 631},
+ {460, 622},
+ {470, 613},
+ {480, 604},
+ {490, 595},
+ {500, 587},
+ {510, 579},
+ {520, 571},
+ {530, 563},
+ {540, 556},
+ {550, 548},
+ {560, 541},
+ {570, 534},
+ {580, 527},
+ {590, 521},
+ {600, 514},
+ {610, 508},
+ {620, 502},
+ {630, 496},
+ {640, 490},
+ {650, 485},
+ {660, 281},
+ {670, 274},
+ {680, 267},
+ {690, 260},
+ {700, 254},
+ {710, 247},
+ {720, 241},
+ {730, 235},
+ {740, 229},
+ {750, 224},
+ {760, 218},
+ {770, 213},
+ {780, 208},
+ {790, 203}
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_btm_threshold[] = {
+ {-200, 1540},
+ {-180, 1517},
+ {-160, 1492},
+ {-140, 1467},
+ {-120, 1440},
+ {-100, 1412},
+ {-80, 1383},
+ {-60, 1353},
+ {-40, 1323},
+ {-20, 1292},
+ {0, 1260},
+ {20, 1228},
+ {40, 1196},
+ {60, 1163},
+ {80, 1131},
+ {100, 1098},
+ {120, 1066},
+ {140, 1034},
+ {160, 1002},
+ {180, 971},
+ {200, 941},
+ {220, 911},
+ {240, 882},
+ {260, 854},
+ {280, 826},
+ {300, 800},
+ {320, 774},
+ {340, 749},
+ {360, 726},
+ {380, 703},
+ {400, 681},
+ {420, 660},
+ {440, 640},
+ {460, 621},
+ {480, 602},
+ {500, 585},
+ {520, 568},
+ {540, 552},
+ {560, 537},
+ {580, 523},
+ {600, 510},
+ {620, 497},
+ {640, 485},
+ {660, 473},
+ {680, 462},
+ {700, 452},
+ {720, 442},
+ {740, 433},
+ {760, 424},
+ {780, 416},
+ {800, 408},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skuaa_btm_threshold[] = {
+ {-200, 1476},
+ {-180, 1450},
+ {-160, 1422},
+ {-140, 1394},
+ {-120, 1365},
+ {-100, 1336},
+ {-80, 1306},
+ {-60, 1276},
+ {-40, 1246},
+ {-20, 1216},
+ {0, 1185},
+ {20, 1155},
+ {40, 1126},
+ {60, 1096},
+ {80, 1068},
+ {100, 1040},
+ {120, 1012},
+ {140, 986},
+ {160, 960},
+ {180, 935},
+ {200, 911},
+ {220, 888},
+ {240, 866},
+ {260, 844},
+ {280, 824},
+ {300, 805},
+ {320, 786},
+ {340, 769},
+ {360, 752},
+ {380, 737},
+ {400, 722},
+ {420, 707},
+ {440, 694},
+ {460, 681},
+ {480, 669},
+ {500, 658},
+ {520, 648},
+ {540, 637},
+ {560, 628},
+ {580, 619},
+ {600, 611},
+ {620, 603},
+ {640, 595},
+ {660, 588},
+ {680, 582},
+ {700, 575},
+ {720, 569},
+ {740, 564},
+ {760, 559},
+ {780, 554},
+ {800, 549},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skug_btm_threshold[] = {
+ {-200, 1338},
+ {-180, 1307},
+ {-160, 1276},
+ {-140, 1244},
+ {-120, 1213},
+ {-100, 1182},
+ {-80, 1151},
+ {-60, 1121},
+ {-40, 1092},
+ {-20, 1063},
+ {0, 1035},
+ {20, 1008},
+ {40, 982},
+ {60, 957},
+ {80, 933},
+ {100, 910},
+ {120, 889},
+ {140, 868},
+ {160, 848},
+ {180, 830},
+ {200, 812},
+ {220, 795},
+ {240, 780},
+ {260, 765},
+ {280, 751},
+ {300, 738},
+ {320, 726},
+ {340, 714},
+ {360, 704},
+ {380, 694},
+ {400, 684},
+ {420, 675},
+ {440, 667},
+ {460, 659},
+ {480, 652},
+ {500, 645},
+ {520, 639},
+ {540, 633},
+ {560, 627},
+ {580, 622},
+ {600, 617},
+ {620, 613},
+ {640, 608},
+ {660, 604},
+ {680, 600},
+ {700, 597},
+ {720, 593},
+ {740, 590},
+ {760, 587},
+ {780, 585},
+ {800, 582},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skuh_btm_threshold[] = {
+ {-200, 1531},
+ {-180, 1508},
+ {-160, 1483},
+ {-140, 1458},
+ {-120, 1432},
+ {-100, 1404},
+ {-80, 1377},
+ {-60, 1348},
+ {-40, 1319},
+ {-20, 1290},
+ {0, 1260},
+ {20, 1230},
+ {40, 1200},
+ {60, 1171},
+ {80, 1141},
+ {100, 1112},
+ {120, 1083},
+ {140, 1055},
+ {160, 1027},
+ {180, 1000},
+ {200, 973},
+ {220, 948},
+ {240, 923},
+ {260, 899},
+ {280, 876},
+ {300, 854},
+ {320, 832},
+ {340, 812},
+ {360, 792},
+ {380, 774},
+ {400, 756},
+ {420, 739},
+ {440, 723},
+ {460, 707},
+ {480, 692},
+ {500, 679},
+ {520, 665},
+ {540, 653},
+ {560, 641},
+ {580, 630},
+ {600, 619},
+ {620, 609},
+ {640, 600},
+ {660, 591},
+ {680, 583},
+ {700, 575},
+ {720, 567},
+ {740, 560},
+ {760, 553},
+ {780, 547},
+ {800, 541},
+ {820, 535},
+ {840, 530},
+ {860, 524},
+ {880, 520},
+};
+
+static const struct qpnp_vadc_map_pt adcmap_qrd_skut1_btm_threshold[] = {
+ {-400, 1759},
+ {-350, 1742},
+ {-300, 1720},
+ {-250, 1691},
+ {-200, 1654},
+ {-150, 1619},
+ {-100, 1556},
+ {-50, 1493},
+ {0, 1422},
+ {50, 1345},
+ {100, 1264},
+ {150, 1180},
+ {200, 1097},
+ {250, 1017},
+ {300, 942},
+ {350, 873},
+ {400, 810},
+ {450, 754},
+ {500, 706},
+ {550, 664},
+ {600, 627},
+ {650, 596},
+ {700, 570},
+ {750, 547},
+ {800, 528},
+ {850, 512},
+ {900, 499},
+ {950, 487},
+ {1000, 477},
+};
+
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb[] = {
+ {1758, -40},
+ {1742, -35},
+ {1719, -30},
+ {1691, -25},
+ {1654, -20},
+ {1608, -15},
+ {1551, -10},
+ {1483, -5},
+ {1404, 0},
+ {1315, 5},
+ {1218, 10},
+ {1114, 15},
+ {1007, 20},
+ {900, 25},
+ {795, 30},
+ {696, 35},
+ {605, 40},
+ {522, 45},
+ {448, 50},
+ {383, 55},
+ {327, 60},
+ {278, 65},
+ {237, 70},
+ {202, 75},
+ {172, 80},
+ {146, 85},
+ {125, 90},
+ {107, 95},
+ {92, 100},
+ {79, 105},
+ {68, 110},
+ {59, 115},
+ {51, 120},
+ {44, 125}
+};
+
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_150k_104ef_104fb[] = {
+ {1738, -40},
+ {1714, -35},
+ {1682, -30},
+ {1641, -25},
+ {1589, -20},
+ {1526, -15},
+ {1451, -10},
+ {1363, -5},
+ {1266, 0},
+ {1159, 5},
+ {1048, 10},
+ {936, 15},
+ {825, 20},
+ {720, 25},
+ {622, 30},
+ {533, 35},
+ {454, 40},
+ {385, 45},
+ {326, 50},
+ {275, 55},
+ {232, 60},
+ {195, 65},
+ {165, 70},
+ {139, 75},
+ {118, 80},
+ {100, 85},
+ {85, 90},
+ {73, 95},
+ {62, 100},
+ {53, 105},
+ {46, 110},
+ {40, 115},
+ {34, 120},
+ {30, 125}
+};
+
+static const struct qpnp_vadc_map_pt adcmap_smb_batt_therm[] = {
+ {-300, 1625},
+ {-200, 1515},
+ {-100, 1368},
+ {0, 1192},
+ {10, 1173},
+ {20, 1154},
+ {30, 1135},
+ {40, 1116},
+ {50, 1097},
+ {60, 1078},
+ {70, 1059},
+ {80, 1040},
+ {90, 1020},
+ {100, 1001},
+ {110, 982},
+ {120, 963},
+ {130, 944},
+ {140, 925},
+ {150, 907},
+ {160, 888},
+ {170, 870},
+ {180, 851},
+ {190, 833},
+ {200, 815},
+ {210, 797},
+ {220, 780},
+ {230, 762},
+ {240, 745},
+ {250, 728},
+ {260, 711},
+ {270, 695},
+ {280, 679},
+ {290, 663},
+ {300, 647},
+ {310, 632},
+ {320, 616},
+ {330, 602},
+ {340, 587},
+ {350, 573},
+ {360, 559},
+ {370, 545},
+ {380, 531},
+ {390, 518},
+ {400, 505},
+ {410, 492},
+ {420, 480},
+ {430, 465},
+ {440, 456},
+ {450, 445},
+ {460, 433},
+ {470, 422},
+ {480, 412},
+ {490, 401},
+ {500, 391},
+ {510, 381},
+ {520, 371},
+ {530, 362},
+ {540, 352},
+ {550, 343},
+ {560, 335},
+ {570, 326},
+ {580, 318},
+ {590, 309},
+ {600, 302},
+ {610, 294},
+ {620, 286},
+ {630, 279},
+ {640, 272},
+ {650, 265},
+ {660, 258},
+ {670, 252},
+ {680, 245},
+ {690, 239},
+ {700, 233},
+ {710, 227},
+ {720, 221},
+ {730, 216},
+ {740, 211},
+ {750, 205},
+ {760, 200},
+ {770, 195},
+ {780, 190},
+ {790, 186}
+};
+
+/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_ncp03wf683[] = {
+ {1742, -40},
+ {1718, -35},
+ {1687, -30},
+ {1647, -25},
+ {1596, -20},
+ {1534, -15},
+ {1459, -10},
+ {1372, -5},
+ {1275, 0},
+ {1169, 5},
+ {1058, 10},
+ {945, 15},
+ {834, 20},
+ {729, 25},
+ {630, 30},
+ {541, 35},
+ {461, 40},
+ {392, 45},
+ {332, 50},
+ {280, 55},
+ {236, 60},
+ {199, 65},
+ {169, 70},
+ {142, 75},
+ {121, 80},
+ {102, 85},
+ {87, 90},
+ {74, 95},
+ {64, 100},
+ {55, 105},
+ {47, 110},
+ {40, 115},
+ {35, 120},
+ {30, 125}
+};
+
+/*
+ * Voltage to temperature table for 100k pull up for NTCG104EF104 with
+ * 1.875V reference.
+ */
+static const struct qpnp_vadc_map_pt adcmap_100k_104ef_104fb_1875_vref[] = {
+ { 1831, -40 },
+ { 1814, -35 },
+ { 1791, -30 },
+ { 1761, -25 },
+ { 1723, -20 },
+ { 1675, -15 },
+ { 1616, -10 },
+ { 1545, -5 },
+ { 1463, 0 },
+ { 1370, 5 },
+ { 1268, 10 },
+ { 1160, 15 },
+ { 1049, 20 },
+ { 937, 25 },
+ { 828, 30 },
+ { 726, 35 },
+ { 630, 40 },
+ { 544, 45 },
+ { 467, 50 },
+ { 399, 55 },
+ { 340, 60 },
+ { 290, 65 },
+ { 247, 70 },
+ { 209, 75 },
+ { 179, 80 },
+ { 153, 85 },
+ { 130, 90 },
+ { 112, 95 },
+ { 96, 100 },
+ { 82, 105 },
+ { 71, 110 },
+ { 62, 115 },
+ { 53, 120 },
+ { 46, 125 },
+};
+
+static int32_t qpnp_adc_map_voltage_temp(const struct qpnp_vadc_map_pt *pts,
+ uint32_t tablesize, int32_t input, int64_t *output)
+{
+ bool descending = 1;
+ uint32_t i = 0;
+
+ if (pts == NULL)
+ return -EINVAL;
+
+ /* Check if table is descending or ascending */
+ if (tablesize > 1) {
+ if (pts[0].x < pts[1].x)
+ descending = 0;
+ }
+
+ while (i < tablesize) {
+ if ((descending == 1) && (pts[i].x < input)) {
+ /* table entry is less than measured
+ value and table is descending, stop */
+ break;
+ } else if ((descending == 0) &&
+ (pts[i].x > input)) {
+ /* table entry is greater than measured
+ value and table is ascending, stop */
+ break;
+ } else {
+ i++;
+ }
+ }
+
+ if (i == 0)
+ *output = pts[0].y;
+ else if (i == tablesize)
+ *output = pts[tablesize-1].y;
+ else {
+ /* result is between search_index and search_index-1 */
+ /* interpolate linearly */
+ *output = (((int32_t) ((pts[i].y - pts[i-1].y)*
+ (input - pts[i-1].x))/
+ (pts[i].x - pts[i-1].x))+
+ pts[i-1].y);
+ }
+
+ return 0;
+}
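+
+/*
+ * Illustrative interpolation example (numbers are for explanation only):
+ * mapping an input of 950 through adcmap_100k_104ef_104fb[], which is
+ * descending in x, stops the search at {900, 25} with {1007, 20} as the
+ * previous entry, so
+ *	y = (25 - 20) * (950 - 1007) / (900 - 1007) + 20 = 22
+ * i.e. roughly 950 mV on the 100k pull-up NTC reads back as ~22 DegC.
+ */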
+
+static int32_t qpnp_adc_map_temp_voltage(const struct qpnp_vadc_map_pt *pts,
+ uint32_t tablesize, int32_t input, int64_t *output)
+{
+ bool descending = 1;
+ uint32_t i = 0;
+
+ if (pts == NULL)
+ return -EINVAL;
+
+ /* Check if table is descending or ascending */
+ if (tablesize > 1) {
+ if (pts[0].y < pts[1].y)
+ descending = 0;
+ }
+
+ while (i < tablesize) {
+ if ((descending == 1) && (pts[i].y < input)) {
+ /* table entry is less than measured
+ value and table is descending, stop */
+ break;
+ } else if ((descending == 0) && (pts[i].y > input)) {
+ /* table entry is greater than measured
+ value and table is ascending, stop */
+ break;
+ } else {
+ i++;
+ }
+ }
+
+ if (i == 0) {
+ *output = pts[0].x;
+ } else if (i == tablesize) {
+ *output = pts[tablesize-1].x;
+ } else {
+ /* result is between search_index and search_index-1 */
+ /* interpolate linearly */
+ *output = (((int32_t) ((pts[i].x - pts[i-1].x)*
+ (input - pts[i-1].y))/
+ (pts[i].y - pts[i-1].y))+
+ pts[i-1].x);
+ }
+
+ return 0;
+}
+
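+/*
+ * Convert a raw ADC code to a voltage using the per-channel calibration
+ * graph: take the code's offset from the measured ground reference and
+ * scale it by the slope of the calibration line,
+ *	V = (adc_code - adc_gnd) * dx / dy
+ * with dx added back in for absolute calibration.  Negative results are
+ * clamped to zero.
+ */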
+static void qpnp_adc_scale_with_calib_param(int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ int64_t *scale_voltage)
+{
+ *scale_voltage = (adc_code -
+ chan_properties->adc_graph[chan_properties->calib_type].adc_gnd)
+ * chan_properties->adc_graph[chan_properties->calib_type].dx;
+ *scale_voltage = div64_s64(*scale_voltage,
+ chan_properties->adc_graph[chan_properties->calib_type].dy);
+
+ if (chan_properties->calib_type == CALIB_ABSOLUTE)
+ *scale_voltage +=
+ chan_properties->adc_graph[chan_properties->calib_type].dx;
+
+ if (*scale_voltage < 0)
+ *scale_voltage = 0;
+}
+
+int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t pmic_voltage = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result
+ || !chan_properties->adc_graph[CALIB_ABSOLUTE].dy)
+ return -EINVAL;
+
+ if (adc_properties->adc_hc) {
+ /* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+ pmic_voltage = (adc_code * adc_properties->adc_vdd_reference
+ * 1000);
+ pmic_voltage = div64_s64(pmic_voltage,
+ (QPNP_VADC_HC_VREF_CODE * 1000));
+ } else
+ qpnp_adc_scale_with_calib_param(adc_code, adc_properties,
+ chan_properties, &pmic_voltage);
+ if (pmic_voltage > 0) {
+ /* 2mV/K */
+ adc_chan_result->measurement = pmic_voltage*
+ chan_properties->offset_gain_denominator;
+
+ do_div(adc_chan_result->measurement,
+ chan_properties->offset_gain_numerator * 2);
+ } else
+ adc_chan_result->measurement = 0;
+
+ /* Change to .001 deg C */
+ adc_chan_result->measurement -= KELVINMIL_DEGMIL;
+ adc_chan_result->physical = (int32_t)adc_chan_result->measurement;
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_pmic_therm);
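+
+/*
+ * Rough worked example for the non-HC (absolute calibration) path, using
+ * illustrative numbers and a 1:1 prescaler: a scaled reading of 600000 uV
+ * gives measurement = 600000 / 2 = 300000 (milli-Kelvin at 2 mV/K), and
+ * subtracting KELVINMIL_DEGMIL leaves 300000 - 273160 = 26840 milli-DegC,
+ * i.e. roughly 26.8 DegC reported in ->physical.
+ */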
+
+int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph btm_param;
+ int64_t low_output = 0, high_output = 0;
+ int rc = 0, sign = 0;
+
+ rc = qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_ABSOLUTE);
+ if (rc < 0) {
+ pr_err("Could not acquire gain and offset\n");
+ return rc;
+ }
+
+ /* Convert to Kelvin and account for voltage to be written as 2mV/K */
+ low_output = (param->low_temp + KELVINMIL_DEGMIL) * 2;
+ /* Convert to voltage threshold */
+ low_output = (low_output - QPNP_ADC_625_UV) * btm_param.dy;
+ if (low_output < 0) {
+ sign = 1;
+ low_output = -low_output;
+ }
+ do_div(low_output, QPNP_ADC_625_UV);
+ if (sign)
+ low_output = -low_output;
+ low_output += btm_param.adc_gnd;
+
+ sign = 0;
+ /* Convert to Kelvin and account for voltage to be written as 2mV/K */
+ high_output = (param->high_temp + KELVINMIL_DEGMIL) * 2;
+ /* Convert to voltage threshold */
+ high_output = (high_output - QPNP_ADC_625_UV) * btm_param.dy;
+ if (high_output < 0) {
+ sign = 1;
+ high_output = -high_output;
+ }
+ do_div(high_output, QPNP_ADC_625_UV);
+ if (sign)
+ high_output = -high_output;
+ high_output += btm_param.adc_gnd;
+
+ *low_threshold = (uint32_t) low_output;
+ *high_threshold = (uint32_t) high_output;
+ pr_debug("high_temp:%d, low_temp:%d\n", param->high_temp,
+ param->low_temp);
+ pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+ *low_threshold);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_millidegc_pmic_voltage_thr);
+
+/* Scales the ADC code to degC using the mapping
+ * table for the XO thermistor.
+ */
+int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t xo_thm_voltage = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result)
+ return -EINVAL;
+
+ if (adc_properties->adc_hc) {
+ /* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+ xo_thm_voltage = (adc_code * adc_properties->adc_vdd_reference
+ * 1000);
+ xo_thm_voltage = div64_s64(xo_thm_voltage,
+ (QPNP_VADC_HC_VREF_CODE * 1000));
+
+ qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+ xo_thm_voltage, &adc_chan_result->physical);
+ } else {
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &xo_thm_voltage);
+
+ if (chan_properties->calib_type == CALIB_ABSOLUTE)
+ do_div(xo_thm_voltage, 1000);
+
+ qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb),
+ xo_thm_voltage, &adc_chan_result->physical);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_tdkntcg_therm);
+
+int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t bat_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &bat_voltage);
+
+ adc_chan_result->measurement = bat_voltage;
+
+ return qpnp_adc_map_temp_voltage(
+ adcmap_btm_threshold,
+ ARRAY_SIZE(adcmap_btm_threshold),
+ bat_voltage,
+ &adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t bat_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &bat_voltage);
+
+ adc_chan_result->measurement = bat_voltage;
+
+ return qpnp_adc_map_temp_voltage(
+ adcmap_qrd_btm_threshold,
+ ARRAY_SIZE(adcmap_qrd_btm_threshold),
+ bat_voltage,
+ &adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skuaa_batt_therm(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t bat_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &bat_voltage);
+
+ adc_chan_result->measurement = bat_voltage;
+
+ return qpnp_adc_map_temp_voltage(
+ adcmap_qrd_skuaa_btm_threshold,
+ ARRAY_SIZE(adcmap_qrd_skuaa_btm_threshold),
+ bat_voltage,
+ &adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skuaa_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skug_batt_therm(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t bat_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &bat_voltage);
+ adc_chan_result->measurement = bat_voltage;
+
+ return qpnp_adc_map_temp_voltage(
+ adcmap_qrd_skug_btm_threshold,
+ ARRAY_SIZE(adcmap_qrd_skug_btm_threshold),
+ bat_voltage,
+ &adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skug_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skuh_batt_therm(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t bat_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &bat_voltage);
+
+ return qpnp_adc_map_temp_voltage(
+ adcmap_qrd_skuh_btm_threshold,
+ ARRAY_SIZE(adcmap_qrd_skuh_btm_threshold),
+ bat_voltage,
+ &adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skuh_batt_therm);
+
+int32_t qpnp_adc_scale_qrd_skut1_batt_therm(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t bat_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &bat_voltage);
+
+ return qpnp_adc_map_temp_voltage(
+ adcmap_qrd_skut1_btm_threshold,
+ ARRAY_SIZE(adcmap_qrd_skut1_btm_threshold),
+ bat_voltage,
+ &adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_qrd_skut1_batt_therm);
+
+int32_t qpnp_adc_scale_smb_batt_therm(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t bat_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &bat_voltage);
+
+ return qpnp_adc_map_temp_voltage(
+ adcmap_smb_batt_therm,
+ ARRAY_SIZE(adcmap_smb_batt_therm),
+ bat_voltage,
+ &adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_scale_smb_batt_therm);
+
+int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t therm_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &therm_voltage);
+
+ qpnp_adc_map_voltage_temp(adcmap_150k_104ef_104fb,
+ ARRAY_SIZE(adcmap_150k_104ef_104fb),
+ therm_voltage, &adc_chan_result->physical);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_therm_pu1);
+
+int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t therm_voltage = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties)
+ return -EINVAL;
+
+ if (adc_properties->adc_hc) {
+ /* (ADC code * vref_vadc (1.875V) * 1000) / (0x4000 * 1000) */
+ therm_voltage = (adc_code * adc_properties->adc_vdd_reference
+ * 1000);
+ therm_voltage = div64_s64(therm_voltage,
+ (QPNP_VADC_HC_VREF_CODE * 1000));
+
+ qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb_1875_vref,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb_1875_vref),
+ therm_voltage, &adc_chan_result->physical);
+ } else {
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &therm_voltage);
+
+ if (chan_properties->calib_type == CALIB_ABSOLUTE)
+ do_div(therm_voltage, 1000);
+
+ qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb),
+ therm_voltage, &adc_chan_result->physical);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_therm_pu2);
+
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *chip,
+ uint32_t reg, int64_t *result)
+{
+ int64_t adc_voltage = 0;
+ struct qpnp_vadc_linear_graph param1;
+ int negative_offset = 0;
+
+ qpnp_get_vadc_gain_and_offset(chip, &param1, CALIB_RATIOMETRIC);
+
+ adc_voltage = (reg - param1.adc_gnd) * param1.adc_vref;
+ if (adc_voltage < 0) {
+ negative_offset = 1;
+ adc_voltage = -adc_voltage;
+ }
+
+ do_div(adc_voltage, param1.dy);
+
+ qpnp_adc_map_voltage_temp(adcmap_100k_104ef_104fb,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb),
+ adc_voltage, result);
+ if (negative_offset)
+ adc_voltage = -adc_voltage;
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_scale_voltage_therm_pu2);
+
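+/*
+ * Convert the low/high temperature thresholds in *param into raw
+ * ratiometric ADC codes: look up the expected thermistor voltage in the
+ * 100k NTC table, then place it on the calibration line,
+ *	code = V_table * dy / adc_vref + adc_gnd
+ * so the result can be programmed directly as a voltage threshold.
+ */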
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_config *param)
+{
+ struct qpnp_vadc_linear_graph param1;
+ int rc;
+
+ qpnp_get_vadc_gain_and_offset(chip, &param1, CALIB_RATIOMETRIC);
+
+ rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb),
+ param->low_thr_temp, &param->low_thr_voltage);
+ if (rc)
+ return rc;
+
+ param->low_thr_voltage *= param1.dy;
+ do_div(param->low_thr_voltage, param1.adc_vref);
+ param->low_thr_voltage += param1.adc_gnd;
+
+ rc = qpnp_adc_map_temp_voltage(adcmap_100k_104ef_104fb,
+ ARRAY_SIZE(adcmap_100k_104ef_104fb),
+ param->high_thr_temp, &param->high_thr_voltage);
+ if (rc)
+ return rc;
+
+ param->high_thr_voltage *= param1.dy;
+ do_div(param->high_thr_voltage, param1.adc_vref);
+ param->high_thr_voltage += param1.adc_gnd;
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_tm_scale_therm_voltage_pu2);
+
+int32_t qpnp_adc_scale_therm_ncp03(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t therm_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &therm_voltage);
+
+ qpnp_adc_map_voltage_temp(adcmap_ncp03wf683,
+ ARRAY_SIZE(adcmap_ncp03wf683),
+ therm_voltage, &adc_chan_result->physical);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_therm_ncp03);
+
+int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t batt_id_voltage = 0;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &batt_id_voltage);
+
+ adc_chan_result->measurement = batt_id_voltage;
+ adc_chan_result->physical = adc_chan_result->measurement;
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_batt_id);
+
+int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t scale_voltage = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result)
+ return -EINVAL;
+
+ if (adc_properties->adc_hc) {
+ /* (ADC code * vref_vadc (1.875V)) / 0x4000 */
+ scale_voltage = (adc_code * adc_properties->adc_vdd_reference
+ * 1000);
+ scale_voltage = div64_s64(scale_voltage,
+ QPNP_VADC_HC_VREF_CODE);
+ } else {
+ qpnp_adc_scale_with_calib_param(adc_code, adc_properties,
+ chan_properties, &scale_voltage);
+ if (chan_properties->calib_type != CALIB_ABSOLUTE)
+ scale_voltage *= 1000;
+ }
+
+ scale_voltage *= chan_properties->offset_gain_denominator;
+ scale_voltage = div64_s64(scale_voltage,
+ chan_properties->offset_gain_numerator);
+ adc_chan_result->measurement = scale_voltage;
+ /*
+ * Note: adc_chan_result->measurement is in the unit of
+ * adc_properties.adc_reference. For generic channel processing,
+ * channel measurement is a scale/ratio relative to the adc
+ * reference input
+ */
+ adc_chan_result->physical = adc_chan_result->measurement;
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_default);
+
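+/*
+ * Reverse-scale the USB voltage thresholds in *param into raw ratiometric
+ * ADC codes,
+ *	code = thr * dy / adc_vref + adc_gnd
+ * i.e. the threshold expressed as a fraction of the reference, in codes,
+ * offset by the measured ground code.
+ */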
+int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph usb_param;
+
+ qpnp_get_vadc_gain_and_offset(chip, &usb_param, CALIB_RATIOMETRIC);
+
+ *low_threshold = param->low_thr * usb_param.dy;
+ do_div(*low_threshold, usb_param.adc_vref);
+ *low_threshold += usb_param.adc_gnd;
+
+ *high_threshold = param->high_thr * usb_param.dy;
+ do_div(*high_threshold, usb_param.adc_vref);
+ *high_threshold += usb_param.adc_gnd;
+
+ pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+ param->low_thr);
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_usb_scaler);
+
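+/*
+ * Reverse-scale the VBATT thresholds into raw ADC codes on the absolute
+ * calibration graph,
+ *	code = ((thr / gain_den) - QPNP_ADC_625_UV) * dy / QPNP_ADC_625_UV
+ *		+ adc_gnd
+ * where the divide by gain_den presumably undoes the channel pre-divider.
+ * The sign juggling around do_div() exists only because do_div() needs a
+ * non-negative dividend.
+ */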
+int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph vbatt_param;
+ int rc = 0, sign = 0;
+ int64_t low_thr = 0, high_thr = 0;
+
+ rc = qpnp_get_vadc_gain_and_offset(chip, &vbatt_param, CALIB_ABSOLUTE);
+ if (rc < 0)
+ return rc;
+
+ low_thr = (((param->low_thr/param->gain_den) - QPNP_ADC_625_UV) *
+ vbatt_param.dy);
+ if (low_thr < 0) {
+ sign = 1;
+ low_thr = -low_thr;
+ }
+ do_div(low_thr, QPNP_ADC_625_UV);
+ if (sign)
+ low_thr = -low_thr;
+ *low_threshold = low_thr + vbatt_param.adc_gnd;
+
+ sign = 0;
+ high_thr = (((param->high_thr/param->gain_den) - QPNP_ADC_625_UV) *
+ vbatt_param.dy);
+ if (high_thr < 0) {
+ sign = 1;
+ high_thr = -high_thr;
+ }
+ do_div(high_thr, QPNP_ADC_625_UV);
+ if (sign)
+ high_thr = -high_thr;
+ *high_threshold = high_thr + vbatt_param.adc_gnd;
+
+ pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+ param->low_thr);
+ pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+ *low_threshold);
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_vbatt_rscaler);
+
+int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph vbatt_param;
+ int rc = 0, sign = 0;
+ int64_t low_thr = 0, high_thr = 0;
+
+ rc = qpnp_get_vadc_gain_and_offset(chip, &vbatt_param, CALIB_ABSOLUTE);
+ if (rc < 0)
+ return rc;
+
+ low_thr = (((param->low_thr) - QPNP_ADC_625_UV) * vbatt_param.dy);
+ if (low_thr < 0) {
+ sign = 1;
+ low_thr = -low_thr;
+ }
+ do_div(low_thr, QPNP_ADC_625_UV);
+ if (sign)
+ low_thr = -low_thr;
+ *low_threshold = low_thr + vbatt_param.adc_gnd;
+
+ sign = 0;
+ high_thr = (((param->high_thr) - QPNP_ADC_625_UV) * vbatt_param.dy);
+ if (high_thr < 0) {
+ sign = 1;
+ high_thr = -high_thr;
+ }
+ do_div(high_thr, QPNP_ADC_625_UV);
+ if (sign)
+ high_thr = -high_thr;
+ *high_threshold = high_thr + vbatt_param.adc_gnd;
+
+ pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+ param->low_thr);
+ pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+ *low_threshold);
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_absolute_rthr);
+
+int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *chip,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph vbatt_param;
+ int rc = 0, sign = 0;
+ int64_t low_thr = 0, high_thr = 0;
+
+ if (!chan_prop || !chan_prop->offset_gain_numerator ||
+ !chan_prop->offset_gain_denominator)
+ return -EINVAL;
+
+ rc = qpnp_get_vadc_gain_and_offset(chip, &vbatt_param, CALIB_ABSOLUTE);
+ if (rc < 0)
+ return rc;
+
+ low_thr = (((param->low_thr)/(int)chan_prop->offset_gain_denominator
+ - QPNP_ADC_625_UV) * vbatt_param.dy);
+ if (low_thr < 0) {
+ sign = 1;
+ low_thr = -low_thr;
+ }
+ low_thr = low_thr * chan_prop->offset_gain_numerator;
+ do_div(low_thr, QPNP_ADC_625_UV);
+ if (sign)
+ low_thr = -low_thr;
+ *low_threshold = low_thr + vbatt_param.adc_gnd;
+
+ sign = 0;
+ high_thr = (((param->high_thr)/(int)chan_prop->offset_gain_denominator
+ - QPNP_ADC_625_UV) * vbatt_param.dy);
+ if (high_thr < 0) {
+ sign = 1;
+ high_thr = -high_thr;
+ }
+ high_thr = high_thr * chan_prop->offset_gain_numerator;
+ do_div(high_thr, QPNP_ADC_625_UV);
+ if (sign)
+ high_thr = -high_thr;
+ *high_threshold = high_thr + vbatt_param.adc_gnd;
+
+ pr_debug("high_volt:%d, low_volt:%d\n", param->high_thr,
+ param->low_thr);
+ pr_debug("adc_code_high:%x, adc_code_low:%x\n", *high_threshold,
+ *low_threshold);
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_vadc_absolute_rthr);
+
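+/*
+ * Map the battery-temperature trip points in *param onto raw ratiometric
+ * ADC codes.  The battery NTC voltage falls as temperature rises, so the
+ * cool (low_temp) trip ends up as the higher voltage code and the warm
+ * (high_temp) trip as the lower one, hence the swap of low/high at the
+ * end.
+ */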
+int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph btm_param;
+ int64_t low_output = 0, high_output = 0;
+ int rc = 0;
+
+ qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+ pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+ param->low_temp);
+ rc = qpnp_adc_map_voltage_temp(
+ adcmap_btm_threshold,
+ ARRAY_SIZE(adcmap_btm_threshold),
+ (param->low_temp),
+ &low_output);
+ if (rc) {
+ pr_debug("low_temp mapping failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("low_output:%lld\n", low_output);
+ low_output *= btm_param.dy;
+ do_div(low_output, btm_param.adc_vref);
+ low_output += btm_param.adc_gnd;
+
+ rc = qpnp_adc_map_voltage_temp(
+ adcmap_btm_threshold,
+ ARRAY_SIZE(adcmap_btm_threshold),
+ (param->high_temp),
+ &high_output);
+ if (rc) {
+ pr_debug("high temp mapping failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("high_output:%lld\n", high_output);
+ high_output *= btm_param.dy;
+ do_div(high_output, btm_param.adc_vref);
+ high_output += btm_param.adc_gnd;
+
+ /* btm low temperature corresponds to high voltage threshold */
+ *low_threshold = high_output;
+ /* btm high temperature corresponds to low voltage threshold */
+ *high_threshold = low_output;
+
+ pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+ *low_threshold);
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_btm_scaler);
+
+int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph btm_param;
+ int64_t low_output = 0, high_output = 0;
+ int rc = 0;
+
+ qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+ pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+ param->low_temp);
+ rc = qpnp_adc_map_voltage_temp(
+ adcmap_qrd_skuh_btm_threshold,
+ ARRAY_SIZE(adcmap_qrd_skuh_btm_threshold),
+ (param->low_temp),
+ &low_output);
+ if (rc) {
+ pr_debug("low_temp mapping failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("low_output:%lld\n", low_output);
+ low_output *= btm_param.dy;
+ do_div(low_output, btm_param.adc_vref);
+ low_output += btm_param.adc_gnd;
+
+ rc = qpnp_adc_map_voltage_temp(
+ adcmap_qrd_skuh_btm_threshold,
+ ARRAY_SIZE(adcmap_qrd_skuh_btm_threshold),
+ (param->high_temp),
+ &high_output);
+ if (rc) {
+ pr_debug("high temp mapping failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("high_output:%lld\n", high_output);
+ high_output *= btm_param.dy;
+ do_div(high_output, btm_param.adc_vref);
+ high_output += btm_param.adc_gnd;
+
+ /* btm low temperature corresponds to high voltage threshold */
+ *low_threshold = high_output;
+ /* btm high temperature corresponds to low voltage threshold */
+ *high_threshold = low_output;
+
+ pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+ *low_threshold);
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_qrd_skuh_btm_scaler);
+
+int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph btm_param;
+ int64_t low_output = 0, high_output = 0;
+ int rc = 0;
+
+ qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+ pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+ param->low_temp);
+ rc = qpnp_adc_map_voltage_temp(
+ adcmap_qrd_skut1_btm_threshold,
+ ARRAY_SIZE(adcmap_qrd_skut1_btm_threshold),
+ (param->low_temp),
+ &low_output);
+ if (rc) {
+ pr_debug("low_temp mapping failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("low_output:%lld\n", low_output);
+ low_output *= btm_param.dy;
+ do_div(low_output, btm_param.adc_vref);
+ low_output += btm_param.adc_gnd;
+
+ rc = qpnp_adc_map_voltage_temp(
+ adcmap_qrd_skut1_btm_threshold,
+ ARRAY_SIZE(adcmap_qrd_skut1_btm_threshold),
+ (param->high_temp),
+ &high_output);
+ if (rc) {
+ pr_debug("high temp mapping failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("high_output:%lld\n", high_output);
+ high_output *= btm_param.dy;
+ do_div(high_output, btm_param.adc_vref);
+ high_output += btm_param.adc_gnd;
+
+ /* btm low temperature corresponds to high voltage threshold */
+ *low_threshold = high_output;
+ /* btm high temperature corresponds to low voltage threshold */
+ *high_threshold = low_output;
+
+ pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+ *low_threshold);
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_qrd_skut1_btm_scaler);
+
+int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{
+ struct qpnp_vadc_linear_graph btm_param;
+ int64_t low_output = 0, high_output = 0;
+ int rc = 0;
+
+ qpnp_get_vadc_gain_and_offset(chip, &btm_param, CALIB_RATIOMETRIC);
+
+ pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
+ param->low_temp);
+ rc = qpnp_adc_map_voltage_temp(
+ adcmap_smb_batt_therm,
+ ARRAY_SIZE(adcmap_smb_batt_therm),
+ (param->low_temp),
+ &low_output);
+ if (rc) {
+ pr_debug("low_temp mapping failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("low_output:%lld\n", low_output);
+ low_output *= btm_param.dy;
+ do_div(low_output, btm_param.adc_vref);
+ low_output += btm_param.adc_gnd;
+
+ rc = qpnp_adc_map_voltage_temp(
+ adcmap_smb_batt_therm,
+ ARRAY_SIZE(adcmap_smb_batt_therm),
+ (param->high_temp),
+ &high_output);
+ if (rc) {
+ pr_debug("high temp mapping failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("high_output:%lld\n", high_output);
+ high_output *= btm_param.dy;
+ do_div(high_output, btm_param.adc_vref);
+ high_output += btm_param.adc_gnd;
+
+ /* btm low temperature corresponds to high voltage threshold */
+ *low_threshold = high_output;
+ /* btm high temperature corresponds to low voltage threshold */
+ *high_threshold = low_output;
+
+ pr_debug("high_volt:%d, low_volt:%d\n", *high_threshold,
+ *low_threshold);
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_smb_btm_rscaler);
+
+int32_t qpnp_adc_scale_pmi_chg_temp(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int rc = 0;
+
+ rc = qpnp_adc_scale_default(vadc, adc_code, adc_properties,
+ chan_properties, adc_chan_result);
+ if (rc < 0)
+ return rc;
+
+ pr_debug("raw_code:%x, v_adc:%lld\n", adc_code,
+ adc_chan_result->physical);
+ adc_chan_result->physical = (int64_t) ((PMI_CHG_SCALE_1) *
+ (adc_chan_result->physical * 2));
+ adc_chan_result->physical = (int64_t) (adc_chan_result->physical +
+ PMI_CHG_SCALE_2);
+ adc_chan_result->physical = (int64_t) adc_chan_result->physical;
+ adc_chan_result->physical = div64_s64(adc_chan_result->physical,
+ 1000000);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_scale_pmi_chg_temp);
+
+int32_t qpnp_adc_enable_voltage(struct qpnp_adc_drv *adc)
+{
+ int rc = 0;
+
+ if (adc->hkadc_ldo) {
+ rc = regulator_enable(adc->hkadc_ldo);
+ if (rc < 0) {
+ pr_err("Failed to enable hkadc ldo\n");
+ return rc;
+ }
+ }
+
+ if (adc->hkadc_ldo_ok) {
+ rc = regulator_enable(adc->hkadc_ldo_ok);
+ if (rc < 0) {
+ pr_err("Failed to enable hkadc ok signal\n");
+ return rc;
+ }
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_adc_enable_voltage);
+
+void qpnp_adc_disable_voltage(struct qpnp_adc_drv *adc)
+{
+ if (adc->hkadc_ldo)
+ regulator_disable(adc->hkadc_ldo);
+
+ if (adc->hkadc_ldo_ok)
+ regulator_disable(adc->hkadc_ldo_ok);
+
+}
+EXPORT_SYMBOL(qpnp_adc_disable_voltage);
+
+void qpnp_adc_free_voltage_resource(struct qpnp_adc_drv *adc)
+{
+ if (adc->hkadc_ldo)
+ regulator_put(adc->hkadc_ldo);
+
+ if (adc->hkadc_ldo_ok)
+ regulator_put(adc->hkadc_ldo_ok);
+}
+EXPORT_SYMBOL(qpnp_adc_free_voltage_resource);
+
+int qpnp_adc_get_revid_version(struct device *dev)
+{
+ struct pmic_revid_data *revid_data;
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(dev->of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_debug("Missing qcom,pmic-revid property\n");
+ return -EINVAL;
+ }
+
+ revid_data = get_revid_data(revid_dev_node);
+ if (IS_ERR(revid_data)) {
+ pr_debug("revid error rc = %ld\n", PTR_ERR(revid_data));
+ return -EINVAL;
+ }
+
+ if ((revid_data->rev1 == PM8941_V3P1_REV1) &&
+ (revid_data->rev2 == PM8941_V3P1_REV2) &&
+ (revid_data->rev3 == PM8941_V3P1_REV3) &&
+ (revid_data->rev4 == PM8941_V3P1_REV4) &&
+ (revid_data->pmic_type == PM8941_V3P1_TYPE) &&
+ (revid_data->pmic_subtype == PM8941_V3P1_SUBTYPE))
+ return QPNP_REV_ID_8941_3_1;
+ else if ((revid_data->rev1 == PM8941_V3P0_REV1) &&
+ (revid_data->rev2 == PM8941_V3P0_REV2) &&
+ (revid_data->rev3 == PM8941_V3P0_REV3) &&
+ (revid_data->rev4 == PM8941_V3P0_REV4) &&
+ (revid_data->pmic_type == PM8941_V3P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8941_V3P0_SUBTYPE))
+ return QPNP_REV_ID_8941_3_0;
+ else if ((revid_data->rev1 == PM8941_V2P0_REV1) &&
+ (revid_data->rev2 == PM8941_V2P0_REV2) &&
+ (revid_data->rev3 == PM8941_V2P0_REV3) &&
+ (revid_data->rev4 == PM8941_V2P0_REV4) &&
+ (revid_data->pmic_type == PM8941_V2P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8941_V2P0_SUBTYPE))
+ return QPNP_REV_ID_8941_2_0;
+ else if ((revid_data->rev1 == PM8226_V2P2_REV1) &&
+ (revid_data->rev2 == PM8226_V2P2_REV2) &&
+ (revid_data->rev3 == PM8226_V2P2_REV3) &&
+ (revid_data->rev4 == PM8226_V2P2_REV4) &&
+ (revid_data->pmic_type == PM8226_V2P2_TYPE) &&
+ (revid_data->pmic_subtype == PM8226_V2P2_SUBTYPE))
+ return QPNP_REV_ID_8026_2_2;
+ else if ((revid_data->rev1 == PM8226_V2P1_REV1) &&
+ (revid_data->rev2 == PM8226_V2P1_REV2) &&
+ (revid_data->rev3 == PM8226_V2P1_REV3) &&
+ (revid_data->rev4 == PM8226_V2P1_REV4) &&
+ (revid_data->pmic_type == PM8226_V2P1_TYPE) &&
+ (revid_data->pmic_subtype == PM8226_V2P1_SUBTYPE))
+ return QPNP_REV_ID_8026_2_1;
+ else if ((revid_data->rev1 == PM8226_V2P0_REV1) &&
+ (revid_data->rev2 == PM8226_V2P0_REV2) &&
+ (revid_data->rev3 == PM8226_V2P0_REV3) &&
+ (revid_data->rev4 == PM8226_V2P0_REV4) &&
+ (revid_data->pmic_type == PM8226_V2P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8226_V2P0_SUBTYPE))
+ return QPNP_REV_ID_8026_2_0;
+ else if ((revid_data->rev1 == PM8226_V1P0_REV1) &&
+ (revid_data->rev2 == PM8226_V1P0_REV2) &&
+ (revid_data->rev3 == PM8226_V1P0_REV3) &&
+ (revid_data->rev4 == PM8226_V1P0_REV4) &&
+ (revid_data->pmic_type == PM8226_V1P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8226_V1P0_SUBTYPE))
+ return QPNP_REV_ID_8026_1_0;
+ else if ((revid_data->rev1 == PM8110_V1P0_REV1) &&
+ (revid_data->rev2 == PM8110_V1P0_REV2) &&
+ (revid_data->rev3 == PM8110_V1P0_REV3) &&
+ (revid_data->rev4 == PM8110_V1P0_REV4) &&
+ (revid_data->pmic_type == PM8110_V1P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8110_V1P0_SUBTYPE))
+ return QPNP_REV_ID_8110_1_0;
+ else if ((revid_data->rev1 == PM8110_V2P0_REV1) &&
+ (revid_data->rev2 == PM8110_V2P0_REV2) &&
+ (revid_data->rev3 == PM8110_V2P0_REV3) &&
+ (revid_data->rev4 == PM8110_V2P0_REV4) &&
+ (revid_data->pmic_type == PM8110_V2P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8110_V2P0_SUBTYPE))
+ return QPNP_REV_ID_8110_2_0;
+ else if ((revid_data->rev1 == PM8916_V1P0_REV1) &&
+ (revid_data->rev2 == PM8916_V1P0_REV2) &&
+ (revid_data->rev3 == PM8916_V1P0_REV3) &&
+ (revid_data->rev4 == PM8916_V1P0_REV4) &&
+ (revid_data->pmic_type == PM8916_V1P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8916_V1P0_SUBTYPE))
+ return QPNP_REV_ID_8916_1_0;
+ else if ((revid_data->rev1 == PM8916_V1P1_REV1) &&
+ (revid_data->rev2 == PM8916_V1P1_REV2) &&
+ (revid_data->rev3 == PM8916_V1P1_REV3) &&
+ (revid_data->rev4 == PM8916_V1P1_REV4) &&
+ (revid_data->pmic_type == PM8916_V1P1_TYPE) &&
+ (revid_data->pmic_subtype == PM8916_V1P1_SUBTYPE))
+ return QPNP_REV_ID_8916_1_1;
+ else if ((revid_data->rev1 == PM8916_V2P0_REV1) &&
+ (revid_data->rev2 == PM8916_V2P0_REV2) &&
+ (revid_data->rev3 == PM8916_V2P0_REV3) &&
+ (revid_data->rev4 == PM8916_V2P0_REV4) &&
+ (revid_data->pmic_type == PM8916_V2P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8916_V2P0_SUBTYPE))
+ return QPNP_REV_ID_8916_2_0;
+ else if ((revid_data->rev1 == PM8909_V1P0_REV1) &&
+ (revid_data->rev2 == PM8909_V1P0_REV2) &&
+ (revid_data->rev3 == PM8909_V1P0_REV3) &&
+ (revid_data->rev4 == PM8909_V1P0_REV4) &&
+ (revid_data->pmic_type == PM8909_V1P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8909_V1P0_SUBTYPE))
+ return QPNP_REV_ID_8909_1_0;
+ else if ((revid_data->rev1 == PM8909_V1P1_REV1) &&
+ (revid_data->rev2 == PM8909_V1P1_REV2) &&
+ (revid_data->rev3 == PM8909_V1P1_REV3) &&
+ (revid_data->rev4 == PM8909_V1P1_REV4) &&
+ (revid_data->pmic_type == PM8909_V1P1_TYPE) &&
+ (revid_data->pmic_subtype == PM8909_V1P1_SUBTYPE))
+ return QPNP_REV_ID_8909_1_1;
+ else if ((revid_data->rev4 == PM8950_V1P0_REV4) &&
+ (revid_data->pmic_type == PM8950_V1P0_TYPE) &&
+ (revid_data->pmic_subtype == PM8950_V1P0_SUBTYPE))
+ return QPNP_REV_ID_PM8950_1_0;
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL(qpnp_adc_get_revid_version);
+
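+/*
+ * Parse the common ADC properties and the per-channel child nodes from the
+ * SPMI device node.  A sketch of one channel child node, with purely
+ * illustrative values (real channel numbers and settings depend on the
+ * PMIC and board):
+ *
+ *	chan@8 {
+ *		label = "die_temp";
+ *		reg = <8>;
+ *		qcom,decimation = <0>;
+ *		qcom,pre-div-channel-scaling = <0>;
+ *		qcom,calibration-type = "absolute";
+ *		qcom,scale-function = <3>;
+ *		qcom,hw-settle-time = <0>;
+ *		qcom,fast-avg-setup = <0>;
+ *	};
+ *
+ * plus the node-level "qcom,adc-vdd-reference" and
+ * "qcom,adc-bit-resolution" properties and the optional hkadc_ldo /
+ * hkadc_ok supplies.
+ */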
+int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi,
+ struct qpnp_adc_drv *adc_qpnp)
+{
+ struct device_node *node = spmi->dev.of_node;
+ struct resource *res;
+ struct device_node *child;
+ struct qpnp_adc_amux *adc_channel_list;
+ struct qpnp_adc_properties *adc_prop;
+ struct qpnp_adc_amux_properties *amux_prop;
+ int count_adc_channel_list = 0, decimation, rc = 0, i = 0;
+ bool adc_hc;
+
+ if (!node)
+ return -EINVAL;
+
+ for_each_child_of_node(node, child)
+ count_adc_channel_list++;
+
+ if (!count_adc_channel_list) {
+ pr_err("No channel listing\n");
+ return -EINVAL;
+ }
+
+ adc_qpnp->spmi = spmi;
+
+ adc_prop = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_properties),
+ GFP_KERNEL);
+ if (!adc_prop)
+ return -ENOMEM;
+
+ adc_channel_list = devm_kzalloc(&spmi->dev,
+ ((sizeof(struct qpnp_adc_amux)) * count_adc_channel_list),
+ GFP_KERNEL);
+ if (!adc_channel_list)
+ return -ENOMEM;
+
+ amux_prop = devm_kzalloc(&spmi->dev,
+ sizeof(struct qpnp_adc_amux_properties) +
+ sizeof(struct qpnp_vadc_chan_properties), GFP_KERNEL);
+ if (!amux_prop) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ adc_qpnp->adc_channels = adc_channel_list;
+ adc_qpnp->amux_prop = amux_prop;
+ adc_hc = adc_qpnp->adc_hc;
+ adc_prop->adc_hc = adc_hc;
+
+ for_each_child_of_node(node, child) {
+ int channel_num, scaling, post_scaling, hw_settle_time;
+ int fast_avg_setup, calib_type = 0, rc;
+ const char *calibration_param, *channel_name;
+
+ channel_name = of_get_property(child,
+ "label", NULL) ? : child->name;
+ if (!channel_name) {
+ pr_err("Invalid channel name\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(child, "reg", &channel_num);
+ if (rc) {
+ pr_err("Invalid channel num\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child, "qcom,decimation",
+ &decimation);
+ if (rc) {
+ pr_err("Invalid channel decimation property\n");
+ return -EINVAL;
+ }
+ if (!of_device_is_compatible(node, "qcom,qpnp-iadc")) {
+ rc = of_property_read_u32(child,
+ "qcom,hw-settle-time", &hw_settle_time);
+ if (rc) {
+ pr_err("Invalid channel hw settle time property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,pre-div-channel-scaling", &scaling);
+ if (rc) {
+ pr_err("Invalid channel scaling property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(child,
+ "qcom,scale-function", &post_scaling);
+ if (rc) {
+ pr_err("Invalid channel post scaling property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_string(child,
+ "qcom,calibration-type", &calibration_param);
+ if (rc) {
+ pr_err("Invalid calibration type\n");
+ return -EINVAL;
+ }
+ if (!strcmp(calibration_param, "absolute")) {
+ if (adc_hc)
+ calib_type = ADC_HC_ABS_CAL;
+ else
+ calib_type = CALIB_ABSOLUTE;
+ } else if (!strcmp(calibration_param, "ratiometric")) {
+ if (adc_hc)
+ calib_type = ADC_HC_RATIO_CAL;
+ else
+ calib_type = CALIB_RATIOMETRIC;
+ } else if (!strcmp(calibration_param, "no_cal")) {
+ if (adc_hc)
+ calib_type = ADC_HC_NO_CAL;
+ else {
+ pr_err("%s: Invalid calibration property\n",
+ __func__);
+ return -EINVAL;
+ }
+ } else {
+ pr_err("%s: Invalid calibration property\n",
+ __func__);
+ return -EINVAL;
+ }
+ }
+ rc = of_property_read_u32(child,
+ "qcom,fast-avg-setup", &fast_avg_setup);
+ if (rc) {
+ pr_err("Invalid channel fast average setup\n");
+ return -EINVAL;
+ }
+ /* Individual channel properties */
+ adc_channel_list[i].name = (char *)channel_name;
+ adc_channel_list[i].channel_num = channel_num;
+ adc_channel_list[i].adc_decimation = decimation;
+ adc_channel_list[i].fast_avg_setup = fast_avg_setup;
+ if (!of_device_is_compatible(node, "qcom,qpnp-iadc")) {
+ adc_channel_list[i].chan_path_prescaling = scaling;
+ adc_channel_list[i].adc_scale_fn = post_scaling;
+ adc_channel_list[i].hw_settle_time = hw_settle_time;
+ adc_channel_list[i].calib_type = calib_type;
+ }
+ i++;
+ }
+
+ /* Get the ADC VDD reference voltage and ADC bit resolution */
+ rc = of_property_read_u32(node, "qcom,adc-vdd-reference",
+ &adc_prop->adc_vdd_reference);
+ if (rc) {
+ pr_err("Invalid adc vdd reference property\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(node, "qcom,adc-bit-resolution",
+ &adc_prop->bitresolution);
+ if (rc) {
+ pr_err("Invalid adc bit resolution property\n");
+ return -EINVAL;
+ }
+ adc_qpnp->adc_prop = adc_prop;
+
+ /* Get the peripheral address */
+ res = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+ if (!res) {
+ pr_err("No base address definition\n");
+ return -EINVAL;
+ }
+
+ adc_qpnp->slave = spmi->sid;
+ adc_qpnp->offset = res->start;
+
+ /* Register the ADC peripheral interrupt */
+ adc_qpnp->adc_irq_eoc = spmi_get_irq_byname(spmi, NULL,
+ "eoc-int-en-set");
+ if (adc_qpnp->adc_irq_eoc < 0) {
+ pr_err("Invalid irq\n");
+ return -ENXIO;
+ }
+
+ init_completion(&adc_qpnp->adc_rslt_completion);
+
+ if (of_get_property(node, "hkadc_ldo-supply", NULL)) {
+ adc_qpnp->hkadc_ldo = regulator_get(&spmi->dev,
+ "hkadc_ldo");
+ if (IS_ERR(adc_qpnp->hkadc_ldo)) {
+ pr_err("hkadc_ldo-supply node not found\n");
+ return -EINVAL;
+ }
+
+ rc = regulator_set_voltage(adc_qpnp->hkadc_ldo,
+ QPNP_VADC_LDO_VOLTAGE_MIN,
+ QPNP_VADC_LDO_VOLTAGE_MAX);
+ if (rc < 0) {
+ pr_err("setting voltage for hkadc_ldo failed\n");
+ return rc;
+ }
+
+ rc = regulator_set_optimum_mode(adc_qpnp->hkadc_ldo,
+ 100000);
+ if (rc < 0) {
+ pr_err("hkadc_ldo optimum mode failed%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (of_get_property(node, "hkadc_ok-supply", NULL)) {
+ adc_qpnp->hkadc_ldo_ok = regulator_get(&spmi->dev,
+ "hkadc_ok");
+ if (IS_ERR(adc_qpnp->hkadc_ldo_ok)) {
+ pr_err("hkadc_ok node not found\n");
+ return -EINVAL;
+ }
+
+ rc = regulator_set_voltage(adc_qpnp->hkadc_ldo_ok,
+ QPNP_VADC_OK_VOLTAGE_MIN,
+ QPNP_VADC_OK_VOLTAGE_MAX);
+ if (rc < 0) {
+ pr_err("setting voltage for hkadc-ldo-ok failed\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_adc_get_devicetree_data);
diff --git a/drivers/hwmon/qpnp-adc-current.c b/drivers/hwmon/qpnp-adc-current.c
new file mode 100644
index 000000000000..b9a9fbad94ab
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-current.c
@@ -0,0 +1,1654 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#ifdef CONFIG_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+
+/* QPNP IADC register definition */
+#define QPNP_IADC_REVISION1 0x0
+#define QPNP_IADC_REVISION2 0x1
+#define QPNP_IADC_REVISION3 0x2
+#define QPNP_IADC_REVISION4 0x3
+#define QPNP_IADC_PERPH_TYPE 0x4
+#define QPNP_IADC_PERH_SUBTYPE 0x5
+
+#define QPNP_IADC_SUPPORTED_REVISION2 1
+
+#define QPNP_STATUS1 0x8
+#define QPNP_STATUS1_OP_MODE 4
+#define QPNP_STATUS1_MULTI_MEAS_EN BIT(3)
+#define QPNP_STATUS1_MEAS_INTERVAL_EN_STS BIT(2)
+#define QPNP_STATUS1_REQ_STS BIT(1)
+#define QPNP_STATUS1_EOC BIT(0)
+#define QPNP_STATUS1_REQ_STS_EOC_MASK 0x3
+#define QPNP_STATUS2 0x9
+#define QPNP_STATUS2_CONV_SEQ_STATE_SHIFT 4
+#define QPNP_STATUS2_FIFO_NOT_EMPTY_FLAG BIT(1)
+#define QPNP_STATUS2_CONV_SEQ_TIMEOUT_STS BIT(0)
+#define QPNP_CONV_TIMEOUT_ERR 2
+
+#define QPNP_IADC_MODE_CTL 0x40
+#define QPNP_OP_MODE_SHIFT 4
+#define QPNP_USE_BMS_DATA BIT(4)
+#define QPNP_VADC_SYNCH_EN BIT(2)
+#define QPNP_OFFSET_RMV_EN BIT(1)
+#define QPNP_ADC_TRIM_EN BIT(0)
+#define QPNP_IADC_EN_CTL1 0x46
+#define QPNP_IADC_ADC_EN BIT(7)
+#define QPNP_ADC_CH_SEL_CTL 0x48
+#define QPNP_ADC_DIG_PARAM 0x50
+#define QPNP_ADC_CLK_SEL_MASK 0x3
+#define QPNP_ADC_DEC_RATIO_SEL_MASK 0xc
+#define QPNP_ADC_DIG_DEC_RATIO_SEL_SHIFT 2
+
+#define QPNP_CONV_REQ 0x52
+#define QPNP_CONV_REQ_SET BIT(7)
+#define QPNP_CONV_SEQ_CTL 0x54
+#define QPNP_CONV_SEQ_HOLDOFF_SHIFT 4
+#define QPNP_CONV_SEQ_TRIG_CTL 0x55
+#define QPNP_FAST_AVG_CTL 0x5a
+
+#define QPNP_M0_LOW_THR_LSB 0x5c
+#define QPNP_M0_LOW_THR_MSB 0x5d
+#define QPNP_M0_HIGH_THR_LSB 0x5e
+#define QPNP_M0_HIGH_THR_MSB 0x5f
+#define QPNP_M1_LOW_THR_LSB 0x69
+#define QPNP_M1_LOW_THR_MSB 0x6a
+#define QPNP_M1_HIGH_THR_LSB 0x6b
+#define QPNP_M1_HIGH_THR_MSB 0x6c
+
+#define QPNP_DATA0 0x60
+#define QPNP_DATA1 0x61
+#define QPNP_CONV_TIMEOUT_ERR 2
+
+#define QPNP_IADC_SEC_ACCESS 0xD0
+#define QPNP_IADC_SEC_ACCESS_DATA 0xA5
+#define QPNP_IADC_MSB_OFFSET 0xF2
+#define QPNP_IADC_LSB_OFFSET 0xF3
+#define QPNP_IADC_NOMINAL_RSENSE 0xF4
+#define QPNP_IADC_ATE_GAIN_CALIB_OFFSET 0xF5
+#define QPNP_INT_TEST_VAL 0xE1
+
+#define QPNP_IADC_ADC_CH_SEL_CTL 0x48
+#define QPNP_IADC_ADC_CHX_SEL_SHIFT 3
+
+#define QPNP_IADC_ADC_DIG_PARAM 0x50
+#define QPNP_IADC_CLK_SEL_SHIFT 1
+#define QPNP_IADC_DEC_RATIO_SEL 3
+
+#define QPNP_IADC_CONV_REQUEST 0x52
+#define QPNP_IADC_CONV_REQ BIT(7)
+
+#define QPNP_IADC_DATA0 0x60
+#define QPNP_IADC_DATA1 0x61
+
+#define QPNP_ADC_CONV_TIME_MIN 2000
+#define QPNP_ADC_CONV_TIME_MAX 2100
+#define QPNP_ADC_ERR_COUNT 20
+
+#define QPNP_ADC_GAIN_NV 17857
+#define QPNP_OFFSET_CALIBRATION_SHORT_CADC_LEADS_IDEAL 0
+#define QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR 10000000
+#define QPNP_IADC_NANO_VOLTS_FACTOR 1000000
+#define QPNP_IADC_CALIB_SECONDS 300000
+#define QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT 15625
+#define QPNP_IADC_DIE_TEMP_CALIB_OFFSET 5000
+
+#define QPNP_RAW_CODE_16_BIT_MSB_MASK 0xff00
+#define QPNP_RAW_CODE_16_BIT_LSB_MASK 0xff
+#define QPNP_BIT_SHIFT_8 8
+#define QPNP_RSENSE_MSB_SIGN_CHECK 0x80
+#define QPNP_ADC_COMPLETION_TIMEOUT HZ
+#define SMBB_BAT_IF_TRIM_CNST_RDS_MASK 0x7
+#define SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_0 0
+#define SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2 2
+#define QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST 127
+#define QPNP_IADC_RSENSE_DEFAULT_VALUE 7800000
+#define QPNP_IADC_RSENSE_DEFAULT_TYPEB_GF 9000000
+#define QPNP_IADC_RSENSE_DEFAULT_TYPEB_SMIC 9700000
+
+struct qpnp_iadc_comp {
+ bool ext_rsense;
+ u8 id;
+ u8 sys_gain;
+ u8 revision_dig_major;
+ u8 revision_ana_minor;
+};
+
+struct qpnp_iadc_chip {
+ struct device *dev;
+ struct qpnp_adc_drv *adc;
+ int32_t rsense;
+ bool external_rsense;
+ bool default_internal_rsense;
+ struct device *iadc_hwmon;
+ struct list_head list;
+ int64_t die_temp;
+ struct delayed_work iadc_work;
+ bool iadc_mode_sel;
+ struct qpnp_iadc_comp iadc_comp;
+ struct qpnp_vadc_chip *vadc_dev;
+ struct work_struct trigger_completion_work;
+ bool skip_auto_calibrations;
+ bool iadc_poll_eoc;
+ u16 batt_id_trim_cnst_rds;
+ int rds_trim_default_type;
+ int max_channels_available;
+ bool rds_trim_default_check;
+ int32_t rsense_workaround_value;
+ struct sensor_device_attribute sens_attr[0];
+};
+
+LIST_HEAD(qpnp_iadc_device_list);
+
+enum qpnp_iadc_rsense_rds_workaround {
+ QPNP_IADC_RDS_DEFAULT_TYPEA,
+ QPNP_IADC_RDS_DEFAULT_TYPEB,
+ QPNP_IADC_RDS_DEFAULT_TYPEC,
+};
+
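+/*
+ * Register access helpers: every IADC register is reached over the SPMI
+ * bus one byte at a time, at (peripheral offset + reg) on this device's
+ * slave id.
+ */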
+static int32_t qpnp_iadc_read_reg(struct qpnp_iadc_chip *iadc,
+ uint32_t reg, u8 *data)
+{
+ int rc;
+
+ rc = spmi_ext_register_readl(iadc->adc->spmi->ctrl, iadc->adc->slave,
+ (iadc->adc->offset + reg), data, 1);
+ if (rc < 0) {
+ pr_err("qpnp iadc read reg %d failed with %d\n", reg, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_iadc_write_reg(struct qpnp_iadc_chip *iadc,
+ uint32_t reg, u8 data)
+{
+ int rc;
+ u8 *buf;
+
+ buf = &data;
+ rc = spmi_ext_register_writel(iadc->adc->spmi->ctrl, iadc->adc->slave,
+ (iadc->adc->offset + reg), buf, 1);
+ if (rc < 0) {
+ pr_err("qpnp iadc write reg %d failed with %d\n", reg, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qpnp_iadc_is_valid(struct qpnp_iadc_chip *iadc)
+{
+ struct qpnp_iadc_chip *iadc_chip = NULL;
+
+ list_for_each_entry(iadc_chip, &qpnp_iadc_device_list, list)
+ if (iadc == iadc_chip)
+ return 0;
+
+ return -EINVAL;
+}
+
+static void qpnp_iadc_trigger_completion(struct work_struct *work)
+{
+ struct qpnp_iadc_chip *iadc = container_of(work,
+ struct qpnp_iadc_chip, trigger_completion_work);
+
+ if (qpnp_iadc_is_valid(iadc) < 0)
+ return;
+
+ complete(&iadc->adc->adc_rslt_completion);
+
+ return;
+}
+
+static irqreturn_t qpnp_iadc_isr(int irq, void *dev_id)
+{
+ struct qpnp_iadc_chip *iadc = dev_id;
+
+ schedule_work(&iadc->trigger_completion_work);
+
+ return IRQ_HANDLED;
+}
+
+static int32_t qpnp_iadc_enable(struct qpnp_iadc_chip *dev, bool state)
+{
+ int rc = 0;
+ u8 data = 0;
+
+ data = QPNP_IADC_ADC_EN;
+ if (state) {
+ rc = qpnp_iadc_write_reg(dev, QPNP_IADC_EN_CTL1,
+ data);
+ if (rc < 0) {
+ pr_err("IADC enable failed\n");
+ return rc;
+ }
+ } else {
+ rc = qpnp_iadc_write_reg(dev, QPNP_IADC_EN_CTL1,
+ (~data & QPNP_IADC_ADC_EN));
+ if (rc < 0) {
+ pr_err("IADC disable failed\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_iadc_status_debug(struct qpnp_iadc_chip *dev)
+{
+ int rc = 0;
+ u8 mode = 0, status1 = 0, chan = 0, dig = 0, en = 0;
+
+ rc = qpnp_iadc_read_reg(dev, QPNP_IADC_MODE_CTL, &mode);
+ if (rc < 0) {
+ pr_err("mode ctl register read failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_read_reg(dev, QPNP_ADC_DIG_PARAM, &dig);
+ if (rc < 0) {
+ pr_err("digital param read failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_read_reg(dev, QPNP_IADC_ADC_CH_SEL_CTL, &chan);
+ if (rc < 0) {
+ pr_err("channel read failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_read_reg(dev, QPNP_STATUS1, &status1);
+ if (rc < 0) {
+ pr_err("status1 read failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_read_reg(dev, QPNP_IADC_EN_CTL1, &en);
+ if (rc < 0) {
+ pr_err("en read failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("EOC not set with status:%x, dig:%x, ch:%x, mode:%x, en:%x\n",
+ status1, dig, chan, mode, en);
+
+ rc = qpnp_iadc_enable(dev, false);
+ if (rc < 0) {
+ pr_err("IADC disable failed with %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_iadc_read_conversion_result(struct qpnp_iadc_chip *iadc,
+ int16_t *data)
+{
+ uint8_t rslt_lsb, rslt_msb;
+ uint16_t rslt;
+ int32_t rc;
+
+ rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA0, &rslt_lsb);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_DATA1, &rslt_msb);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed with %d\n", rc);
+ return rc;
+ }
+
+ rslt = (rslt_msb << 8) | rslt_lsb;
+ *data = rslt;
+
+ rc = qpnp_iadc_enable(iadc, false);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+#define QPNP_IADC_PM8026_2_REV2 4
+#define QPNP_IADC_PM8026_2_REV3 2
+
+#define QPNP_COEFF_1 969000
+#define QPNP_COEFF_2 32
+#define QPNP_COEFF_3_TYPEA 1700000
+#define QPNP_COEFF_3_TYPEB 1000000
+#define QPNP_COEFF_4 100
+#define QPNP_COEFF_5 15
+#define QPNP_COEFF_6 100000
+#define QPNP_COEFF_7 21
+#define QPNP_COEFF_8 100000000
+#define QPNP_COEFF_9 38
+#define QPNP_COEFF_10 40
+#define QPNP_COEFF_11 7
+#define QPNP_COEFF_12 11
+#define QPNP_COEFF_13 37
+#define QPNP_COEFF_14 39
+#define QPNP_COEFF_15 9
+#define QPNP_COEFF_16 11
+#define QPNP_COEFF_17 851200
+#define QPNP_COEFF_18 296500
+#define QPNP_COEFF_19 222400
+#define QPNP_COEFF_20 813800
+#define QPNP_COEFF_21 1059100
+#define QPNP_COEFF_22 5000000
+#define QPNP_COEFF_23 3722500
+#define QPNP_COEFF_24 84
+#define QPNP_COEFF_25 33
+#define QPNP_COEFF_26 22
+#define QPNP_COEFF_27 53
+#define QPNP_COEFF_28 48
+
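+/*
+ * Die-temperature and process compensation: the reading is scaled up and
+ * then divided by a correction term of the form
+ * 1000 * (1000000 - (coeff_a * die_temp + coeff_b) / 100), with coeff_a
+ * and coeff_b picked per PMIC revision, fab id and rsense type below.
+ * For an external rsense the ATE system-gain trim (treated as negative
+ * above code 127) is folded into the same divisor.
+ */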
+static int32_t qpnp_iadc_comp(int64_t *result, struct qpnp_iadc_chip *iadc,
+ int64_t die_temp)
+{
+ int64_t temp_var = 0, sys_gain_coeff = 0, old;
+ int32_t coeff_a = 0, coeff_b = 0;
+ int version = 0;
+
+ version = qpnp_adc_get_revid_version(iadc->dev);
+ if (version == -EINVAL)
+ return 0;
+
+ old = *result;
+ *result = *result * 1000000;
+
+ if (iadc->iadc_comp.sys_gain > 127)
+ sys_gain_coeff = -QPNP_COEFF_6 *
+ (iadc->iadc_comp.sys_gain - 128);
+ else
+ sys_gain_coeff = QPNP_COEFF_6 *
+ iadc->iadc_comp.sys_gain;
+
+ switch (version) {
+ case QPNP_REV_ID_8941_3_1:
+ switch (iadc->iadc_comp.id) {
+ case COMP_ID_GF:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ coeff_a = QPNP_COEFF_2;
+ coeff_b = -QPNP_COEFF_3_TYPEA;
+ } else {
+ if (*result < 0) {
+ /* charge */
+ coeff_a = QPNP_COEFF_5;
+ coeff_b = QPNP_COEFF_6;
+ } else {
+ /* discharge */
+ coeff_a = -QPNP_COEFF_7;
+ coeff_b = QPNP_COEFF_6;
+ }
+ }
+ break;
+ case COMP_ID_TSMC:
+ default:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ coeff_a = QPNP_COEFF_2;
+ coeff_b = -QPNP_COEFF_3_TYPEB;
+ } else {
+ if (*result < 0) {
+ /* charge */
+ coeff_a = QPNP_COEFF_5;
+ coeff_b = QPNP_COEFF_6;
+ } else {
+ /* discharge */
+ coeff_a = -QPNP_COEFF_7;
+ coeff_b = QPNP_COEFF_6;
+ }
+ }
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8026_2_1:
+ case QPNP_REV_ID_8026_2_2:
+ /* pm8026 rev 2.1 and 2.2 */
+ switch (iadc->iadc_comp.id) {
+ case COMP_ID_GF:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ if (*result < 0) {
+ /* charge */
+ coeff_a = 0;
+ coeff_b = 0;
+ } else {
+ coeff_a = QPNP_COEFF_25;
+ coeff_b = 0;
+ }
+ } else {
+ if (*result < 0) {
+ /* charge */
+ coeff_a = 0;
+ coeff_b = 0;
+ } else {
+ /* discharge */
+ coeff_a = 0;
+ coeff_b = 0;
+ }
+ }
+ break;
+ case COMP_ID_TSMC:
+ default:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ if (*result < 0) {
+ /* charge */
+ coeff_a = 0;
+ coeff_b = 0;
+ } else {
+ coeff_a = QPNP_COEFF_26;
+ coeff_b = 0;
+ }
+ } else {
+ if (*result < 0) {
+ /* charge */
+ coeff_a = 0;
+ coeff_b = 0;
+ } else {
+ /* discharge */
+ coeff_a = 0;
+ coeff_b = 0;
+ }
+ }
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8026_1_0:
+ /* pm8026 rev 1.0 */
+ switch (iadc->iadc_comp.id) {
+ case COMP_ID_GF:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ if (*result < 0) {
+ /* charge */
+ coeff_a = QPNP_COEFF_9;
+ coeff_b = -QPNP_COEFF_17;
+ } else {
+ coeff_a = QPNP_COEFF_10;
+ coeff_b = QPNP_COEFF_18;
+ }
+ } else {
+ if (*result < 0) {
+ /* charge */
+ coeff_a = -QPNP_COEFF_11;
+ coeff_b = 0;
+ } else {
+ /* discharge */
+ coeff_a = -QPNP_COEFF_17;
+ coeff_b = -QPNP_COEFF_19;
+ }
+ }
+ break;
+ case COMP_ID_TSMC:
+ default:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ if (*result < 0) {
+ /* charge */
+ coeff_a = QPNP_COEFF_13;
+ coeff_b = -QPNP_COEFF_20;
+ } else {
+ coeff_a = QPNP_COEFF_14;
+ coeff_b = QPNP_COEFF_21;
+ }
+ } else {
+ if (*result < 0) {
+ /* charge */
+ coeff_a = -QPNP_COEFF_15;
+ coeff_b = 0;
+ } else {
+ /* discharge */
+ coeff_a = -QPNP_COEFF_12;
+ coeff_b = -QPNP_COEFF_19;
+ }
+ }
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8110_1_0:
+ /* pm8110 rev 1.0 */
+ switch (iadc->iadc_comp.id) {
+ case COMP_ID_GF:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ if (*result < 0) {
+ /* charge */
+ coeff_a = QPNP_COEFF_24;
+ coeff_b = -QPNP_COEFF_22;
+ } else {
+ coeff_a = QPNP_COEFF_24;
+ coeff_b = -QPNP_COEFF_23;
+ }
+ }
+ break;
+ case COMP_ID_SMIC:
+ default:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ if (*result < 0) {
+ /* charge */
+ coeff_a = QPNP_COEFF_24;
+ coeff_b = -QPNP_COEFF_22;
+ } else {
+ coeff_a = QPNP_COEFF_24;
+ coeff_b = -QPNP_COEFF_23;
+ }
+ }
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8110_2_0:
+ die_temp -= 25000;
+ /* pm8110 rev 2.0 */
+ switch (iadc->iadc_comp.id) {
+ case COMP_ID_GF:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ if (*result < 0) {
+ /* charge */
+ coeff_a = 0;
+ coeff_b = 0;
+ } else {
+ coeff_a = QPNP_COEFF_27;
+ coeff_b = 0;
+ }
+ }
+ break;
+ case COMP_ID_SMIC:
+ default:
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ if (*result < 0) {
+ /* charge */
+ coeff_a = 0;
+ coeff_b = 0;
+ } else {
+ coeff_a = QPNP_COEFF_28;
+ coeff_b = 0;
+ }
+ }
+ break;
+ }
+ break;
+ default:
+ case QPNP_REV_ID_8026_2_0:
+ /* pm8026 rev 2.0 and unhandled revisions */
+ coeff_a = 0;
+ coeff_b = 0;
+ break;
+ }
+
+ temp_var = (coeff_a * die_temp) + coeff_b;
+ temp_var = div64_s64(temp_var, QPNP_COEFF_4);
+ temp_var = 1000 * (1000000 - temp_var);
+
+ if (!iadc->iadc_comp.ext_rsense) {
+ /* internal rsense */
+ *result = div64_s64(*result * 1000, temp_var);
+ }
+
+ if (iadc->iadc_comp.ext_rsense) {
+ /* external rsense */
+ sys_gain_coeff = (1000000 +
+ div64_s64(sys_gain_coeff, QPNP_COEFF_4));
+ temp_var = div64_s64(temp_var * sys_gain_coeff, 1000000);
+ *result = div64_s64(*result * 1000, temp_var);
+ }
+ pr_debug("%lld compensated into %lld, a: %d, b: %d, sys_gain: %lld\n",
+ old, *result, coeff_a, coeff_b, sys_gain_coeff);
+
+ return 0;
+}
+
+int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc, int64_t *result)
+{
+ return qpnp_iadc_comp(result, iadc, iadc->die_temp);
+}
+EXPORT_SYMBOL(qpnp_iadc_comp_result);
+
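+/*
+ * Default-rsense workaround: when the USR TRIM2 fullscale code still reads
+ * the constant 127 and the SMBB battery-interface CNST_RDS trim matches the
+ * per-type (A/B/C) checks below, a fixed default rsense value is used
+ * instead of decoding the nominal-rsense trim register.
+ */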
+static int qpnp_iadc_rds_trim_update_check(struct qpnp_iadc_chip *iadc)
+{
+ int rc = 0;
+ u8 trim2_val = 0, smbb_batt_trm_data = 0;
+ u8 smbb_batt_trm_cnst_rds = 0;
+
+ if (!iadc->rds_trim_default_check) {
+ pr_debug("No internal rds trim check needed\n");
+ return 0;
+ }
+
+ rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE, &trim2_val);
+ if (rc < 0) {
+ pr_err("qpnp adc trim2_fullscale1 reg read failed %d\n", rc);
+ return rc;
+ }
+
+ rc = spmi_ext_register_readl(iadc->adc->spmi->ctrl, iadc->adc->slave,
+ iadc->batt_id_trim_cnst_rds, &smbb_batt_trm_data, 1);
+ if (rc < 0) {
+ pr_err("batt_id trim_cnst rds reg read failed %d\n", rc);
+ return rc;
+ }
+
+ smbb_batt_trm_cnst_rds = smbb_batt_trm_data &
+ SMBB_BAT_IF_TRIM_CNST_RDS_MASK;
+
+ pr_debug("n_trim:0x%x smb_trm:0x%x\n", trim2_val, smbb_batt_trm_data);
+
+ if (iadc->rds_trim_default_type == QPNP_IADC_RDS_DEFAULT_TYPEA) {
+
+ if ((smbb_batt_trm_cnst_rds ==
+ SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
+ (trim2_val == QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
+ iadc->rsense_workaround_value =
+ QPNP_IADC_RSENSE_DEFAULT_VALUE;
+ iadc->default_internal_rsense = true;
+ }
+ } else if (iadc->rds_trim_default_type ==
+ QPNP_IADC_RDS_DEFAULT_TYPEB) {
+ if ((smbb_batt_trm_cnst_rds >=
+ SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
+ (trim2_val == QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
+ iadc->rsense_workaround_value =
+ QPNP_IADC_RSENSE_DEFAULT_VALUE;
+ iadc->default_internal_rsense = true;
+ } else if ((smbb_batt_trm_cnst_rds <
+ SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
+ (trim2_val ==
+ QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
+ if (iadc->iadc_comp.id == COMP_ID_GF) {
+ iadc->rsense_workaround_value =
+ QPNP_IADC_RSENSE_DEFAULT_TYPEB_GF;
+ iadc->default_internal_rsense = true;
+ } else if (iadc->iadc_comp.id == COMP_ID_SMIC) {
+ iadc->rsense_workaround_value =
+ QPNP_IADC_RSENSE_DEFAULT_TYPEB_SMIC;
+ iadc->default_internal_rsense = true;
+ }
+ }
+ } else if (iadc->rds_trim_default_type == QPNP_IADC_RDS_DEFAULT_TYPEC) {
+
+ if ((smbb_batt_trm_cnst_rds >
+ SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_0) &&
+ (smbb_batt_trm_cnst_rds <=
+ SMBB_BAT_IF_TRIM_CNST_RDS_MASK_CONST_2) &&
+ (trim2_val == QPNP_IADC1_USR_TRIM2_ADC_FULLSCALE1_CONST)) {
+ iadc->rsense_workaround_value =
+ QPNP_IADC_RSENSE_DEFAULT_VALUE;
+ iadc->default_internal_rsense = true;
+ }
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_iadc_comp_info(struct qpnp_iadc_chip *iadc)
+{
+ int rc = 0;
+
+ rc = qpnp_iadc_read_reg(iadc, QPNP_INT_TEST_VAL, &iadc->iadc_comp.id);
+ if (rc < 0) {
+ pr_err("qpnp adc comp id failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2,
+ &iadc->iadc_comp.revision_dig_major);
+ if (rc < 0) {
+ pr_err("qpnp adc revision2 read failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION3,
+ &iadc->iadc_comp.revision_ana_minor);
+ if (rc < 0) {
+ pr_err("qpnp adc revision3 read failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_ATE_GAIN_CALIB_OFFSET,
+ &iadc->iadc_comp.sys_gain);
+ if (rc < 0) {
+ pr_err("full scale read failed with %d\n", rc);
+ return rc;
+ }
+
+ if (iadc->external_rsense)
+ iadc->iadc_comp.ext_rsense = true;
+
+ pr_debug("fab id = %u, revision_dig_major = %u, revision_ana_minor = %u sys gain = %u, external_rsense = %d\n",
+ iadc->iadc_comp.id,
+ iadc->iadc_comp.revision_dig_major,
+ iadc->iadc_comp.revision_ana_minor,
+ iadc->iadc_comp.sys_gain,
+ iadc->iadc_comp.ext_rsense);
+ return rc;
+}
+
+static int32_t qpnp_iadc_configure(struct qpnp_iadc_chip *iadc,
+ enum qpnp_iadc_channels channel,
+ uint16_t *raw_code, uint32_t mode_sel)
+{
+ u8 qpnp_iadc_mode_reg = 0, qpnp_iadc_ch_sel_reg = 0;
+ u8 qpnp_iadc_conv_req = 0, qpnp_iadc_dig_param_reg = 0;
+ u8 status1 = 0;
+ uint32_t count = 0;
+ int32_t rc = 0;
+
+ qpnp_iadc_ch_sel_reg = channel;
+
+ qpnp_iadc_dig_param_reg |= iadc->adc->amux_prop->decimation <<
+ QPNP_IADC_DEC_RATIO_SEL;
+ if (iadc->iadc_mode_sel)
+ qpnp_iadc_mode_reg |= (QPNP_ADC_TRIM_EN | QPNP_VADC_SYNCH_EN);
+ else
+ qpnp_iadc_mode_reg |= QPNP_ADC_TRIM_EN;
+
+ qpnp_iadc_conv_req = QPNP_IADC_CONV_REQ;
+
+ rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MODE_CTL, qpnp_iadc_mode_reg);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_ADC_CH_SEL_CTL,
+ qpnp_iadc_ch_sel_reg);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_write_reg(iadc, QPNP_ADC_DIG_PARAM,
+ qpnp_iadc_dig_param_reg);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_iadc_write_reg(iadc, QPNP_FAST_AVG_CTL,
+ iadc->adc->amux_prop->fast_avg_setup);
+ if (rc < 0) {
+ pr_err("qpnp adc fast averaging configure error\n");
+ return rc;
+ }
+
+ if (!iadc->iadc_poll_eoc)
+ reinit_completion(&iadc->adc->adc_rslt_completion);
+
+ rc = qpnp_iadc_enable(iadc, true);
+ if (rc)
+ return rc;
+
+ rc = qpnp_iadc_write_reg(iadc, QPNP_CONV_REQ, qpnp_iadc_conv_req);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ return rc;
+ }
+
+ if (iadc->iadc_poll_eoc) {
+ while (status1 != QPNP_STATUS1_EOC) {
+ rc = qpnp_iadc_read_reg(iadc, QPNP_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ status1 &= QPNP_STATUS1_REQ_STS_EOC_MASK;
+ usleep_range(QPNP_ADC_CONV_TIME_MIN,
+ QPNP_ADC_CONV_TIME_MAX);
+ count++;
+ if (count > QPNP_ADC_ERR_COUNT) {
+ pr_err("retry error exceeded\n");
+ rc = qpnp_iadc_status_debug(iadc);
+ if (rc < 0)
+ pr_err("IADC status debug failed\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ }
+ } else {
+ rc = wait_for_completion_timeout(
+ &iadc->adc->adc_rslt_completion,
+ QPNP_ADC_COMPLETION_TIMEOUT);
+ if (!rc) {
+ rc = qpnp_iadc_read_reg(iadc, QPNP_STATUS1, &status1);
+ if (rc < 0)
+ return rc;
+ status1 &= QPNP_STATUS1_REQ_STS_EOC_MASK;
+ if (status1 == QPNP_STATUS1_EOC)
+ pr_debug("End of conversion status set\n");
+ else {
+ rc = qpnp_iadc_status_debug(iadc);
+ if (rc < 0) {
+ pr_err("status debug failed %d\n", rc);
+ return rc;
+ }
+ return -EINVAL;
+ }
+ }
+ }
+
+ rc = qpnp_iadc_read_conversion_result(iadc, raw_code);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
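+/*
+ * Raw-code to microvolt conversion for the calibration points: code 0xC000
+ * (IADC_CENTER) maps to 0 uV and each LSB is 542535/100000 uV, so
+ * offset_uv = (offset_raw - 0xC000) * 5.42535 and
+ * gain_uv = (gain_raw - offset_raw) * 5.42535.
+ */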
+#define IADC_CENTER 0xC000
+#define IADC_READING_RESOLUTION_N 542535
+#define IADC_READING_RESOLUTION_D 100000
+static int32_t qpnp_convert_raw_offset_voltage(struct qpnp_iadc_chip *iadc)
+{
+ s64 numerator;
+
+ if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
+ pr_err("raw offset errors! raw_gain:0x%x and raw_offset:0x%x\n",
+ iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);
+ return -EINVAL;
+ }
+
+ numerator = iadc->adc->calib.offset_raw - IADC_CENTER;
+ numerator *= IADC_READING_RESOLUTION_N;
+ iadc->adc->calib.offset_uv = div_s64(numerator,
+ IADC_READING_RESOLUTION_D);
+
+ numerator = iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw;
+ numerator *= IADC_READING_RESOLUTION_N;
+
+ iadc->adc->calib.gain_uv = div_s64(numerator,
+ IADC_READING_RESOLUTION_D);
+
+ pr_debug("gain_uv:%d offset_uv:%d\n",
+ iadc->adc->calib.gain_uv, iadc->adc->calib.offset_uv);
+ return 0;
+}
+
+#define IADC_IDEAL_RAW_GAIN 3291
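+/*
+ * USR trim calibration: read the 17.857 mV gain reference and a shorted
+ * offset channel (CSP/CSN externally, CSP2/CSN2 internally), convert both
+ * raw codes to microvolts, then write the offset code back through the
+ * secure-access MSB/LSB trim registers so later conversions use it. For
+ * the PM8026 v2 parts identified by dig/ana revision below, a fixed ideal
+ * gain delta (IADC_IDEAL_RAW_GAIN) is substituted for the measured one.
+ */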
+int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
+ bool batfet_closed)
+{
+ uint8_t rslt_lsb, rslt_msb;
+ int32_t rc = 0, version = 0;
+ uint16_t raw_data;
+ uint32_t mode_sel = 0;
+ bool iadc_offset_ch_batfet_check;
+
+ if (qpnp_iadc_is_valid(iadc) < 0)
+ return -EPROBE_DEFER;
+
+ mutex_lock(&iadc->adc->adc_lock);
+
+ if (iadc->iadc_poll_eoc) {
+ pr_debug("acquiring iadc eoc wakelock\n");
+ pm_stay_awake(iadc->dev);
+ }
+
+ iadc->adc->amux_prop->decimation = DECIMATION_TYPE1;
+ iadc->adc->amux_prop->fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+
+ rc = qpnp_iadc_configure(iadc, GAIN_CALIBRATION_17P857MV,
+ &raw_data, mode_sel);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed with %d\n", rc);
+ goto fail;
+ }
+
+ iadc->adc->calib.gain_raw = raw_data;
+
+ /*
+ * There is a feature on PM8941 in the BMS where, if the batfet is
+ * open, BMS reads requested on INTERNAL_RSENSE (channel 0) actually
+ * go to OFFSET_CALIBRATION_CSP_CSN (channel 5). Hence, if the batfet
+ * is open, we have to calibrate based on OFFSET_CALIBRATION_CSP_CSN
+ * even for the internal rsense.
+ */
+ version = qpnp_adc_get_revid_version(iadc->dev);
+ if ((version == QPNP_REV_ID_8941_3_1) ||
+ (version == QPNP_REV_ID_8941_3_0) ||
+ (version == QPNP_REV_ID_8941_2_0))
+ iadc_offset_ch_batfet_check = true;
+ else
+ iadc_offset_ch_batfet_check = false;
+
+ if ((iadc_offset_ch_batfet_check && !batfet_closed) ||
+ (iadc->external_rsense)) {
+ /* external offset calculation */
+ rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP_CSN,
+ &raw_data, mode_sel);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed with %d\n", rc);
+ goto fail;
+ }
+ } else {
+ /* internal offset calculation */
+ rc = qpnp_iadc_configure(iadc, OFFSET_CALIBRATION_CSP2_CSN2,
+ &raw_data, mode_sel);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed with %d\n", rc);
+ goto fail;
+ }
+ }
+
+ iadc->adc->calib.offset_raw = raw_data;
+ if (rc < 0) {
+ pr_err("qpnp adc offset/gain calculation failed\n");
+ goto fail;
+ }
+
+ if (iadc->iadc_comp.revision_dig_major == QPNP_IADC_PM8026_2_REV2
+ && iadc->iadc_comp.revision_ana_minor ==
+ QPNP_IADC_PM8026_2_REV3)
+ iadc->adc->calib.gain_raw =
+ iadc->adc->calib.offset_raw + IADC_IDEAL_RAW_GAIN;
+
+ pr_debug("raw gain:0x%x, raw offset:0x%x\n",
+ iadc->adc->calib.gain_raw, iadc->adc->calib.offset_raw);
+
+ rc = qpnp_convert_raw_offset_voltage(iadc);
+ if (rc < 0) {
+ pr_err("qpnp raw_voltage conversion failed\n");
+ goto fail;
+ }
+
+ rslt_msb = (raw_data & QPNP_RAW_CODE_16_BIT_MSB_MASK) >>
+ QPNP_BIT_SHIFT_8;
+ rslt_lsb = raw_data & QPNP_RAW_CODE_16_BIT_LSB_MASK;
+
+ pr_debug("trim values:lsb:0x%x and msb:0x%x\n", rslt_lsb, rslt_msb);
+
+ rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
+ QPNP_IADC_SEC_ACCESS_DATA);
+ if (rc < 0) {
+ pr_err("qpnp iadc configure error for sec access\n");
+ goto fail;
+ }
+
+ rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_MSB_OFFSET,
+ rslt_msb);
+ if (rc < 0) {
+ pr_err("qpnp iadc configure error for MSB write\n");
+ goto fail;
+ }
+
+ rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_SEC_ACCESS,
+ QPNP_IADC_SEC_ACCESS_DATA);
+ if (rc < 0) {
+ pr_err("qpnp iadc configure error for sec access\n");
+ goto fail;
+ }
+
+ rc = qpnp_iadc_write_reg(iadc, QPNP_IADC_LSB_OFFSET,
+ rslt_lsb);
+ if (rc < 0) {
+ pr_err("qpnp iadc configure error for LSB write\n");
+ goto fail;
+ }
+fail:
+ if (iadc->iadc_poll_eoc) {
+ pr_debug("releasing iadc eoc wakelock\n");
+ pm_relax(iadc->dev);
+ }
+ mutex_unlock(&iadc->adc->adc_lock);
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_iadc_calibrate_for_trim);
+
+static void qpnp_iadc_work(struct work_struct *work)
+{
+ struct qpnp_iadc_chip *iadc = container_of(work,
+ struct qpnp_iadc_chip, iadc_work.work);
+ int rc = 0;
+
+ if (!iadc->skip_auto_calibrations) {
+ rc = qpnp_iadc_calibrate_for_trim(iadc, true);
+ if (rc)
+ pr_debug("periodic IADC calibration failed\n");
+ }
+
+ schedule_delayed_work(&iadc->iadc_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (QPNP_IADC_CALIB_SECONDS)));
+ return;
+}
+
+static int32_t qpnp_iadc_version_check(struct qpnp_iadc_chip *iadc)
+{
+ uint8_t revision;
+ int rc;
+
+ rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_REVISION2, &revision);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed with %d\n", rc);
+ return rc;
+ }
+
+ if (revision < QPNP_IADC_SUPPORTED_REVISION2) {
+ pr_err("IADC Version not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name)
+{
+ struct qpnp_iadc_chip *iadc;
+ struct device_node *node = NULL;
+ char prop_name[QPNP_MAX_PROP_NAME_LEN];
+
+ snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-iadc", name);
+
+ node = of_parse_phandle(dev->of_node, prop_name, 0);
+ if (node == NULL)
+ return ERR_PTR(-ENODEV);
+
+ list_for_each_entry(iadc, &qpnp_iadc_device_list, list)
+ if (iadc->adc->spmi->dev.of_node == node)
+ return iadc;
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(qpnp_get_iadc);
+
+int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc, int32_t *rsense)
+{
+ uint8_t rslt_rsense = 0;
+ int32_t rc = 0, sign_bit = 0;
+
+ if (qpnp_iadc_is_valid(iadc) < 0)
+ return -EPROBE_DEFER;
+
+ if (iadc->external_rsense) {
+ *rsense = iadc->rsense;
+ } else if (iadc->default_internal_rsense) {
+ *rsense = iadc->rsense_workaround_value;
+ } else {
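+ /*
+ * Decode the internal rsense trim: bit 7 is a sign bit and each
+ * remaining LSB adjusts the nominal 10000000 nOhm sense resistor
+ * by 15625 nOhm, subtracting when the sign bit is set and adding
+ * otherwise.
+ */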
+
+ rc = qpnp_iadc_read_reg(iadc, QPNP_IADC_NOMINAL_RSENSE,
+ &rslt_rsense);
+ if (rc < 0) {
+ pr_err("qpnp adc rsense read failed with %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("rsense:0%x\n", rslt_rsense);
+
+ if (rslt_rsense & QPNP_RSENSE_MSB_SIGN_CHECK)
+ sign_bit = 1;
+
+ rslt_rsense &= ~QPNP_RSENSE_MSB_SIGN_CHECK;
+
+ if (sign_bit)
+ *rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR -
+ (rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
+ else
+ *rsense = QPNP_IADC_INTERNAL_RSENSE_N_OHMS_FACTOR +
+ (rslt_rsense * QPNP_IADC_RSENSE_LSB_N_OHMS_PER_BIT);
+ }
+ pr_debug("rsense value is %d\n", *rsense);
+
+ if (*rsense == 0)
+ pr_err("incorrect rsens value:%d rslt_rsense:%d\n",
+ *rsense, rslt_rsense);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_iadc_get_rsense);
+
+static int32_t qpnp_check_pmic_temp(struct qpnp_iadc_chip *iadc)
+{
+ struct qpnp_vadc_result result_pmic_therm;
+ int64_t die_temp_offset;
+ int rc = 0;
+
+ rc = qpnp_vadc_read(iadc->vadc_dev, DIE_TEMP, &result_pmic_therm);
+ if (rc < 0)
+ return rc;
+
+ die_temp_offset = result_pmic_therm.physical -
+ iadc->die_temp;
+ if (die_temp_offset < 0)
+ die_temp_offset = -die_temp_offset;
+
+ if (die_temp_offset > QPNP_IADC_DIE_TEMP_CALIB_OFFSET) {
+ iadc->die_temp = result_pmic_therm.physical;
+ if (!iadc->skip_auto_calibrations) {
+ rc = qpnp_iadc_calibrate_for_trim(iadc, true);
+ if (rc)
+ pr_err("IADC calibration failed rc = %d\n", rc);
+ }
+ }
+
+ return rc;
+}
+
+int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
+ enum qpnp_iadc_channels channel,
+ struct qpnp_iadc_result *result)
+{
+ int32_t rc, rsense_n_ohms, sign = 0, num, mode_sel = 0;
+ int32_t rsense_u_ohms = 0;
+ int64_t result_current;
+ uint16_t raw_data;
+ int dt_index = 0;
+
+ if (qpnp_iadc_is_valid(iadc) < 0)
+ return -EPROBE_DEFER;
+
+ if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
+ pr_err("raw offset errors! run iadc calibration again\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_check_pmic_temp(iadc);
+ if (rc) {
+ pr_err("Error checking pmic therm temp\n");
+ return rc;
+ }
+
+ mutex_lock(&iadc->adc->adc_lock);
+
+ while (((enum qpnp_iadc_channels)
+ iadc->adc->adc_channels[dt_index].channel_num
+ != channel) && (dt_index < iadc->max_channels_available))
+ dt_index++;
+
+ if (dt_index >= iadc->max_channels_available) {
+ pr_err("not a valid IADC channel\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ iadc->adc->amux_prop->decimation =
+ iadc->adc->adc_channels[dt_index].adc_decimation;
+ iadc->adc->amux_prop->fast_avg_setup =
+ iadc->adc->adc_channels[dt_index].fast_avg_setup;
+
+ if (iadc->iadc_poll_eoc) {
+ pr_debug("acquiring iadc eoc wakelock\n");
+ pm_stay_awake(iadc->dev);
+ }
+
+ rc = qpnp_iadc_configure(iadc, channel, &raw_data, mode_sel);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed with %d\n", rc);
+ goto fail;
+ }
+
+ rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
+ pr_debug("current raw:0%x and rsense:%d\n",
+ raw_data, rsense_n_ohms);
+ rsense_u_ohms = rsense_n_ohms/1000;
+ num = raw_data - iadc->adc->calib.offset_raw;
+ if (num < 0) {
+ sign = 1;
+ num = -num;
+ }
+
+ result->result_uv = (num * QPNP_ADC_GAIN_NV)/
+ (iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
+ result_current = result->result_uv;
+ result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
+ /* guard against a zero rsense; dividing by micro-ohms yields micro-amps */
+ if (!rsense_u_ohms) {
+ pr_err("rsense error=%d\n", rsense_u_ohms);
+ rc = -EINVAL;
+ goto fail;
+ }
+ do_div(result_current, rsense_u_ohms);
+
+ if (sign) {
+ result->result_uv = -result->result_uv;
+ result_current = -result_current;
+ }
+ result_current *= -1;
+ rc = qpnp_iadc_comp_result(iadc, &result_current);
+ if (rc < 0)
+ pr_err("Error during compensating the IADC\n");
+ rc = 0;
+ result_current *= -1;
+
+ result->result_ua = (int32_t) result_current;
+fail:
+ if (iadc->iadc_poll_eoc) {
+ pr_debug("releasing iadc eoc wakelock\n");
+ pm_relax(iadc->dev);
+ }
+ mutex_unlock(&iadc->adc->adc_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_iadc_read);
+
+int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
+ struct qpnp_iadc_calib *result)
+{
+ int rc;
+
+ if (qpnp_iadc_is_valid(iadc) < 0)
+ return -EPROBE_DEFER;
+
+ rc = qpnp_check_pmic_temp(iadc);
+ if (rc) {
+ pr_err("Error checking pmic therm temp\n");
+ return rc;
+ }
+
+ mutex_lock(&iadc->adc->adc_lock);
+ result->gain_raw = iadc->adc->calib.gain_raw;
+ result->ideal_gain_nv = QPNP_ADC_GAIN_NV;
+ result->gain_uv = iadc->adc->calib.gain_uv;
+ result->offset_raw = iadc->adc->calib.offset_raw;
+ result->ideal_offset_uv =
+ QPNP_OFFSET_CALIBRATION_SHORT_CADC_LEADS_IDEAL;
+ result->offset_uv = iadc->adc->calib.offset_uv;
+ pr_debug("raw gain:0%x, raw offset:0%x\n",
+ result->gain_raw, result->offset_raw);
+ pr_debug("gain_uv:%d offset_uv:%d\n",
+ result->gain_uv, result->offset_uv);
+ mutex_unlock(&iadc->adc->adc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_get_gain_and_offset);
+
+int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *iadc)
+{
+ iadc->skip_auto_calibrations = true;
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_skip_calibration);
+
+int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *iadc)
+{
+ iadc->skip_auto_calibrations = false;
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_iadc_resume_calibration);
+
+int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
+ enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
+ enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
+{
+ int rc = 0, mode_sel = 0, num = 0, rsense_n_ohms = 0, sign = 0;
+ int dt_index = 0;
+ uint16_t raw_data;
+ int32_t rsense_u_ohms = 0;
+ int64_t result_current;
+
+ if (qpnp_iadc_is_valid(iadc) < 0)
+ return -EPROBE_DEFER;
+
+ if ((iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw) == 0) {
+ pr_err("raw offset errors! run iadc calibration again\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&iadc->adc->adc_lock);
+
+ if (iadc->iadc_poll_eoc) {
+ pr_debug("acquiring iadc eoc wakelock\n");
+ pm_stay_awake(iadc->dev);
+ }
+
+ iadc->iadc_mode_sel = true;
+
+ rc = qpnp_vadc_iadc_sync_request(iadc->vadc_dev, v_channel);
+ if (rc) {
+ pr_err("Configuring VADC failed\n");
+ goto fail;
+ }
+
+ while (((enum qpnp_iadc_channels)
+ iadc->adc->adc_channels[dt_index].channel_num
+ != i_channel) && (dt_index < iadc->max_channels_available))
+ dt_index++;
+
+ if (dt_index >= iadc->max_channels_available) {
+ pr_err("not a valid IADC channel\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ iadc->adc->amux_prop->decimation =
+ iadc->adc->adc_channels[dt_index].adc_decimation;
+ iadc->adc->amux_prop->fast_avg_setup =
+ iadc->adc->adc_channels[dt_index].fast_avg_setup;
+
+ rc = qpnp_iadc_configure(iadc, i_channel, &raw_data, mode_sel);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed with %d\n", rc);
+ goto fail_release_vadc;
+ }
+
+ rc = qpnp_iadc_get_rsense(iadc, &rsense_n_ohms);
+ pr_debug("current raw:0%x and rsense:%d\n",
+ raw_data, rsense_n_ohms);
+ rsense_u_ohms = rsense_n_ohms/1000;
+ num = raw_data - iadc->adc->calib.offset_raw;
+ if (num < 0) {
+ sign = 1;
+ num = -num;
+ }
+
+ i_result->result_uv = (num * QPNP_ADC_GAIN_NV)/
+ (iadc->adc->calib.gain_raw - iadc->adc->calib.offset_raw);
+ result_current = i_result->result_uv;
+ result_current *= QPNP_IADC_NANO_VOLTS_FACTOR;
+ /* guard against a zero rsense; dividing by micro-ohms yields micro-amps */
+ if (!rsense_u_ohms) {
+ pr_err("rsense error=%d\n", rsense_u_ohms);
+ rc = -EINVAL;
+ goto fail_release_vadc;
+ }
+
+ do_div(result_current, rsense_u_ohms);
+
+ if (sign) {
+ i_result->result_uv = -i_result->result_uv;
+ result_current = -result_current;
+ }
+ result_current *= -1;
+ rc = qpnp_iadc_comp_result(iadc, &result_current);
+ if (rc < 0)
+ pr_err("Error during compensating the IADC\n");
+ rc = 0;
+ result_current *= -1;
+
+ i_result->result_ua = (int32_t) result_current;
+
+fail_release_vadc:
+ rc = qpnp_vadc_iadc_sync_complete_request(iadc->vadc_dev, v_channel,
+ v_result);
+ if (rc)
+ pr_err("Releasing VADC failed\n");
+fail:
+ iadc->iadc_mode_sel = false;
+
+ if (iadc->iadc_poll_eoc) {
+ pr_debug("releasing iadc eoc wakelock\n");
+ pm_relax(iadc->dev);
+ }
+ mutex_unlock(&iadc->adc->adc_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_iadc_vadc_sync_read);
+
+static ssize_t qpnp_iadc_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct qpnp_iadc_chip *iadc = dev_get_drvdata(dev);
+ struct qpnp_iadc_result result;
+ int rc = -1;
+
+ rc = qpnp_iadc_read(iadc, attr->index, &result);
+
+ if (rc)
+ return 0;
+
+ return snprintf(buf, QPNP_ADC_HWMON_NAME_LENGTH,
+ "Result:%d\n", result.result_ua);
+}
+
+static struct sensor_device_attribute qpnp_adc_attr =
+ SENSOR_ATTR(NULL, S_IRUGO, qpnp_iadc_show, NULL, 0);
+
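+/*
+ * One hwmon sysfs attribute is created per channel listed in the device
+ * tree; reading an attribute triggers a one-shot conversion through
+ * qpnp_iadc_read() and reports the result in microamps.
+ */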
+static int32_t qpnp_iadc_init_hwmon(struct qpnp_iadc_chip *iadc,
+ struct spmi_device *spmi)
+{
+ struct device_node *child;
+ struct device_node *node = spmi->dev.of_node;
+ int rc = 0, i = 0, channel;
+
+ for_each_child_of_node(node, child) {
+ channel = iadc->adc->adc_channels[i].channel_num;
+ qpnp_adc_attr.index = iadc->adc->adc_channels[i].channel_num;
+ qpnp_adc_attr.dev_attr.attr.name =
+ iadc->adc->adc_channels[i].name;
+ memcpy(&iadc->sens_attr[i], &qpnp_adc_attr,
+ sizeof(qpnp_adc_attr));
+ sysfs_attr_init(&iadc->sens_attr[i].dev_attr.attr);
+ rc = device_create_file(&spmi->dev,
+ &iadc->sens_attr[i].dev_attr);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "device_create_file failed for dev %s\n",
+ iadc->adc->adc_channels[i].name);
+ goto hwmon_err_sens;
+ }
+ i++;
+ }
+
+ return 0;
+hwmon_err_sens:
+ pr_err("Init HWMON failed for qpnp_iadc with %d\n", rc);
+ return rc;
+}
+
+static int qpnp_iadc_probe(struct spmi_device *spmi)
+{
+ struct qpnp_iadc_chip *iadc;
+ struct qpnp_adc_drv *adc_qpnp;
+ struct device_node *node = spmi->dev.of_node;
+ struct device_node *child;
+ struct resource *res;
+ int rc, count_adc_channel_list = 0, i = 0;
+
+ for_each_child_of_node(node, child)
+ count_adc_channel_list++;
+
+ if (!count_adc_channel_list) {
+ pr_err("No channel listing\n");
+ return -EINVAL;
+ }
+
+ iadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_iadc_chip) +
+ (sizeof(struct sensor_device_attribute) *
+ count_adc_channel_list), GFP_KERNEL);
+ if (!iadc) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ adc_qpnp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_drv),
+ GFP_KERNEL);
+ if (!adc_qpnp) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ iadc->dev = &(spmi->dev);
+ iadc->adc = adc_qpnp;
+
+ rc = qpnp_adc_get_devicetree_data(spmi, iadc->adc);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to read device tree\n");
+ return rc;
+ }
+
+ res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
+ "batt-id-trim-cnst-rds");
+ if (!res) {
+ dev_err(&spmi->dev, "failed to read batt_id trim register\n");
+ return -EINVAL;
+ }
+ iadc->batt_id_trim_cnst_rds = res->start;
+ rc = of_property_read_u32(node, "qcom,use-default-rds-trim",
+ &iadc->rds_trim_default_type);
+ if (rc)
+ pr_debug("No trim workaround needed\n");
+ else {
+ pr_debug("Use internal RDS trim workaround\n");
+ iadc->rds_trim_default_check = true;
+ }
+
+ iadc->vadc_dev = qpnp_get_vadc(&spmi->dev, "iadc");
+ if (IS_ERR(iadc->vadc_dev)) {
+ rc = PTR_ERR(iadc->vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ pr_err("vadc property missing, rc=%d\n", rc);
+ return rc;
+ }
+
+ mutex_init(&iadc->adc->adc_lock);
+
+ rc = of_property_read_u32(node, "qcom,rsense",
+ &iadc->rsense);
+ if (rc)
+ pr_debug("Defaulting to internal rsense\n");
+ else {
+ pr_debug("Use external rsense\n");
+ iadc->external_rsense = true;
+ }
+
+ iadc->iadc_poll_eoc = of_property_read_bool(node,
+ "qcom,iadc-poll-eoc");
+ if (!iadc->iadc_poll_eoc) {
+ rc = devm_request_irq(&spmi->dev, iadc->adc->adc_irq_eoc,
+ qpnp_iadc_isr, IRQF_TRIGGER_RISING,
+ "qpnp_iadc_interrupt", iadc);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to request adc irq\n");
+ return rc;
+ } else
+ enable_irq_wake(iadc->adc->adc_irq_eoc);
+ }
+
+ rc = qpnp_iadc_init_hwmon(iadc, spmi);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to initialize qpnp hwmon adc\n");
+ return rc;
+ }
+ iadc->iadc_hwmon = hwmon_device_register(&iadc->adc->spmi->dev);
+
+ rc = qpnp_iadc_version_check(iadc);
+ if (rc) {
+ dev_err(&spmi->dev, "IADC version not supported\n");
+ goto fail;
+ }
+
+ iadc->max_channels_available = count_adc_channel_list;
+ INIT_WORK(&iadc->trigger_completion_work, qpnp_iadc_trigger_completion);
+ INIT_DELAYED_WORK(&iadc->iadc_work, qpnp_iadc_work);
+ rc = qpnp_iadc_comp_info(iadc);
+ if (rc) {
+ dev_err(&spmi->dev, "abstracting IADC comp info failed!\n");
+ goto fail;
+ }
+
+ rc = qpnp_iadc_rds_trim_update_check(iadc);
+ if (rc) {
+ dev_err(&spmi->dev, "Rds trim update failed!\n");
+ goto fail;
+ }
+
+ dev_set_drvdata(&spmi->dev, iadc);
+ list_add(&iadc->list, &qpnp_iadc_device_list);
+ rc = qpnp_iadc_calibrate_for_trim(iadc, true);
+ if (rc)
+ dev_err(&spmi->dev, "failed to calibrate for USR trim\n");
+
+ if (iadc->iadc_poll_eoc)
+ device_init_wakeup(iadc->dev, 1);
+
+ schedule_delayed_work(&iadc->iadc_work,
+ round_jiffies_relative(msecs_to_jiffies
+ (QPNP_IADC_CALIB_SECONDS)));
+ return 0;
+fail:
+ for_each_child_of_node(node, child) {
+ device_remove_file(&spmi->dev,
+ &iadc->sens_attr[i].dev_attr);
+ i++;
+ }
+ hwmon_device_unregister(iadc->iadc_hwmon);
+
+ return rc;
+}
+
+static int qpnp_iadc_remove(struct spmi_device *spmi)
+{
+ struct qpnp_iadc_chip *iadc = dev_get_drvdata(&spmi->dev);
+ struct device_node *node = spmi->dev.of_node;
+ struct device_node *child;
+ int i = 0;
+
+ cancel_delayed_work(&iadc->iadc_work);
+ for_each_child_of_node(node, child) {
+ device_remove_file(&spmi->dev,
+ &iadc->sens_attr[i].dev_attr);
+ i++;
+ }
+ hwmon_device_unregister(iadc->iadc_hwmon);
+ if (iadc->iadc_poll_eoc)
+ pm_relax(iadc->dev);
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id qpnp_iadc_match_table[] = {
+ { .compatible = "qcom,qpnp-iadc",
+ },
+ {}
+};
+
+static struct spmi_driver qpnp_iadc_driver = {
+ .driver = {
+ .name = "qcom,qpnp-iadc",
+ .of_match_table = qpnp_iadc_match_table,
+ },
+ .probe = qpnp_iadc_probe,
+ .remove = qpnp_iadc_remove,
+};
+
+static int __init qpnp_iadc_init(void)
+{
+ return spmi_driver_register(&qpnp_iadc_driver);
+}
+module_init(qpnp_iadc_init);
+
+static void __exit qpnp_iadc_exit(void)
+{
+ spmi_driver_unregister(&qpnp_iadc_driver);
+}
+module_exit(qpnp_iadc_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC current ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
new file mode 100644
index 000000000000..5799957771e8
--- /dev/null
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -0,0 +1,2807 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/hwmon.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/thermal.h>
+
+/* QPNP VADC register definition */
+#define QPNP_VADC_REVISION1 0x0
+#define QPNP_VADC_REVISION2 0x1
+#define QPNP_VADC_REVISION3 0x2
+#define QPNP_VADC_REVISION4 0x3
+#define QPNP_VADC_PERPH_TYPE 0x4
+#define QPNP_VADC_PERH_SUBTYPE 0x5
+
+#define QPNP_VADC_SUPPORTED_REVISION2 1
+
+#define QPNP_VADC_STATUS1 0x8
+#define QPNP_VADC_STATUS1_OP_MODE 4
+#define QPNP_VADC_STATUS1_MEAS_INTERVAL_EN_STS BIT(2)
+#define QPNP_VADC_STATUS1_REQ_STS BIT(1)
+#define QPNP_VADC_STATUS1_EOC BIT(0)
+#define QPNP_VADC_STATUS1_REQ_STS_EOC_MASK 0x3
+#define QPNP_VADC_STATUS1_OP_MODE_MASK 0x18
+#define QPNP_VADC_MEAS_INT_MODE 0x2
+#define QPNP_VADC_MEAS_INT_MODE_MASK 0x10
+
+#define QPNP_VADC_STATUS2 0x9
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE 6
+#define QPNP_VADC_STATUS2_FIFO_NOT_EMPTY_FLAG BIT(1)
+#define QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS BIT(0)
+#define QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT 4
+#define QPNP_VADC_CONV_TIMEOUT_ERR 2
+
+#define QPNP_VADC_MODE_CTL 0x40
+#define QPNP_VADC_OP_MODE_SHIFT 3
+#define QPNP_VADC_VREF_XO_THM_FORCE BIT(2)
+#define QPNP_VADC_AMUX_TRIM_EN BIT(1)
+#define QPNP_VADC_TRIM_EN BIT(0)
+#define QPNP_VADC_EN_CTL1 0x46
+#define QPNP_VADC_EN BIT(7)
+#define QPNP_VADC_CH_SEL_CTL 0x48
+#define QPNP_VADC_DIG_PARAM 0x50
+#define QPNP_VADC_DIG_DEC_RATIO_SEL_SHIFT 3
+#define QPNP_VADC_HW_SETTLE_DELAY 0x51
+#define QPNP_VADC_CONV_REQ 0x52
+#define QPNP_VADC_CONV_REQ_SET BIT(7)
+#define QPNP_VADC_CONV_SEQ_CTL 0x54
+#define QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT 4
+#define QPNP_VADC_CONV_SEQ_TRIG_CTL 0x55
+#define QPNP_VADC_MEAS_INTERVAL_CTL 0x57
+#define QPNP_VADC_MEAS_INTERVAL_OP_CTL 0x59
+#define QPNP_VADC_MEAS_INTERVAL_OP_SET BIT(7)
+
+#define QPNP_VADC_CONV_SEQ_FALLING_EDGE 0x0
+#define QPNP_VADC_CONV_SEQ_RISING_EDGE 0x1
+#define QPNP_VADC_CONV_SEQ_EDGE_SHIFT 7
+#define QPNP_VADC_FAST_AVG_CTL 0x5a
+
+#define QPNP_VADC_LOW_THR_LSB 0x5c
+#define QPNP_VADC_LOW_THR_MSB 0x5d
+#define QPNP_VADC_HIGH_THR_LSB 0x5e
+#define QPNP_VADC_HIGH_THR_MSB 0x5f
+#define QPNP_VADC_ACCESS 0xd0
+#define QPNP_VADC_ACCESS_DATA 0xa5
+#define QPNP_VADC_PERH_RESET_CTL3 0xda
+#define QPNP_FOLLOW_OTST2_RB BIT(3)
+#define QPNP_FOLLOW_WARM_RB BIT(2)
+#define QPNP_FOLLOW_SHUTDOWN1_RB BIT(1)
+#define QPNP_FOLLOW_SHUTDOWN2_RB BIT(0)
+
+#define QPNP_INT_TEST_VAL 0xE1
+
+#define QPNP_VADC_DATA0 0x60
+#define QPNP_VADC_DATA1 0x61
+#define QPNP_VADC_CONV_TIMEOUT_ERR 2
+#define QPNP_VADC_CONV_TIME_MIN 1000
+#define QPNP_VADC_CONV_TIME_MAX 1100
+#define QPNP_ADC_COMPLETION_TIMEOUT HZ
+#define QPNP_VADC_ERR_COUNT 20
+#define QPNP_OP_MODE_SHIFT 3
+
+#define QPNP_VADC_THR_LSB_MASK(val) (val & 0xff)
+#define QPNP_VADC_THR_MSB_MASK(val) ((val & 0xff00) >> 8)
+#define QPNP_MIN_TIME 2000
+#define QPNP_MAX_TIME 2000
+#define QPNP_RETRY 100
+#define QPNP_VADC_ABSOLUTE_RECALIB_OFFSET 8
+#define QPNP_VADC_RATIOMETRIC_RECALIB_OFFSET 12
+#define QPNP_VADC_RECALIB_MAXCNT 10
+#define QPNP_VADC_OFFSET_DUMP 8
+
+/* QPNP VADC refreshed register set */
+#define QPNP_VADC_HC1_STATUS1 0x8
+
+#define QPNP_VADC_HC1_DATA_HOLD_CTL 0x3f
+#define QPNP_VADC_HC1_DATA_HOLD_CTL_FIELD BIT(1)
+
+#define QPNP_VADC_HC1_ADC_DIG_PARAM 0x42
+#define QPNP_VADC_HC1_CAL_VAL BIT(6)
+#define QPNP_VADC_HC1_CAL_VAL_SHIFT 6
+#define QPNP_VADC_HC1_CAL_SEL_MASK 0x30
+#define QPNP_VADC_HC1_CAL_SEL_SHIFT 4
+#define QPNP_VADC_HC1_DEC_RATIO_SEL 0xc
+#define QPNP_VADC_HC1_DEC_RATIO_SHIFT 2
+#define QPNP_VADC_HC1_FAST_AVG_CTL 0x43
+#define QPNP_VADC_HC1_FAST_AVG_SAMPLES_MASK 0xfff
+#define QPNP_VADC_HC1_ADC_CH_SEL_CTL 0x44
+#define QPNP_VADC_HC1_DELAY_CTL 0x45
+#define QPNP_VADC_HC1_DELAY_CTL_MASK 0xfff
+#define QPNP_VADC_MC1_EN_CTL1 0x46
+#define QPNP_VADC_HC1_ADC_EN BIT(7)
+#define QPNP_VADC_MC1_CONV_REQ 0x47
+#define QPNP_VADC_HC1_CONV_REQ_START BIT(7)
+
+#define QPNP_VADC_HC1_VBAT_MIN_THR0 0x48
+#define QPNP_VADC_HC1_VBAT_MIN_THR1 0x49
+
+#define QPNP_VADC_HC1_DATA0 0x50
+#define QPNP_VADC_HC1_DATA1 0x51
+#define QPNP_VADC_HC1_DATA_CHECK_USR 0x8000
+
+#define QPNP_VADC_HC1_VBAT_MIN_DATA0 0x52
+#define QPNP_VADC_MC1_VBAT_MIN_DATA1 0x53
+
+/*
+ * Conversion time varies from 213 uS to 6827 uS depending on the decimation
+ * ratio, clock rate and fast-average samples, with no measurement in queue.
+ */
+#define QPNP_VADC_HC1_CONV_TIME_MIN_US 213
+#define QPNP_VADC_HC1_CONV_TIME_MAX_US 214
+#define QPNP_VADC_HC1_ERR_COUNT 1600
+
+struct qpnp_vadc_mode_state {
+ bool meas_int_mode;
+ bool meas_int_request_in_queue;
+ bool vadc_meas_int_enable;
+ struct qpnp_adc_tm_btm_param *param;
+ struct qpnp_adc_amux vadc_meas_amux;
+};
+
+struct qpnp_vadc_thermal_data {
+ bool thermal_node;
+ int thermal_chan;
+ enum qpnp_vadc_channels vadc_channel;
+ struct thermal_zone_device *tz_dev;
+ struct qpnp_vadc_chip *vadc_dev;
+};
+
+struct qpnp_vadc_chip {
+ struct device *dev;
+ struct qpnp_adc_drv *adc;
+ struct list_head list;
+ struct dentry *dent;
+ struct device *vadc_hwmon;
+ bool vadc_init_calib;
+ int max_channels_available;
+ bool vadc_iadc_sync_lock;
+ u8 id;
+ struct work_struct trigger_completion_work;
+ bool vadc_poll_eoc;
+ bool vadc_recalib_check;
+ u8 revision_ana_minor;
+ u8 revision_dig_major;
+ struct workqueue_struct *high_thr_wq;
+ struct workqueue_struct *low_thr_wq;
+ struct work_struct trigger_high_thr_work;
+ struct work_struct trigger_low_thr_work;
+ struct qpnp_vadc_mode_state *state_copy;
+ struct qpnp_vadc_thermal_data *vadc_therm_chan;
+ struct power_supply *vadc_chg_vote;
+ bool vadc_hc;
+ struct sensor_device_attribute sens_attr[0];
+};
+
+LIST_HEAD(qpnp_vadc_device_list);
+
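+/*
+ * Scaling tables: each channel's raw code is post-processed by one of the
+ * entries below, selected by the scaling index carried in the channel's
+ * configuration (typically taken from the device-tree channel properties).
+ */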
+static struct qpnp_vadc_scale_fn vadc_scale_fn[] = {
+ [SCALE_DEFAULT] = {qpnp_adc_scale_default},
+ [SCALE_BATT_THERM] = {qpnp_adc_scale_batt_therm},
+ [SCALE_PMIC_THERM] = {qpnp_adc_scale_pmic_therm},
+ [SCALE_XOTHERM] = {qpnp_adc_tdkntcg_therm},
+ [SCALE_THERM_100K_PULLUP] = {qpnp_adc_scale_therm_pu2},
+ [SCALE_THERM_150K_PULLUP] = {qpnp_adc_scale_therm_pu1},
+ [SCALE_QRD_BATT_THERM] = {qpnp_adc_scale_qrd_batt_therm},
+ [SCALE_QRD_SKUAA_BATT_THERM] = {qpnp_adc_scale_qrd_skuaa_batt_therm},
+ [SCALE_SMB_BATT_THERM] = {qpnp_adc_scale_smb_batt_therm},
+ [SCALE_QRD_SKUG_BATT_THERM] = {qpnp_adc_scale_qrd_skug_batt_therm},
+ [SCALE_QRD_SKUH_BATT_THERM] = {qpnp_adc_scale_qrd_skuh_batt_therm},
+ [SCALE_NCP_03WF683_THERM] = {qpnp_adc_scale_therm_ncp03},
+ [SCALE_QRD_SKUT1_BATT_THERM] = {qpnp_adc_scale_qrd_skut1_batt_therm},
+ [SCALE_PMI_CHG_TEMP] = {qpnp_adc_scale_pmi_chg_temp},
+};
+
+static struct qpnp_vadc_rscale_fn adc_vadc_rscale_fn[] = {
+ [SCALE_RVADC_ABSOLUTE] = {qpnp_vadc_absolute_rthr},
+};
+
+static int32_t qpnp_vadc_read_reg(struct qpnp_vadc_chip *vadc, int16_t reg,
+ u8 *data, int len)
+{
+ int rc;
+
+ rc = spmi_ext_register_readl(vadc->adc->spmi->ctrl, vadc->adc->slave,
+ (vadc->adc->offset + reg), data, len);
+ if (rc < 0) {
+ pr_err("qpnp adc read reg %d failed with %d\n", reg, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_write_reg(struct qpnp_vadc_chip *vadc, int16_t reg,
+ u8 *buf, int len)
+{
+ int rc;
+
+ rc = spmi_ext_register_writel(vadc->adc->spmi->ctrl, vadc->adc->slave,
+ (vadc->adc->offset + reg), buf, len);
+ if (rc < 0) {
+ pr_err("qpnp adc write reg %d failed with %d\n", reg, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
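+/*
+ * Unlock the secured peripheral registers with the 0xA5 access code and
+ * set FOLLOW_WARM_RB in PERH_RESET_CTL3 so the VADC follows the warm
+ * reset signal.
+ */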
+static int32_t qpnp_vadc_warm_rst_configure(struct qpnp_vadc_chip *vadc)
+{
+ int rc = 0;
+ u8 data = 0, buf = 0;
+
+ buf = QPNP_VADC_ACCESS_DATA;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ACCESS, &buf, 1);
+ if (rc < 0) {
+ pr_err("VADC write access failed\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_PERH_RESET_CTL3, &data, 1);
+ if (rc < 0) {
+ pr_err("VADC perh reset ctl3 read failed\n");
+ return rc;
+ }
+
+ buf = QPNP_VADC_ACCESS_DATA;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_ACCESS, &buf, 1);
+ if (rc < 0) {
+ pr_err("VADC write access failed\n");
+ return rc;
+ }
+
+ data |= QPNP_FOLLOW_WARM_RB;
+
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_PERH_RESET_CTL3, &data, 1);
+ if (rc < 0) {
+ pr_err("VADC perh reset ctl3 write failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_mode_select(struct qpnp_vadc_chip *vadc, u8 mode_ctl)
+{
+ int rc;
+
+ mode_ctl |= (QPNP_VADC_TRIM_EN | QPNP_VADC_AMUX_TRIM_EN);
+
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MODE_CTL, &mode_ctl, 1);
+ if (rc < 0)
+ pr_err("vadc write mode selection err:%d\n", rc);
+
+ return rc;
+}
+
+static int32_t qpnp_vadc_enable(struct qpnp_vadc_chip *vadc, bool state)
+{
+ int rc = 0;
+ u8 data = 0;
+
+ data = QPNP_VADC_EN;
+ if (state) {
+ if (vadc->adc->hkadc_ldo && vadc->adc->hkadc_ldo_ok) {
+ rc = qpnp_adc_enable_voltage(vadc->adc);
+ if (rc) {
+ pr_err("failed enabling VADC LDO\n");
+ return rc;
+ }
+ }
+
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_EN_CTL1, &data, 1);
+ if (rc < 0) {
+ pr_err("VADC enable failed\n");
+ return rc;
+ }
+ } else {
+ data = (~data & QPNP_VADC_EN);
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_EN_CTL1, &data, 1);
+ if (rc < 0) {
+ pr_err("VADC disable failed\n");
+ return rc;
+ }
+
+ if (vadc->adc->hkadc_ldo && vadc->adc->hkadc_ldo_ok)
+ qpnp_adc_disable_voltage(vadc->adc);
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_status_debug(struct qpnp_vadc_chip *vadc)
+{
+ int rc = 0, i = 0;
+ u8 buf[8], offset = 0;
+
+ for (i = 0; i < QPNP_VADC_OFFSET_DUMP; i++) {
+ rc = qpnp_vadc_read_reg(vadc, offset, buf, 8);
+ if (rc) {
+ pr_err("debug register dump failed\n");
+ return rc;
+ }
+ offset += QPNP_VADC_OFFSET_DUMP;
+ pr_err("row%d: 0%x 0%x 0%x 0%x 0%x 0%x 0%x 0x%x\n",
+ i, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
+ buf[6], buf[7]);
+ }
+
+ rc = qpnp_vadc_enable(vadc, false);
+ if (rc < 0) {
+ pr_err("VADC disable failed with %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
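+
+/*
+ * Program a single VADC conversion: operating mode, channel, decimation
+ * ratio and HW settle delay are written first, then the mode-specific
+ * setup (fast averaging, conversion sequencer or measurement interval),
+ * and finally the ADC is enabled and, unless an IADC-synchronised read
+ * holds off the trigger, the conversion request bit is set.
+ */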
+static int32_t qpnp_vadc_configure(struct qpnp_vadc_chip *vadc,
+ struct qpnp_adc_amux_properties *chan_prop)
+{
+ u8 decimation = 0, conv_sequence = 0, conv_sequence_trig = 0;
+ u8 mode_ctrl = 0, meas_int_op_ctl_data = 0, buf = 0;
+ int rc = 0;
+
+ /* Mode selection */
+ mode_ctrl |= ((chan_prop->mode_sel << QPNP_VADC_OP_MODE_SHIFT) |
+ (QPNP_VADC_TRIM_EN | QPNP_VADC_AMUX_TRIM_EN));
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MODE_CTL, &mode_ctrl, 1);
+ if (rc < 0) {
+ pr_err("Mode configure write error\n");
+ return rc;
+ }
+
+ /* Channel selection */
+ buf = chan_prop->amux_channel;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CH_SEL_CTL, &buf, 1);
+ if (rc < 0) {
+ pr_err("Channel configure error\n");
+ return rc;
+ }
+
+ /* Digital parameter setup */
+ decimation = chan_prop->decimation <<
+ QPNP_VADC_DIG_DEC_RATIO_SEL_SHIFT;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_DIG_PARAM, &decimation, 1);
+ if (rc < 0) {
+ pr_err("Digital parameter configure write error\n");
+ return rc;
+ }
+
+ /* HW settling time delay */
+ buf = chan_prop->hw_settle_time;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HW_SETTLE_DELAY, &buf, 1);
+ if (rc < 0) {
+ pr_err("HW settling time setup error\n");
+ return rc;
+ }
+
+ pr_debug("mode:%d, channel:%d, decimation:%d, hw_settle:%d\n",
+ mode_ctrl, chan_prop->amux_channel, decimation,
+ chan_prop->hw_settle_time);
+
+ if (chan_prop->mode_sel == (ADC_OP_NORMAL_MODE <<
+ QPNP_VADC_OP_MODE_SHIFT)) {
+ /* Normal measurement mode */
+ buf = chan_prop->fast_avg_setup;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_FAST_AVG_CTL,
+ &buf, 1);
+ if (rc < 0) {
+ pr_err("Fast averaging configure error\n");
+ return rc;
+ }
+ /* Ensure MEAS_INTERVAL_OP_CTL is set to 0 */
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MEAS_INTERVAL_OP_CTL,
+ &meas_int_op_ctl_data, 1);
+ if (rc < 0) {
+ pr_err("Measurement interval OP configure error\n");
+ return rc;
+ }
+ } else if (chan_prop->mode_sel == (ADC_OP_CONVERSION_SEQUENCER <<
+ QPNP_VADC_OP_MODE_SHIFT)) {
+ /* Conversion sequence mode */
+ conv_sequence = ((ADC_SEQ_HOLD_100US <<
+ QPNP_VADC_CONV_SEQ_HOLDOFF_SHIFT) |
+ ADC_CONV_SEQ_TIMEOUT_5MS);
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_SEQ_CTL,
+ &conv_sequence, 1);
+ if (rc < 0) {
+ pr_err("Conversion sequence error\n");
+ return rc;
+ }
+
+ conv_sequence_trig = ((QPNP_VADC_CONV_SEQ_RISING_EDGE <<
+ QPNP_VADC_CONV_SEQ_EDGE_SHIFT) |
+ chan_prop->trigger_channel);
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_SEQ_TRIG_CTL,
+ &conv_sequence_trig, 1);
+ if (rc < 0) {
+ pr_err("Conversion trigger error\n");
+ return rc;
+ }
+ } else if (chan_prop->mode_sel == ADC_OP_MEASUREMENT_INTERVAL) {
+ buf = QPNP_VADC_MEAS_INTERVAL_OP_SET;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MEAS_INTERVAL_OP_CTL,
+ &buf, 1);
+ if (rc < 0) {
+ pr_err("Measurement interval OP configure error\n");
+ return rc;
+ }
+ }
+
+ if (!vadc->vadc_poll_eoc)
+ reinit_completion(&vadc->adc->adc_rslt_completion);
+
+ rc = qpnp_vadc_enable(vadc, true);
+ if (rc)
+ return rc;
+
+ if (!vadc->vadc_iadc_sync_lock) {
+ /* Request conversion */
+ buf = QPNP_VADC_CONV_REQ_SET;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_CONV_REQ, &buf, 1);
+ if (rc < 0) {
+ pr_err("Request conversion failed\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int32_t qpnp_vadc_read_conversion_result(struct qpnp_vadc_chip *vadc,
+ int32_t *data)
+{
+ uint8_t rslt_lsb, rslt_msb;
+ int rc = 0, status = 0;
+
+ status = qpnp_vadc_read_reg(vadc, QPNP_VADC_DATA0, &rslt_lsb, 1);
+ if (status < 0) {
+ pr_err("qpnp adc result read failed for data0\n");
+ goto fail;
+ }
+
+ status = qpnp_vadc_read_reg(vadc, QPNP_VADC_DATA1, &rslt_msb, 1);
+ if (status < 0) {
+ pr_err("qpnp adc result read failed for data1\n");
+ goto fail;
+ }
+
+ *data = (rslt_msb << 8) | rslt_lsb;
+
+fail:
+ rc = qpnp_vadc_enable(vadc, false);
+ if (rc)
+ return rc;
+
+ return status;
+}
+
+static int32_t qpnp_vadc_read_status(struct qpnp_vadc_chip *vadc, int mode_sel)
+{
+ u8 status1, status2, status2_conv_seq_state;
+ u8 status_err = QPNP_VADC_CONV_TIMEOUT_ERR;
+ int rc;
+
+ switch (mode_sel) {
+ case (ADC_OP_CONVERSION_SEQUENCER << QPNP_VADC_OP_MODE_SHIFT):
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+ if (rc) {
+ pr_err("qpnp_vadc read mask interrupt failed\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS2, &status2, 1);
+ if (rc) {
+ pr_err("qpnp_vadc read mask interrupt failed\n");
+ return rc;
+ }
+
+ if (!(status2 & ~QPNP_VADC_STATUS2_CONV_SEQ_TIMEOUT_STS) &&
+ (status1 & (~QPNP_VADC_STATUS1_REQ_STS |
+ QPNP_VADC_STATUS1_EOC))) {
+ rc = status_err;
+ return rc;
+ }
+
+ status2_conv_seq_state = status2 >>
+ QPNP_VADC_STATUS2_CONV_SEQ_STATE_SHIFT;
+ if (status2_conv_seq_state != ADC_CONV_SEQ_IDLE) {
+ pr_err("qpnp vadc seq error with status %d\n",
+ status2);
+ rc = -EINVAL;
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qpnp_vadc_is_valid(struct qpnp_vadc_chip *vadc)
+{
+ struct qpnp_vadc_chip *vadc_chip = NULL;
+
+ list_for_each_entry(vadc_chip, &qpnp_vadc_device_list, list)
+ if (vadc == vadc_chip)
+ return 0;
+
+ return -EINVAL;
+}
+
+static void qpnp_vadc_work(struct work_struct *work)
+{
+ struct qpnp_vadc_chip *vadc = container_of(work,
+ struct qpnp_vadc_chip, trigger_completion_work);
+
+ if (qpnp_vadc_is_valid(vadc) < 0)
+ return;
+
+ complete(&vadc->adc->adc_rslt_completion);
+}
+
+static void qpnp_vadc_low_thr_fn(struct work_struct *work)
+{
+ struct qpnp_vadc_chip *vadc = container_of(work,
+ struct qpnp_vadc_chip, trigger_low_thr_work);
+
+ vadc->state_copy->meas_int_mode = false;
+ vadc->state_copy->meas_int_request_in_queue = false;
+ vadc->state_copy->param->threshold_notification(
+ ADC_TM_LOW_STATE,
+ vadc->state_copy->param->btm_ctx);
+}
+
+static void qpnp_vadc_high_thr_fn(struct work_struct *work)
+{
+ struct qpnp_vadc_chip *vadc = container_of(work,
+ struct qpnp_vadc_chip, trigger_high_thr_work);
+
+ vadc->state_copy->meas_int_mode = false;
+ vadc->state_copy->meas_int_request_in_queue = false;
+ vadc->state_copy->param->threshold_notification(
+ ADC_TM_HIGH_STATE,
+ vadc->state_copy->param->btm_ctx);
+}
+
+static irqreturn_t qpnp_vadc_isr(int irq, void *dev_id)
+{
+ struct qpnp_vadc_chip *vadc = dev_id;
+
+ schedule_work(&vadc->trigger_completion_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_vadc_low_thr_isr(int irq, void *data)
+{
+ struct qpnp_vadc_chip *vadc = data;
+ u8 mode_ctl = 0, mode = 0;
+ int rc = 0;
+
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_MODE_CTL, &mode, 1);
+ if (rc < 0) {
+ pr_err("mode ctl register read failed with %d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ if (!(mode & QPNP_VADC_MEAS_INT_MODE_MASK)) {
+ pr_debug("Spurious VADC threshold 0x%x\n", mode);
+ return IRQ_HANDLED;
+ }
+
+ mode_ctl = ADC_OP_NORMAL_MODE;
+ /* Set measurement in single measurement mode */
+ qpnp_vadc_mode_select(vadc, mode_ctl);
+ qpnp_vadc_enable(vadc, false);
+ schedule_work(&vadc->trigger_low_thr_work);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_vadc_high_thr_isr(int irq, void *data)
+{
+ struct qpnp_vadc_chip *vadc = data;
+ u8 mode_ctl = 0, mode = 0;
+ int rc = 0;
+
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_MODE_CTL, &mode, 1);
+ if (rc < 0) {
+ pr_err("mode ctl register read failed with %d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ if (!(mode & QPNP_VADC_MEAS_INT_MODE_MASK)) {
+ pr_debug("Spurious VADC threshold 0x%x\n", mode);
+ return IRQ_HANDLED;
+ }
+
+ mode_ctl = ADC_OP_NORMAL_MODE;
+ /* Set measurement in single measurement mode */
+ qpnp_vadc_mode_select(vadc, mode_ctl);
+ qpnp_vadc_enable(vadc, false);
+ schedule_work(&vadc->trigger_high_thr_work);
+
+ return IRQ_HANDLED;
+}
+
+static int32_t qpnp_vadc_version_check(struct qpnp_vadc_chip *dev)
+{
+ uint8_t revision;
+ int rc;
+
+ rc = qpnp_vadc_read_reg(dev, QPNP_VADC_REVISION2, &revision, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed with %d\n", rc);
+ return rc;
+ }
+
+ if (revision < QPNP_VADC_SUPPORTED_REVISION2) {
+ pr_err("VADC Version not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int32_t
+ qpnp_vadc_channel_post_scaling_calib_check(struct qpnp_vadc_chip *vadc,
+ int channel)
+{
+ int version, rc = 0;
+
+ version = qpnp_adc_get_revid_version(vadc->dev);
+
+ if (version == QPNP_REV_ID_PM8950_1_0) {
+ if ((channel == LR_MUX7_HW_ID) ||
+ (channel == P_MUX2_1_1) ||
+ (channel == LR_MUX3_XO_THERM) ||
+ (channel == LR_MUX3_BUF_XO_THERM_BUF) ||
+ (channel == P_MUX4_1_1)) {
+ vadc->adc->amux_prop->chan_prop->calib_type =
+ CALIB_ABSOLUTE;
+ return rc;
+ }
+ }
+
+ return -EINVAL;
+}
+
+#define QPNP_VBAT_COEFF_1 3000
+#define QPNP_VBAT_COEFF_2 45810000
+#define QPNP_VBAT_COEFF_3 100000
+#define QPNP_VBAT_COEFF_4 3500
+#define QPNP_VBAT_COEFF_5 80000000
+#define QPNP_VBAT_COEFF_6 4400
+#define QPNP_VBAT_COEFF_7 32200000
+#define QPNP_VBAT_COEFF_8 3880
+#define QPNP_VBAT_COEFF_9 5770
+#define QPNP_VBAT_COEFF_10 3660
+#define QPNP_VBAT_COEFF_11 5320
+#define QPNP_VBAT_COEFF_12 8060000
+#define QPNP_VBAT_COEFF_13 102640000
+#define QPNP_VBAT_COEFF_14 22220000
+#define QPNP_VBAT_COEFF_15 83060000
+#define QPNP_VBAT_COEFF_16 2810
+#define QPNP_VBAT_COEFF_17 5260
+#define QPNP_VBAT_COEFF_18 8027
+#define QPNP_VBAT_COEFF_19 2347
+#define QPNP_VBAT_COEFF_20 6043
+#define QPNP_VBAT_COEFF_21 1914
+#define QPNP_VBAT_OFFSET_SMIC 9446
+#define QPNP_VBAT_OFFSET_GF 9441
+#define QPNP_OCV_OFFSET_SMIC 4596
+#define QPNP_OCV_OFFSET_GF 5896
+#define QPNP_VBAT_COEFF_22 6800
+#define QPNP_VBAT_COEFF_23 3500
+#define QPNP_VBAT_COEFF_24 4360
+#define QPNP_VBAT_COEFF_25 8060
+#define QPNP_VBAT_COEFF_26 7895
+#define QPNP_VBAT_COEFF_27 5658
+#define QPNP_VBAT_COEFF_28 5760
+#define QPNP_VBAT_COEFF_29 7900
+#define QPNP_VBAT_COEFF_30 5660
+#define QPNP_VBAT_COEFF_31 3620
+#define QPNP_VBAT_COEFF_32 1230
+#define QPNP_VBAT_COEFF_33 5760
+#define QPNP_VBAT_COEFF_34 4080
+#define QPNP_VBAT_COEFF_35 7000
+#define QPNP_VBAT_COEFF_36 3040
+#define QPNP_VBAT_COEFF_37 3850
+#define QPNP_VBAT_COEFF_38 5000
+#define QPNP_VBAT_COEFF_39 2610
+#define QPNP_VBAT_COEFF_40 4190
+#define QPNP_VBAT_COEFF_41 5800
+#define QPNP_VBAT_COEFF_42 2620
+#define QPNP_VBAT_COEFF_43 4030
+#define QPNP_VBAT_COEFF_44 3230
+#define QPNP_VBAT_COEFF_45 3450
+#define QPNP_VBAT_COEFF_46 2120
+#define QPNP_VBAT_COEFF_47 3560
+#define QPNP_VBAT_COEFF_48 2190
+#define QPNP_VBAT_COEFF_49 4180
+#define QPNP_VBAT_COEFF_50 27800000
+#define QPNP_VBAT_COEFF_51 5110
+#define QPNP_VBAT_COEFF_52 34444000
+
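+/*
+ * qpnp_ocv_comp() - compensate a PON OCV reading for die temperature.
+ * The coefficients above are selected by PMIC revision and fab ID; the
+ * correction is computed in ppm (temp_var / QPNP_VBAT_COEFF_3) and applied
+ * as a gain adjustment, with a fixed offset subtracted on some parts.
+ */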
+static int32_t qpnp_ocv_comp(int64_t *result,
+ struct qpnp_vadc_chip *vadc, int64_t die_temp)
+{
+ int64_t temp_var = 0, offset = 0;
+ int64_t old = *result;
+ int version;
+
+ version = qpnp_adc_get_revid_version(vadc->dev);
+ if (version == -EINVAL)
+ return 0;
+
+ if (version == QPNP_REV_ID_8026_2_2) {
+ if (die_temp > 25000)
+ return 0;
+ }
+
+ switch (version) {
+ case QPNP_REV_ID_8941_3_1:
+ switch (vadc->id) {
+ case COMP_ID_TSMC:
+ temp_var = ((die_temp - 25000) *
+ (-QPNP_VBAT_COEFF_4));
+ break;
+ default:
+ case COMP_ID_GF:
+ temp_var = ((die_temp - 25000) *
+ (-QPNP_VBAT_COEFF_1));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8026_1_0:
+ switch (vadc->id) {
+ case COMP_ID_TSMC:
+ temp_var = (((die_temp *
+ (-QPNP_VBAT_COEFF_10))
+ - QPNP_VBAT_COEFF_14));
+ break;
+ default:
+ case COMP_ID_GF:
+ temp_var = (((die_temp *
+ (-QPNP_VBAT_COEFF_8))
+ + QPNP_VBAT_COEFF_12));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8026_2_0:
+ case QPNP_REV_ID_8026_2_1:
+ switch (vadc->id) {
+ case COMP_ID_TSMC:
+ temp_var = ((die_temp - 25000) *
+ (-QPNP_VBAT_COEFF_10));
+ break;
+ default:
+ case COMP_ID_GF:
+ temp_var = ((die_temp - 25000) *
+ (-QPNP_VBAT_COEFF_8));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8026_2_2:
+ switch (vadc->id) {
+ case COMP_ID_TSMC:
+ *result -= QPNP_VBAT_COEFF_22;
+ temp_var = (die_temp - 25000) *
+ QPNP_VBAT_COEFF_24;
+ break;
+ default:
+ case COMP_ID_GF:
+ *result -= QPNP_VBAT_COEFF_22;
+ temp_var = (die_temp - 25000) *
+ QPNP_VBAT_COEFF_25;
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8110_2_0:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ *result -= QPNP_OCV_OFFSET_SMIC;
+ if (die_temp < 25000)
+ temp_var = QPNP_VBAT_COEFF_18;
+ else
+ temp_var = QPNP_VBAT_COEFF_19;
+ temp_var = (die_temp - 25000) * temp_var;
+ break;
+ default:
+ case COMP_ID_GF:
+ *result -= QPNP_OCV_OFFSET_GF;
+ if (die_temp < 25000)
+ temp_var = QPNP_VBAT_COEFF_20;
+ else
+ temp_var = QPNP_VBAT_COEFF_21;
+ temp_var = (die_temp - 25000) * temp_var;
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8916_1_0:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ if (die_temp < 25000)
+ temp_var = QPNP_VBAT_COEFF_26;
+ else
+ temp_var = QPNP_VBAT_COEFF_27;
+ temp_var = (die_temp - 25000) * temp_var;
+ break;
+ default:
+ case COMP_ID_GF:
+ offset = QPNP_OCV_OFFSET_GF;
+ if (die_temp < 25000)
+ temp_var = QPNP_VBAT_COEFF_26;
+ else
+ temp_var = QPNP_VBAT_COEFF_27;
+ temp_var = (die_temp - 25000) * temp_var;
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8916_1_1:
+ switch (vadc->id) {
+ /* FAB_ID is zero */
+ case COMP_ID_GF:
+ if (die_temp < 25000)
+ temp_var = QPNP_VBAT_COEFF_29;
+ else
+ temp_var = QPNP_VBAT_COEFF_30;
+ temp_var = (die_temp - 25000) * temp_var;
+ break;
+ /* FAB_ID is non-zero */
+ default:
+ if (die_temp < 25000)
+ temp_var = QPNP_VBAT_COEFF_31;
+ else
+ temp_var = (-QPNP_VBAT_COEFF_32);
+ temp_var = (die_temp - 25000) * temp_var;
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8916_2_0:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ offset = (-QPNP_VBAT_COEFF_38);
+ if (die_temp < 0)
+ temp_var = die_temp * QPNP_VBAT_COEFF_36;
+ else if (die_temp > 40000)
+ temp_var = ((die_temp - 40000) *
+ (-QPNP_VBAT_COEFF_37));
+ break;
+ case COMP_ID_TSMC:
+ if (die_temp < 10000)
+ temp_var = ((die_temp - 10000) *
+ QPNP_VBAT_COEFF_41);
+ else if (die_temp > 50000)
+ temp_var = ((die_temp - 50000) *
+ (-QPNP_VBAT_COEFF_42));
+ break;
+ default:
+ case COMP_ID_GF:
+ if (die_temp < 20000)
+ temp_var = ((die_temp - 20000) *
+ QPNP_VBAT_COEFF_45);
+ else if (die_temp > 40000)
+ temp_var = ((die_temp - 40000) *
+ (-QPNP_VBAT_COEFF_46));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8909_1_0:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ temp_var = (-QPNP_VBAT_COEFF_50);
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8909_1_1:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ temp_var = (QPNP_VBAT_COEFF_52);
+ break;
+ }
+ break;
+ default:
+ temp_var = 0;
+ break;
+ }
+
+ temp_var = div64_s64(temp_var, QPNP_VBAT_COEFF_3);
+
+ temp_var = 1000000 + temp_var;
+
+ *result = *result * temp_var;
+
+ if (offset)
+ *result -= offset;
+
+ *result = div64_s64(*result, 1000000);
+ pr_debug("%lld compensated into %lld\n", old, *result);
+
+ return 0;
+}
+
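+/*
+ * qpnp_vbat_sns_comp() - same ppm-style die-temperature compensation as
+ * qpnp_ocv_comp(), with coefficients tuned for the VBAT_SNS channel.
+ * Die temperature is clamped to 60 degC on most revisions.
+ */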
+static int32_t qpnp_vbat_sns_comp(int64_t *result,
+ struct qpnp_vadc_chip *vadc, int64_t die_temp)
+{
+ int64_t temp_var = 0, offset = 0;
+ int64_t old = *result;
+ int version;
+
+ version = qpnp_adc_get_revid_version(vadc->dev);
+ if (version == -EINVAL)
+ return 0;
+
+ if (version != QPNP_REV_ID_8941_3_1) {
+ /* min(die_temp_c, 60_degC) */
+ if (die_temp > 60000)
+ die_temp = 60000;
+ }
+
+ switch (version) {
+ case QPNP_REV_ID_8941_3_1:
+ switch (vadc->id) {
+ case COMP_ID_TSMC:
+ temp_var = ((die_temp - 25000) *
+ (-QPNP_VBAT_COEFF_1));
+ break;
+ default:
+ case COMP_ID_GF:
+ /* min(die_temp_c, 60_degC) */
+ if (die_temp > 60000)
+ die_temp = 60000;
+ temp_var = ((die_temp - 25000) *
+ (-QPNP_VBAT_COEFF_1));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8026_1_0:
+ switch (vadc->id) {
+ case COMP_ID_TSMC:
+ temp_var = (((die_temp *
+ (-QPNP_VBAT_COEFF_11))
+ + QPNP_VBAT_COEFF_15));
+ break;
+ default:
+ case COMP_ID_GF:
+ temp_var = (((die_temp *
+ (-QPNP_VBAT_COEFF_9))
+ + QPNP_VBAT_COEFF_13));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8026_2_0:
+ case QPNP_REV_ID_8026_2_1:
+ switch (vadc->id) {
+ case COMP_ID_TSMC:
+ temp_var = ((die_temp - 25000) *
+ (-QPNP_VBAT_COEFF_11));
+ break;
+ default:
+ case COMP_ID_GF:
+ temp_var = ((die_temp - 25000) *
+ (-QPNP_VBAT_COEFF_9));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8026_2_2:
+ switch (vadc->id) {
+ case COMP_ID_TSMC:
+ *result -= QPNP_VBAT_COEFF_23;
+ temp_var = 0;
+ break;
+ default:
+ case COMP_ID_GF:
+ *result -= QPNP_VBAT_COEFF_23;
+ temp_var = 0;
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8110_2_0:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ *result -= QPNP_VBAT_OFFSET_SMIC;
+ temp_var = ((die_temp - 25000) *
+ (QPNP_VBAT_COEFF_17));
+ break;
+ default:
+ case COMP_ID_GF:
+ *result -= QPNP_VBAT_OFFSET_GF;
+ temp_var = ((die_temp - 25000) *
+ (QPNP_VBAT_COEFF_16));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8916_1_0:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ temp_var = ((die_temp - 25000) *
+ (QPNP_VBAT_COEFF_28));
+ break;
+ default:
+ case COMP_ID_GF:
+ temp_var = ((die_temp - 25000) *
+ (QPNP_VBAT_COEFF_28));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8916_1_1:
+ switch (vadc->id) {
+ /* FAB_ID is zero */
+ case COMP_ID_GF:
+ temp_var = ((die_temp - 25000) *
+ (QPNP_VBAT_COEFF_33));
+ break;
+ /* FAB_ID is non-zero */
+ default:
+ offset = QPNP_VBAT_COEFF_35;
+ if (die_temp > 50000) {
+ temp_var = ((die_temp - 25000) *
+ (QPNP_VBAT_COEFF_34));
+ }
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8916_2_0:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ if (die_temp < 0) {
+ temp_var = (die_temp *
+ QPNP_VBAT_COEFF_39);
+ } else if (die_temp > 40000) {
+ temp_var = ((die_temp - 40000) *
+ (-QPNP_VBAT_COEFF_40));
+ }
+ break;
+ case COMP_ID_TSMC:
+ if (die_temp < 10000)
+ temp_var = ((die_temp - 10000) *
+ QPNP_VBAT_COEFF_43);
+ else if (die_temp > 50000)
+ temp_var = ((die_temp - 50000) *
+ (-QPNP_VBAT_COEFF_44));
+ break;
+ default:
+ case COMP_ID_GF:
+ if (die_temp < 20000)
+ temp_var = ((die_temp - 20000) *
+ QPNP_VBAT_COEFF_47);
+ else if (die_temp > 40000)
+ temp_var = ((die_temp - 40000) *
+ (-QPNP_VBAT_COEFF_48));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8909_1_0:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ if (die_temp < 30000)
+ temp_var = (-QPNP_VBAT_COEFF_50);
+ else if (die_temp > 30000)
+ temp_var = (((die_temp - 30000) *
+ (-QPNP_VBAT_COEFF_49)) +
+ (-QPNP_VBAT_COEFF_50));
+ break;
+ }
+ break;
+ case QPNP_REV_ID_8909_1_1:
+ switch (vadc->id) {
+ case COMP_ID_SMIC:
+ if (die_temp < 30000)
+ temp_var = (QPNP_VBAT_COEFF_52);
+ else if (die_temp > 30000)
+ temp_var = (((die_temp - 30000) *
+ (-QPNP_VBAT_COEFF_51)) +
+ (QPNP_VBAT_COEFF_52));
+ break;
+ }
+ break;
+ default:
+ temp_var = 0;
+ break;
+ }
+
+ temp_var = div64_s64(temp_var, QPNP_VBAT_COEFF_3);
+
+ temp_var = 1000000 + temp_var;
+
+ *result = *result * temp_var;
+
+ if (offset)
+ *result -= offset;
+
+ *result = div64_s64(*result, 1000000);
+ pr_debug("%lld compensated into %lld\n", old, *result);
+
+ return 0;
+}
+
+int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *vadc,
+ int64_t *result, bool is_pon_ocv)
+{
+ struct qpnp_vadc_result die_temp_result;
+ int rc = 0;
+
+ rc = qpnp_vadc_is_valid(vadc);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+ DIE_TEMP, &die_temp_result);
+ if (rc < 0) {
+ pr_err("Error reading die_temp\n");
+ return rc;
+ }
+
+ pr_debug("die-temp = %lld\n", die_temp_result.physical);
+
+ if (is_pon_ocv)
+ rc = qpnp_ocv_comp(result, vadc, die_temp_result.physical);
+ else
+ rc = qpnp_vbat_sns_comp(result, vadc,
+ die_temp_result.physical);
+
+ if (rc < 0)
+ pr_err("Error with vbat compensation\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_vbat_sns_comp_result);
+
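+/*
+ * Prefer the buffered 625mV reference (SPARE1) if it is listed in the
+ * device tree channel table, otherwise fall back to the unbuffered
+ * REF_625MV channel.
+ */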
+static void qpnp_vadc_625mv_channel_sel(struct qpnp_vadc_chip *vadc,
+ uint32_t *ref_channel_sel)
+{
+ uint32_t dt_index = 0;
+
+ /* Check if the buffered 625mV channel exists */
+ while ((vadc->adc->adc_channels[dt_index].channel_num
+ != SPARE1) && (dt_index < vadc->max_channels_available))
+ dt_index++;
+
+ if (dt_index >= vadc->max_channels_available) {
+ pr_debug("Use default 625mV ref channel\n");
+ *ref_channel_sel = REF_625MV;
+ } else {
+ pr_debug("Use buffered 625mV ref channel\n");
+ *ref_channel_sel = SPARE1;
+ }
+}
+
+int32_t qpnp_vadc_calib_vref(struct qpnp_vadc_chip *vadc,
+ enum qpnp_adc_calib_type calib_type,
+ int *calib_data)
+{
+ struct qpnp_adc_amux_properties conv;
+ int rc, count = 0, calib_read = 0;
+ u8 status1 = 0;
+
+ if (calib_type == CALIB_ABSOLUTE)
+ conv.amux_channel = REF_125V;
+ else if (calib_type == CALIB_RATIOMETRIC)
+ conv.amux_channel = VDD_VADC;
+
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+
+ rc = qpnp_vadc_configure(vadc, &conv);
+ if (rc) {
+ pr_err("qpnp_vadc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != QPNP_VADC_STATUS1_EOC) {
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+ if (rc < 0)
+ return rc;
+ status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ count++;
+ if (count > QPNP_VADC_ERR_COUNT) {
+ rc = -ENODEV;
+ goto calib_fail;
+ }
+ }
+
+ rc = qpnp_vadc_read_conversion_result(vadc, &calib_read);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+ *calib_data = calib_read;
+calib_fail:
+ return rc;
+}
+
+
+int32_t qpnp_vadc_calib_gnd(struct qpnp_vadc_chip *vadc,
+ enum qpnp_adc_calib_type calib_type,
+ int *calib_data)
+{
+ struct qpnp_adc_amux_properties conv;
+ int rc, count = 0, calib_read = 0;
+ u8 status1 = 0;
+ uint32_t ref_channel_sel = 0;
+
+ if (calib_type == CALIB_ABSOLUTE) {
+ qpnp_vadc_625mv_channel_sel(vadc, &ref_channel_sel);
+ conv.amux_channel = ref_channel_sel;
+ } else if (calib_type == CALIB_RATIOMETRIC)
+ conv.amux_channel = GND_REF;
+
+ conv.decimation = DECIMATION_TYPE2;
+ conv.mode_sel = ADC_OP_NORMAL_MODE << QPNP_VADC_OP_MODE_SHIFT;
+ conv.hw_settle_time = ADC_CHANNEL_HW_SETTLE_DELAY_0US;
+ conv.fast_avg_setup = ADC_FAST_AVG_SAMPLE_1;
+
+ rc = qpnp_vadc_configure(vadc, &conv);
+ if (rc) {
+ pr_err("qpnp_vadc configure failed with %d\n", rc);
+ goto calib_fail;
+ }
+
+ while (status1 != QPNP_VADC_STATUS1_EOC) {
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+ if (rc < 0)
+ return rc;
+ status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ count++;
+ if (count > QPNP_VADC_ERR_COUNT) {
+ rc = -ENODEV;
+ goto calib_fail;
+ }
+ }
+
+ rc = qpnp_vadc_read_conversion_result(vadc, &calib_read);
+ if (rc) {
+ pr_err("qpnp adc read adc failed with %d\n", rc);
+ goto calib_fail;
+ }
+ *calib_data = calib_read;
+calib_fail:
+ return rc;
+}
+
+static int32_t qpnp_vadc_calib_device(struct qpnp_vadc_chip *vadc)
+{
+ int rc, calib_read_1 = 0, calib_read_2 = 0;
+
+ rc = qpnp_vadc_calib_vref(vadc, CALIB_ABSOLUTE, &calib_read_1);
+ if (rc) {
+ pr_err("qpnp adc absolute vref calib failed with %d\n", rc);
+ goto calib_fail;
+ }
+ rc = qpnp_vadc_calib_gnd(vadc, CALIB_ABSOLUTE, &calib_read_2);
+ if (rc) {
+ pr_err("qpnp adc absolute gnd calib failed with %d\n", rc);
+ goto calib_fail;
+ }
+ pr_debug("absolute reference raw: 625mV:0x%x 1.25V:0x%x\n",
+ calib_read_2, calib_read_1);
+
+ if (calib_read_1 == calib_read_2) {
+ pr_err("absolute reference raw: 625mV:0x%x 1.25V:0x%x\n",
+ calib_read_2, calib_read_1);
+ rc = -EINVAL;
+ goto calib_fail;
+ }
+
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dy =
+ (calib_read_1 - calib_read_2);
+
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dx
+ = QPNP_ADC_625_UV;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_vref =
+ calib_read_1;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_gnd =
+ calib_read_2;
+
+ calib_read_1 = 0;
+ calib_read_2 = 0;
+ rc = qpnp_vadc_calib_vref(vadc, CALIB_RATIOMETRIC, &calib_read_1);
+ if (rc) {
+ pr_err("qpnp adc ratiometric vref calib failed with %d\n", rc);
+ goto calib_fail;
+ }
+ rc = qpnp_vadc_calib_gnd(vadc, CALIB_RATIOMETRIC, &calib_read_2);
+ if (rc) {
+ pr_err("qpnp adc ratiometric gnd calib failed with %d\n", rc);
+ goto calib_fail;
+ }
+ pr_debug("ratiometric reference raw: VDD:0x%x GND:0x%x\n",
+ calib_read_1, calib_read_2);
+
+ if (calib_read_1 == calib_read_2) {
+ pr_err("ratiometric reference raw: VDD:0x%x GND:0x%x\n",
+ calib_read_1, calib_read_2);
+ rc = -EINVAL;
+ goto calib_fail;
+ }
+
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy =
+ (calib_read_1 - calib_read_2);
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx =
+ vadc->adc->adc_prop->adc_vdd_reference;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_vref
+ = calib_read_1;
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd
+ = calib_read_2;
+
+calib_fail:
+ return rc;
+}
+
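+/*
+ * Return the calibration graph (code delta dy, voltage delta dx and the
+ * ground reading) captured during the most recent absolute or ratiometric
+ * calibration, for use by the scaling functions.
+ */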
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *vadc,
+ struct qpnp_vadc_linear_graph *param,
+ enum qpnp_adc_calib_type calib_type)
+{
+ int rc = 0;
+
+ rc = qpnp_vadc_is_valid(vadc);
+ if (rc < 0)
+ return rc;
+
+ switch (calib_type) {
+ case CALIB_RATIOMETRIC:
+ param->dy =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dy;
+ param->dx =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].dx;
+ param->adc_vref = vadc->adc->adc_prop->adc_vdd_reference;
+ param->adc_gnd =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_RATIOMETRIC].adc_gnd;
+ break;
+ case CALIB_ABSOLUTE:
+ param->dy =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dy;
+ param->dx =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].dx;
+ param->adc_vref = vadc->adc->adc_prop->adc_vdd_reference;
+ param->adc_gnd =
+ vadc->adc->amux_prop->chan_prop->adc_graph[CALIB_ABSOLUTE].adc_gnd;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_get_vadc_gain_and_offset);
+
+static int32_t qpnp_vadc_wait_for_req_sts_check(struct qpnp_vadc_chip *vadc)
+{
+ u8 status1 = 0;
+ int rc, count = 0;
+
+ /* Re-enable the peripheral */
+ rc = qpnp_vadc_enable(vadc, true);
+ if (rc) {
+ pr_err("vadc re-enable peripheral failed with %d\n", rc);
+ return rc;
+ }
+
+ /* The VADC_TM bank needs to be disabled for new conversion request */
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+ if (rc) {
+ pr_err("vadc read status1 failed with %d\n", rc);
+ return rc;
+ }
+
+ /* Disable the bank if a conversion is occurring */
+ while ((status1 & QPNP_VADC_STATUS1_REQ_STS) && (count < QPNP_RETRY)) {
+ /* Wait time is based on the optimum sampling rate, with enough
+ * buffer added to account for ADC conversions occurring on
+ * different peripheral banks */
+ usleep_range(QPNP_MIN_TIME, QPNP_MAX_TIME);
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+ if (rc < 0) {
+ pr_err("vadc disable failed with %d\n", rc);
+ return rc;
+ }
+ count++;
+ }
+
+ if (count >= QPNP_RETRY)
+ pr_err("QPNP vadc status req bit did not fall low!!\n");
+
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+
+ /* Disable the peripheral */
+ rc = qpnp_vadc_enable(vadc, false);
+ if (rc < 0)
+ pr_err("vadc peripheral disable failed with %d\n", rc);
+
+ return rc;
+}
+
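+/*
+ * Park or restore the recurring measurement-interval configuration so a
+ * one-shot conversion can temporarily take over the VADC peripheral. The
+ * pending interval request is re-queued once the one-shot read completes.
+ */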
+static int32_t qpnp_vadc_manage_meas_int_requests(struct qpnp_vadc_chip *chip)
+{
+ struct qpnp_vadc_chip *vadc = dev_get_drvdata(chip->dev);
+ int rc = 0, dt_index = 0;
+ u8 mode_ctl = 0;
+
+ pr_debug("meas_int_mode:0x%x, mode_ctl:%0x\n",
+ vadc->state_copy->meas_int_mode, mode_ctl);
+
+ if (vadc->state_copy->meas_int_mode) {
+ pr_debug("meas interval in progress. Proceeding to disable it\n");
+ /* measurement interval in progress. Proceed to disable it */
+ mode_ctl = ADC_OP_NORMAL_MODE;
+ rc = qpnp_vadc_mode_select(vadc, mode_ctl);
+ if (rc < 0) {
+ pr_err("NORM mode select failed with %d\n", rc);
+ return rc;
+ }
+
+ /* Disable bank */
+ rc = qpnp_vadc_enable(vadc, false);
+ if (rc) {
+ pr_err("Disable bank failed with %d\n", rc);
+ return rc;
+ }
+
+ /* Check if a conversion is in progress */
+ rc = qpnp_vadc_wait_for_req_sts_check(vadc);
+ if (rc < 0) {
+ pr_err("req_sts check failed with %d\n", rc);
+ return rc;
+ }
+
+ vadc->state_copy->meas_int_mode = false;
+ vadc->state_copy->meas_int_request_in_queue = true;
+ } else if (vadc->state_copy->meas_int_request_in_queue) {
+ /* put the meas interval back in queue */
+ pr_debug("put meas interval back in queue\n");
+ vadc->adc->amux_prop->amux_channel =
+ vadc->state_copy->vadc_meas_amux.channel_num;
+ while ((vadc->adc->adc_channels[dt_index].channel_num
+ != vadc->adc->amux_prop->amux_channel) &&
+ (dt_index < vadc->max_channels_available))
+ dt_index++;
+ if (dt_index >= vadc->max_channels_available) {
+ pr_err("not a valid VADC channel\n");
+ rc = -EINVAL;
+ return rc;
+ }
+
+ vadc->adc->amux_prop->decimation =
+ vadc->adc->adc_channels[dt_index].adc_decimation;
+ vadc->adc->amux_prop->hw_settle_time =
+ vadc->adc->adc_channels[dt_index].hw_settle_time;
+ vadc->adc->amux_prop->fast_avg_setup =
+ vadc->adc->adc_channels[dt_index].fast_avg_setup;
+ vadc->adc->amux_prop->mode_sel = ADC_OP_MEASUREMENT_INTERVAL;
+ rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+ if (rc) {
+ pr_err("vadc configure failed with %d\n", rc);
+ return rc;
+ }
+
+ vadc->state_copy->meas_int_mode = true;
+ vadc->state_copy->meas_int_request_in_queue = false;
+ }
+ dev_set_drvdata(vadc->dev, vadc);
+
+ return 0;
+}
+
+struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev, const char *name)
+{
+ struct qpnp_vadc_chip *vadc;
+ struct device_node *node = NULL;
+ char prop_name[QPNP_MAX_PROP_NAME_LEN];
+
+ snprintf(prop_name, QPNP_MAX_PROP_NAME_LEN, "qcom,%s-vadc", name);
+
+ node = of_parse_phandle(dev->of_node, prop_name, 0);
+ if (node == NULL)
+ return ERR_PTR(-ENODEV);
+
+ list_for_each_entry(vadc, &qpnp_vadc_device_list, list)
+ if (vadc->adc->spmi->dev.of_node == node)
+ return vadc;
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(qpnp_get_vadc);
+
+int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *vadc,
+ enum qpnp_vadc_trigger trigger_channel,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{
+ int rc = 0, scale_type, amux_prescaling, dt_index = 0, calib_type = 0;
+ uint32_t ref_channel, count = 0, local_idx = 0;
+ int32_t vref_calib = 0, gnd_calib = 0, new_vref_calib = 0, offset = 0;
+ int32_t calib_offset = 0;
+ u8 status1 = 0;
+
+ if (qpnp_vadc_is_valid(vadc))
+ return -EPROBE_DEFER;
+
+ mutex_lock(&vadc->adc->adc_lock);
+
+ if (vadc->state_copy->vadc_meas_int_enable)
+ qpnp_vadc_manage_meas_int_requests(vadc);
+
+ if (channel == REF_625MV) {
+ qpnp_vadc_625mv_channel_sel(vadc, &ref_channel);
+ channel = ref_channel;
+ }
+
+ vadc->adc->amux_prop->amux_channel = channel;
+
+ while ((vadc->adc->adc_channels[dt_index].channel_num
+ != channel) && (dt_index < vadc->max_channels_available))
+ dt_index++;
+
+ if (dt_index >= vadc->max_channels_available) {
+ pr_err("not a valid VADC channel\n");
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+
+ calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+ if (calib_type >= CALIB_NONE) {
+ pr_err("not a valid calib_type\n");
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+ calib_offset = (calib_type == CALIB_ABSOLUTE) ?
+ QPNP_VADC_ABSOLUTE_RECALIB_OFFSET :
+ QPNP_VADC_RATIOMETRIC_RECALIB_OFFSET;
+ rc = qpnp_vadc_version_check(vadc);
+ if (rc)
+ goto fail_unlock;
+ if (vadc->vadc_recalib_check) {
+ rc = qpnp_vadc_calib_vref(vadc, calib_type, &vref_calib);
+ if (rc) {
+ pr_err("Calibration failed\n");
+ goto fail_unlock;
+ }
+ } else if (!vadc->vadc_init_calib) {
+ rc = qpnp_vadc_calib_device(vadc);
+ if (rc) {
+ pr_err("Calibration failed\n");
+ goto fail_unlock;
+ } else {
+ vadc->vadc_init_calib = true;
+ }
+ }
+
+recalibrate:
+ status1 = 0;
+ vadc->adc->amux_prop->decimation =
+ vadc->adc->adc_channels[dt_index].adc_decimation;
+ vadc->adc->amux_prop->hw_settle_time =
+ vadc->adc->adc_channels[dt_index].hw_settle_time;
+ vadc->adc->amux_prop->fast_avg_setup =
+ vadc->adc->adc_channels[dt_index].fast_avg_setup;
+
+ if (trigger_channel < ADC_SEQ_NONE)
+ vadc->adc->amux_prop->mode_sel = (ADC_OP_CONVERSION_SEQUENCER
+ << QPNP_VADC_OP_MODE_SHIFT);
+ else if (trigger_channel == ADC_SEQ_NONE)
+ vadc->adc->amux_prop->mode_sel = (ADC_OP_NORMAL_MODE
+ << QPNP_VADC_OP_MODE_SHIFT);
+ else {
+ pr_err("Invalid trigger channel:%d\n", trigger_channel);
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+
+ vadc->adc->amux_prop->trigger_channel = trigger_channel;
+
+ rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+ if (rc) {
+ pr_err("qpnp vadc configure failed with %d\n", rc);
+ goto fail_unlock;
+ }
+
+ if (vadc->vadc_poll_eoc) {
+ while (status1 != QPNP_VADC_STATUS1_EOC) {
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1,
+ &status1, 1);
+ if (rc < 0)
+ goto fail_unlock;
+ status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+ if (status1 == QPNP_VADC_STATUS1_EOC)
+ break;
+ usleep_range(QPNP_VADC_CONV_TIME_MIN,
+ QPNP_VADC_CONV_TIME_MAX);
+ count++;
+ if (count > QPNP_VADC_ERR_COUNT) {
+ pr_err("retry error exceeded\n");
+ rc = qpnp_vadc_status_debug(vadc);
+ if (rc < 0)
+ pr_err("VADC disable failed\n");
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+ }
+ } else {
+ rc = wait_for_completion_timeout(
+ &vadc->adc->adc_rslt_completion,
+ QPNP_ADC_COMPLETION_TIMEOUT);
+ if (!rc) {
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1,
+ &status1, 1);
+ if (rc < 0)
+ goto fail_unlock;
+ status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+ if (status1 == QPNP_VADC_STATUS1_EOC)
+ pr_debug("End of conversion status set\n");
+ else {
+ rc = qpnp_vadc_status_debug(vadc);
+ if (rc < 0)
+ pr_err("VADC disable failed\n");
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+ }
+ }
+
+ if (trigger_channel < ADC_SEQ_NONE) {
+ rc = qpnp_vadc_read_status(vadc,
+ vadc->adc->amux_prop->mode_sel);
+ if (rc)
+ pr_debug("Conversion sequence timed out - %d\n", rc);
+ }
+
+ rc = qpnp_vadc_read_conversion_result(vadc, &result->adc_code);
+ if (rc) {
+ pr_err("qpnp vadc read adc code failed with %d\n", rc);
+ goto fail_unlock;
+ }
+
+ if (vadc->vadc_recalib_check) {
+ rc = qpnp_vadc_calib_gnd(vadc, calib_type, &gnd_calib);
+ if (rc) {
+ pr_err("Calibration failed\n");
+ goto fail_unlock;
+ }
+ rc = qpnp_vadc_calib_vref(vadc, calib_type, &new_vref_calib);
+ if (rc < 0) {
+ pr_err("qpnp vadc calib read failed with %d\n", rc);
+ goto fail_unlock;
+ }
+
+ if (local_idx >= QPNP_VADC_RECALIB_MAXCNT) {
+ pr_err("invalid recalib count=%d\n", local_idx);
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+ pr_debug(
+ "chan=%d, calib=%s, vref_calib=0x%x, gnd_calib=0x%x, new_vref_calib=0x%x\n",
+ channel,
+ ((calib_type == CALIB_ABSOLUTE) ?
+ "ABSOLUTE" : "RATIOMETRIC"),
+ vref_calib, gnd_calib, new_vref_calib);
+
+ offset = (new_vref_calib - vref_calib);
+ if (offset < 0)
+ offset = -offset;
+ if (offset <= calib_offset) {
+ pr_debug(
+ "qpnp vadc recalibration not required, offset:%d\n",
+ offset);
+ local_idx = 0;
+ vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dy =
+ (vref_calib - gnd_calib);
+ vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].dx =
+ (calib_type == CALIB_ABSOLUTE) ? QPNP_ADC_625_UV :
+ vadc->adc->adc_prop->adc_vdd_reference;
+ vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_vref
+ = vref_calib;
+ vadc->adc->amux_prop->chan_prop->adc_graph[calib_type].adc_gnd
+ = gnd_calib;
+ } else {
+ vref_calib = new_vref_calib;
+ local_idx = local_idx + 1;
+ if (local_idx >= QPNP_VADC_RECALIB_MAXCNT) {
+ pr_err(
+ "qpnp_vadc recalibration failed, count=%d\n",
+ local_idx);
+ } else {
+ pr_debug(
+ "qpnp vadc recalibration requested, offset:%d\n",
+ offset);
+ offset = 0;
+ goto recalibrate;
+ }
+ }
+ }
+
+ amux_prescaling =
+ vadc->adc->adc_channels[dt_index].chan_path_prescaling;
+
+ if (amux_prescaling >= PATH_SCALING_NONE) {
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+
+ vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+ vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+ vadc->adc->amux_prop->chan_prop->calib_type =
+ vadc->adc->adc_channels[dt_index].calib_type;
+
+ scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
+ if (scale_type >= SCALE_NONE) {
+ rc = -EBADF;
+ goto fail_unlock;
+ }
+
+ if ((qpnp_vadc_channel_post_scaling_calib_check(vadc, channel)) < 0)
+ pr_debug("Post scaling calib type not updated\n");
+
+ vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
+ vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+ pr_debug("channel=%d, adc_code=%d adc_result=%lld\n",
+ channel, result->adc_code, result->physical);
+
+fail_unlock:
+ if (vadc->state_copy->vadc_meas_int_enable)
+ qpnp_vadc_manage_meas_int_requests(vadc);
+
+ mutex_unlock(&vadc->adc->adc_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_conv_seq_request);
+
+int32_t qpnp_vadc_read(struct qpnp_vadc_chip *vadc,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{
+ struct qpnp_vadc_result die_temp_result;
+ int rc = 0;
+ enum power_supply_property prop;
+ union power_supply_propval ret = {0, };
+
+ if (vadc->vadc_hc) {
+ rc = qpnp_vadc_hc_read(vadc, channel, result);
+ if (rc < 0) {
+ pr_err("Error reading vadc_hc channel %d\n", channel);
+ return rc;
+ }
+
+ return 0;
+ }
+
+ if (channel == VBAT_SNS) {
+ rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+ channel, result);
+ if (rc < 0) {
+ pr_err("Error reading vbatt\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+ DIE_TEMP, &die_temp_result);
+ if (rc < 0) {
+ pr_err("Error reading die_temp\n");
+ return rc;
+ }
+
+ rc = qpnp_vbat_sns_comp(&result->physical, vadc,
+ die_temp_result.physical);
+ if (rc < 0)
+ pr_err("Error with vbat compensation\n");
+
+ return 0;
+ } else if (channel == SPARE2) {
+ /* chg temp channel */
+ if (!vadc->vadc_chg_vote) {
+ vadc->vadc_chg_vote =
+ power_supply_get_by_name("battery");
+ if (!vadc->vadc_chg_vote) {
+ pr_err("no vadc_chg_vote found\n");
+ return -EINVAL;
+ }
+ }
+
+ prop = POWER_SUPPLY_PROP_FORCE_TLIM;
+ ret.intval = 1;
+
+ rc = vadc->vadc_chg_vote->set_property(vadc->vadc_chg_vote,
+ prop, &ret);
+ if (rc) {
+ pr_err("error enabling the charger circuitry vote\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+ channel, result);
+ if (rc < 0)
+ pr_err("Error reading die_temp\n");
+
+ ret.intval = 0;
+ rc = vadc->vadc_chg_vote->set_property(vadc->vadc_chg_vote,
+ prop, &ret);
+ if (rc) {
+ pr_err("error disabling the charger circuitry vote\n");
+ return rc;
+ }
+
+ return 0;
+ } else
+ return qpnp_vadc_conv_seq_request(vadc, ADC_SEQ_NONE,
+ channel, result);
+}
+EXPORT_SYMBOL(qpnp_vadc_read);
+
+static void qpnp_vadc_lock(struct qpnp_vadc_chip *vadc)
+{
+ mutex_lock(&vadc->adc->adc_lock);
+}
+
+static void qpnp_vadc_unlock(struct qpnp_vadc_chip *vadc)
+{
+ mutex_unlock(&vadc->adc->adc_lock);
+}
+
+int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *vadc,
+ enum qpnp_vadc_channels channel)
+{
+ int rc = 0, dt_index = 0, calib_type = 0;
+
+ if (qpnp_vadc_is_valid(vadc))
+ return -EPROBE_DEFER;
+
+ qpnp_vadc_lock(vadc);
+
+
+ vadc->adc->amux_prop->amux_channel = channel;
+
+ while ((vadc->adc->adc_channels[dt_index].channel_num
+ != channel) && (dt_index < vadc->max_channels_available))
+ dt_index++;
+
+ if (dt_index >= vadc->max_channels_available) {
+ pr_err("not a valid VADC channel\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+ if (!vadc->vadc_init_calib) {
+ rc = qpnp_vadc_version_check(vadc);
+ if (rc)
+ goto fail;
+
+ rc = qpnp_vadc_calib_device(vadc);
+ if (rc) {
+ pr_err("Calibration failed\n");
+ goto fail;
+ } else
+ vadc->vadc_init_calib = true;
+ }
+
+ vadc->adc->amux_prop->decimation =
+ vadc->adc->adc_channels[dt_index].adc_decimation;
+ vadc->adc->amux_prop->hw_settle_time =
+ vadc->adc->adc_channels[dt_index].hw_settle_time;
+ vadc->adc->amux_prop->fast_avg_setup =
+ vadc->adc->adc_channels[dt_index].fast_avg_setup;
+ vadc->adc->amux_prop->mode_sel = (ADC_OP_NORMAL_MODE
+ << QPNP_VADC_OP_MODE_SHIFT);
+ vadc->vadc_iadc_sync_lock = true;
+
+ rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+ if (rc) {
+ pr_err("qpnp vadc configure failed with %d\n", rc);
+ goto fail;
+ }
+
+ return rc;
+fail:
+ vadc->vadc_iadc_sync_lock = false;
+ qpnp_vadc_unlock(vadc);
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_iadc_sync_request);
+
+int32_t qpnp_vadc_iadc_sync_complete_request(struct qpnp_vadc_chip *vadc,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{
+ int rc = 0, scale_type, amux_prescaling, dt_index = 0;
+
+ vadc->adc->amux_prop->amux_channel = channel;
+
+ while ((vadc->adc->adc_channels[dt_index].channel_num
+ != channel) && (dt_index < vadc->max_channels_available))
+ dt_index++;
+
+ if (dt_index >= vadc->max_channels_available) {
+ pr_err("not a valid VADC channel\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ rc = qpnp_vadc_read_conversion_result(vadc, &result->adc_code);
+ if (rc) {
+ pr_err("qpnp vadc read adc code failed with %d\n", rc);
+ goto fail;
+ }
+
+ amux_prescaling =
+ vadc->adc->adc_channels[dt_index].chan_path_prescaling;
+
+ if (amux_prescaling >= PATH_SCALING_NONE) {
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+ vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+
+ scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
+ if (scale_type >= SCALE_NONE) {
+ rc = -EBADF;
+ goto fail;
+ }
+
+ vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
+ vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+fail:
+ vadc->vadc_iadc_sync_lock = false;
+ qpnp_vadc_unlock(vadc);
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_iadc_sync_complete_request);
+
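+/*
+ * Program the low/high threshold LSB/MSB registers used by the recurring
+ * measurement-interval monitoring mode.
+ */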
+static int32_t qpnp_vadc_thr_update(struct qpnp_vadc_chip *vadc,
+ int32_t high_thr, int32_t low_thr)
+{
+ int rc = 0;
+ u8 buf = 0;
+
+ pr_debug("client requested high:%d and low:%d\n",
+ high_thr, low_thr);
+
+ buf = QPNP_VADC_THR_LSB_MASK(low_thr);
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_LOW_THR_LSB, &buf, 1);
+ if (rc < 0) {
+ pr_err("low threshold lsb setting failed, err:%d\n", rc);
+ return rc;
+ }
+
+ buf = QPNP_VADC_THR_MSB_MASK(low_thr);
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_LOW_THR_MSB, &buf, 1);
+ if (rc < 0) {
+ pr_err("low threshold msb setting failed, err:%d\n", rc);
+ return rc;
+ }
+
+ buf = QPNP_VADC_THR_LSB_MASK(high_thr);
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HIGH_THR_LSB, &buf, 1);
+ if (rc < 0) {
+ pr_err("high threshold lsb setting failed, err:%d\n", rc);
+ return rc;
+ }
+
+ buf = QPNP_VADC_THR_MSB_MASK(high_thr);
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HIGH_THR_MSB, &buf, 1);
+ if (rc < 0) {
+ pr_err("high threshold msb setting failed, err:%d\n", rc);
+ return rc;
+ }
+
+ pr_debug("client requested high:%d and low:%d\n", high_thr, low_thr);
+
+ return rc;
+}
+
+int32_t qpnp_vadc_channel_monitor(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param)
+{
+ uint32_t channel, scale_type = 0;
+ uint32_t low_thr = 0, high_thr = 0;
+ int rc = 0, idx = 0, amux_prescaling = 0;
+ struct qpnp_vadc_chip *vadc = dev_get_drvdata(chip->dev);
+ u8 buf = 0;
+
+ if (qpnp_vadc_is_valid(vadc))
+ return -EPROBE_DEFER;
+
+ if (!vadc->state_copy->vadc_meas_int_enable) {
+ pr_err("Recurring measurement interval not available\n");
+ return -EINVAL;
+ }
+
+ if (param->threshold_notification == NULL) {
+ pr_debug("No notification for high/low temp??\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&vadc->adc->adc_lock);
+
+ channel = param->channel;
+ while (idx < vadc->max_channels_available) {
+ if (vadc->adc->adc_channels[idx].channel_num == channel)
+ break;
+ else
+ idx++;
+ }
+
+ if (idx >= vadc->max_channels_available) {
+ pr_err("not a valid VADC channel\n");
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+
+ scale_type = vadc->adc->adc_channels[idx].adc_scale_fn;
+ if (scale_type >= SCALE_RVADC_SCALE_NONE) {
+ rc = -EBADF;
+ goto fail_unlock;
+ }
+
+ amux_prescaling =
+ vadc->adc->adc_channels[idx].chan_path_prescaling;
+
+ if (amux_prescaling >= PATH_SCALING_NONE) {
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+
+ vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+ vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+ vadc->adc->amux_prop->chan_prop->calib_type =
+ vadc->adc->adc_channels[idx].calib_type;
+
+ pr_debug("channel:%d, scale_type:%d, dt_idx:%d",
+ channel, scale_type, idx);
+ vadc->adc->amux_prop->amux_channel = channel;
+ vadc->adc->amux_prop->decimation =
+ vadc->adc->adc_channels[idx].adc_decimation;
+ vadc->adc->amux_prop->hw_settle_time =
+ vadc->adc->adc_channels[idx].hw_settle_time;
+ vadc->adc->amux_prop->fast_avg_setup =
+ vadc->adc->adc_channels[idx].fast_avg_setup;
+ vadc->adc->amux_prop->mode_sel = ADC_OP_MEASUREMENT_INTERVAL;
+ adc_vadc_rscale_fn[scale_type].chan(vadc,
+ vadc->adc->amux_prop->chan_prop, param,
+ &low_thr, &high_thr);
+
+ if (param->timer_interval >= ADC_MEAS1_INTERVAL_NONE) {
+ pr_err("Invalid timer interval:%d\n", param->timer_interval);
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+
+ buf = param->timer_interval;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_MEAS_INTERVAL_CTL, &buf, 1);
+ if (rc) {
+ pr_err("vadc meas timer failed with %d\n", rc);
+ goto fail_unlock;
+ }
+
+ rc = qpnp_vadc_thr_update(vadc, high_thr, low_thr);
+ if (rc) {
+ pr_err("vadc thr update failed with %d\n", rc);
+ goto fail_unlock;
+ }
+
+ rc = qpnp_vadc_configure(vadc, vadc->adc->amux_prop);
+ if (rc) {
+ pr_err("vadc configure failed with %d\n", rc);
+ goto fail_unlock;
+ }
+
+ vadc->state_copy->meas_int_mode = true;
+ vadc->state_copy->param = param;
+ vadc->state_copy->vadc_meas_amux.channel_num = channel;
+ vadc->state_copy->vadc_meas_amux.adc_decimation =
+ vadc->adc->amux_prop->decimation;
+ vadc->state_copy->vadc_meas_amux.hw_settle_time =
+ vadc->adc->amux_prop->hw_settle_time;
+ vadc->state_copy->vadc_meas_amux.fast_avg_setup =
+ vadc->adc->amux_prop->fast_avg_setup;
+ vadc->state_copy->meas_int_request_in_queue = false;
+ dev_set_drvdata(vadc->dev, vadc);
+
+fail_unlock:
+ mutex_unlock(&vadc->adc->adc_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_channel_monitor);
+
+int32_t qpnp_vadc_end_channel_monitor(struct qpnp_vadc_chip *chip)
+{
+ struct qpnp_vadc_chip *vadc = dev_get_drvdata(chip->dev);
+ u8 mode_ctl = 0;
+
+ if (qpnp_vadc_is_valid(vadc))
+ return -EPROBE_DEFER;
+
+ if (!vadc->state_copy->vadc_meas_int_enable) {
+ pr_err("Recurring measurement interval not available\n");
+ return -EINVAL;
+ }
+
+ vadc->state_copy->meas_int_mode = false;
+ vadc->state_copy->meas_int_request_in_queue = false;
+ dev_set_drvdata(vadc->dev, vadc);
+ mode_ctl = ADC_OP_NORMAL_MODE;
+ /* Set measurement in single measurement mode */
+ qpnp_vadc_mode_select(vadc, mode_ctl);
+ qpnp_vadc_enable(vadc, false);
+
+ return 0;
+}
+EXPORT_SYMBOL(qpnp_vadc_end_channel_monitor);
+
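+/*
+ * Poll STATUS1 until the EOC bit is set, dumping the status registers and
+ * giving up after QPNP_VADC_HC1_ERR_COUNT retries.
+ */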
+static int qpnp_vadc_hc_check_conversion_status(struct qpnp_vadc_chip *vadc)
+{
+ int rc = 0, count = 0;
+ u8 status1 = 0;
+
+ while (status1 != QPNP_VADC_STATUS1_EOC) {
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status1, 1);
+ if (rc < 0)
+ return rc;
+ status1 &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+ if (status1 == QPNP_VADC_STATUS1_EOC)
+ break;
+ usleep_range(QPNP_VADC_HC1_CONV_TIME_MIN_US,
+ QPNP_VADC_HC1_CONV_TIME_MAX_US);
+ count++;
+ if (count > QPNP_VADC_HC1_ERR_COUNT) {
+ pr_err("retry error exceeded\n");
+ rc = qpnp_vadc_status_debug(vadc);
+ if (rc < 0)
+ pr_err("VADC disable failed\n");
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
+static int qpnp_vadc_hc_read_data(struct qpnp_vadc_chip *vadc, int *data)
+{
+ int rc = 0;
+ u8 buf = 0, rslt_lsb = 0, rslt_msb = 0;
+
+ /* Set hold bit */
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_DATA_HOLD_CTL, &buf, 1);
+ if (rc) {
+ pr_err("data hold ctl register read failed\n");
+ return rc;
+ }
+ buf |= QPNP_VADC_HC1_DATA_HOLD_CTL_FIELD;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_DATA_HOLD_CTL, &buf, 1);
+ if (rc) {
+ pr_err("data hold ctl register write failed\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_DATA0, &rslt_lsb, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed for data0\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_DATA1, &rslt_msb, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc result read failed for data1\n");
+ return rc;
+ }
+
+ *data = (rslt_msb << 8) | rslt_lsb;
+
+ if (*data == QPNP_VADC_HC1_DATA_CHECK_USR) {
+ pr_err("Invalid data :0x%x\n", *data);
+ return -EINVAL;
+ }
+
+ rc = qpnp_vadc_enable(vadc, false);
+ if (rc) {
+ pr_err("VADC disable failed\n");
+ return rc;
+ }
+
+ /* De-assert hold bit */
+ buf &= ~QPNP_VADC_HC1_DATA_HOLD_CTL_FIELD;
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_DATA_HOLD_CTL, &buf, 1);
+ if (rc)
+ pr_err("de-asserting hold bit failed\n");
+
+ return rc;
+}
+
+static void qpnp_vadc_hc_update_adc_dig_param(struct qpnp_vadc_chip *vadc,
+ struct qpnp_adc_amux *amux_prop, u8 *data)
+{
+ /* Update CAL value */
+ *data &= ~QPNP_VADC_HC1_CAL_VAL;
+ *data |= (amux_prop->cal_val << QPNP_VADC_HC1_CAL_VAL_SHIFT);
+
+ /* Update CAL select */
+ *data &= ~QPNP_VADC_HC1_CAL_SEL_MASK;
+ *data |= (amux_prop->calib_type << QPNP_VADC_HC1_CAL_SEL_SHIFT);
+
+ /* Update Decimation ratio select */
+ *data &= ~QPNP_VADC_HC1_DEC_RATIO_SEL;
+ *data |= (amux_prop->adc_decimation << QPNP_VADC_HC1_DEC_RATIO_SHIFT);
+
+ pr_debug("VADC_DIG_PARAM value:0x%x\n", *data);
+}
+
+static int qpnp_vadc_hc_configure(struct qpnp_vadc_chip *vadc,
+ struct qpnp_adc_amux *amux_prop)
+{
+ int rc = 0;
+ u8 buf[6];
+
+ /* Read registers 0x42 through 0x46 */
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+ if (rc < 0) {
+ pr_err("qpnp adc configure block read failed\n");
+ return rc;
+ }
+
+ /* ADC Digital param selection */
+ qpnp_vadc_hc_update_adc_dig_param(vadc, amux_prop, &buf[0]);
+
+ /* Update fast average sample value */
+ buf[1] &= (u8) ~QPNP_VADC_HC1_FAST_AVG_SAMPLES_MASK;
+ buf[1] |= amux_prop->fast_avg_setup;
+
+ /* Select ADC channel */
+ buf[2] = amux_prop->channel_num;
+
+ /* Select hw settle delay for the channel */
+ buf[3] &= (u8) ~QPNP_VADC_HC1_DELAY_CTL_MASK;
+ buf[3] |= amux_prop->hw_settle_time;
+
+ /* Select ADC enable */
+ buf[4] |= QPNP_VADC_HC1_ADC_EN;
+
+ /* Select CONV request */
+ buf[5] |= QPNP_VADC_HC1_CONV_REQ_START;
+
+ if (!vadc->vadc_poll_eoc)
+ reinit_completion(&vadc->adc->adc_rslt_completion);
+
+ pr_debug("dig:0x%x, fast_avg:0x%x, channel:0x%x, hw_settle:0x%x\n",
+ buf[0], buf[1], buf[2], buf[3]);
+
+ /* Block register write from 0x42 through 0x47 */
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+ if (rc < 0) {
+ pr_err("qpnp adc block register configure failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
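+/*
+ * One-shot conversion on the VADC_HC peripheral: configure the digital
+ * parameters for the requested channel, wait for EOC by polling or
+ * interrupt, then read the held result and apply the channel scaling
+ * function.
+ */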
+int32_t qpnp_vadc_hc_read(struct qpnp_vadc_chip *vadc,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{
+ int rc = 0, scale_type, amux_prescaling, dt_index = 0, calib_type = 0;
+ struct qpnp_adc_amux amux_prop;
+
+ if (qpnp_vadc_is_valid(vadc))
+ return -EPROBE_DEFER;
+
+ mutex_lock(&vadc->adc->adc_lock);
+
+ while ((vadc->adc->adc_channels[dt_index].channel_num
+ != channel) && (dt_index < vadc->max_channels_available))
+ dt_index++;
+
+ if (dt_index >= vadc->max_channels_available) {
+ pr_err("not a valid VADC channel:%d\n", channel);
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+
+ calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+ if (calib_type >= ADC_HC_CAL_SEL_NONE) {
+ pr_err("not a valid calib_type\n");
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+
+ amux_prop.adc_decimation =
+ vadc->adc->adc_channels[dt_index].adc_decimation;
+ amux_prop.calib_type = vadc->adc->adc_channels[dt_index].calib_type;
+ amux_prop.cal_val = vadc->adc->adc_channels[dt_index].cal_val;
+ amux_prop.fast_avg_setup =
+ vadc->adc->adc_channels[dt_index].fast_avg_setup;
+ amux_prop.channel_num = channel;
+ amux_prop.hw_settle_time =
+ vadc->adc->adc_channels[dt_index].hw_settle_time;
+
+ rc = qpnp_vadc_hc_configure(vadc, &amux_prop);
+ if (rc < 0) {
+ pr_err("Configuring VADC channel failed with %d\n", rc);
+ goto fail_unlock;
+ }
+
+ if (vadc->vadc_poll_eoc) {
+ rc = qpnp_vadc_hc_check_conversion_status(vadc);
+ if (rc < 0) {
+ pr_err("polling mode conversion failed\n");
+ goto fail_unlock;
+ }
+ } else {
+ rc = wait_for_completion_timeout(
+ &vadc->adc->adc_rslt_completion,
+ QPNP_ADC_COMPLETION_TIMEOUT);
+ if (!rc) {
+ rc = qpnp_vadc_hc_check_conversion_status(vadc);
+ if (rc < 0) {
+ pr_err("interrupt mode conversion failed\n");
+ goto fail_unlock;
+ }
+ pr_debug("End of conversion status set\n");
+ }
+ }
+
+ rc = qpnp_vadc_hc_read_data(vadc, &result->adc_code);
+ if (rc) {
+ pr_err("qpnp vadc read adc code failed with %d\n", rc);
+ goto fail_unlock;
+ }
+
+ amux_prescaling =
+ vadc->adc->adc_channels[dt_index].chan_path_prescaling;
+
+ if (amux_prescaling >= PATH_SCALING_NONE) {
+ rc = -EINVAL;
+ goto fail_unlock;
+ }
+
+ vadc->adc->amux_prop->chan_prop->offset_gain_numerator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].num;
+ vadc->adc->amux_prop->chan_prop->offset_gain_denominator =
+ qpnp_vadc_amux_scaling_ratio[amux_prescaling].den;
+
+ scale_type = vadc->adc->adc_channels[dt_index].adc_scale_fn;
+ if (scale_type >= SCALE_NONE) {
+ rc = -EBADF;
+ goto fail_unlock;
+ }
+
+ /* Note: Scaling functions for VADC_HC do not need offset/gain */
+ vadc_scale_fn[scale_type].chan(vadc, result->adc_code,
+ vadc->adc->adc_prop, vadc->adc->amux_prop->chan_prop, result);
+
+ pr_debug("channel=0x%x, adc_code=0x%x adc_result=%lld\n",
+ channel, result->adc_code, result->physical);
+
+fail_unlock:
+ mutex_unlock(&vadc->adc->adc_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_vadc_hc_read);
+
+static ssize_t qpnp_adc_show(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct qpnp_vadc_chip *vadc = dev_get_drvdata(dev);
+ struct qpnp_vadc_result result;
+ int rc = -1;
+
+ rc = qpnp_vadc_read(vadc, attr->index, &result);
+
+ if (rc) {
+ pr_err("VADC read error with %d\n", rc);
+ return 0;
+ }
+
+ return snprintf(buf, QPNP_ADC_HWMON_NAME_LENGTH,
+ "Result:%lld Raw:%x\n", result.physical, result.adc_code);
+}
+
+static struct sensor_device_attribute qpnp_adc_attr =
+ SENSOR_ATTR(NULL, S_IRUGO, qpnp_adc_show, NULL, 0);
+
+static int32_t qpnp_vadc_init_hwmon(struct qpnp_vadc_chip *vadc,
+ struct spmi_device *spmi)
+{
+ struct device_node *child;
+ struct device_node *node = spmi->dev.of_node;
+ int rc = 0, i = 0, channel;
+
+ for_each_child_of_node(node, child) {
+ channel = vadc->adc->adc_channels[i].channel_num;
+ qpnp_adc_attr.index = vadc->adc->adc_channels[i].channel_num;
+ qpnp_adc_attr.dev_attr.attr.name =
+ vadc->adc->adc_channels[i].name;
+ memcpy(&vadc->sens_attr[i], &qpnp_adc_attr,
+ sizeof(qpnp_adc_attr));
+ sysfs_attr_init(&vadc->sens_attr[i].dev_attr.attr);
+ rc = device_create_file(&spmi->dev,
+ &vadc->sens_attr[i].dev_attr);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "device_create_file failed for dev %s\n",
+ vadc->adc->adc_channels[i].name);
+ goto hwmon_err_sens;
+ }
+ i++;
+ }
+
+ return 0;
+hwmon_err_sens:
+ pr_err("Init HWMON failed for qpnp_adc with %d\n", rc);
+ return rc;
+}
+
+static int qpnp_vadc_get_temp(struct thermal_zone_device *thermal,
+ unsigned long *temp)
+{
+ struct qpnp_vadc_thermal_data *vadc_therm = thermal->devdata;
+ struct qpnp_vadc_chip *vadc = vadc_therm->vadc_dev;
+ struct qpnp_vadc_result result;
+ int rc = 0;
+
+ rc = qpnp_vadc_read(vadc,
+ vadc_therm->vadc_channel, &result);
+ if (rc) {
+ pr_err("VADC read error with %d\n", rc);
+ return rc;
+ }
+
+ *temp = result.physical;
+
+ return rc;
+}
+
+static struct thermal_zone_device_ops qpnp_vadc_thermal_ops = {
+ .get_temp = qpnp_vadc_get_temp,
+};
+
+static int32_t qpnp_vadc_init_thermal(struct qpnp_vadc_chip *vadc,
+ struct spmi_device *spmi)
+{
+ struct device_node *child;
+ struct device_node *node = spmi->dev.of_node;
+ int rc = 0, i = 0;
+ bool thermal_node = false;
+
+ if (node == NULL) {
+ rc = -EINVAL;
+ goto thermal_err_sens;
+ }
+ for_each_child_of_node(node, child) {
+ char name[QPNP_THERMALNODE_NAME_LENGTH];
+
+ vadc->vadc_therm_chan[i].vadc_channel =
+ vadc->adc->adc_channels[i].channel_num;
+ vadc->vadc_therm_chan[i].thermal_chan = i;
+ thermal_node = of_property_read_bool(child,
+ "qcom,vadc-thermal-node");
+ if (thermal_node) {
+ /* Register with the thermal zone */
+ vadc->vadc_therm_chan[i].thermal_node = true;
+ snprintf(name, sizeof(name), "%s",
+ vadc->adc->adc_channels[i].name);
+ vadc->vadc_therm_chan[i].vadc_dev = vadc;
+ vadc->vadc_therm_chan[i].tz_dev =
+ thermal_zone_device_register(name,
+ 0, 0, &vadc->vadc_therm_chan[i],
+ &qpnp_vadc_thermal_ops, NULL, 0, 0);
+ if (IS_ERR(vadc->vadc_therm_chan[i].tz_dev)) {
+ pr_err("thermal device register failed.\n");
+ rc = PTR_ERR(vadc->vadc_therm_chan[i].tz_dev);
+ goto thermal_err_sens;
+ }
+ }
+ i++;
+ thermal_node = false;
+ }
+ return 0;
+thermal_err_sens:
+ pr_err("Init thermal failed for qpnp_adc with %d\n", rc);
+ return rc;
+}
+
+static const struct of_device_id qpnp_vadc_match_table[] = {
+ { .compatible = "qcom,qpnp-vadc",
+ },
+ { .compatible = "qcom,qpnp-vadc-hc",
+ },
+ {}
+};
+
+static int qpnp_vadc_probe(struct spmi_device *spmi)
+{
+ struct qpnp_vadc_chip *vadc;
+ struct qpnp_adc_drv *adc_qpnp;
+ struct qpnp_vadc_thermal_data *adc_thermal;
+ struct device_node *node = spmi->dev.of_node;
+ struct device_node *child;
+ const struct of_device_id *id;
+ int rc, count_adc_channel_list = 0, i = 0;
+ u8 fab_id = 0;
+
+ for_each_child_of_node(node, child)
+ count_adc_channel_list++;
+
+ if (!count_adc_channel_list) {
+ pr_err("No channel listing\n");
+ return -EINVAL;
+ }
+
+ id = of_match_node(qpnp_vadc_match_table, node);
+ if (id == NULL) {
+ pr_err("qpnp_vadc_match of_node prop not present\n");
+ return -ENODEV;
+ }
+
+ vadc = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_vadc_chip) +
+ (sizeof(struct sensor_device_attribute) *
+ count_adc_channel_list), GFP_KERNEL);
+ if (!vadc) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vadc->dev = &(spmi->dev);
+ adc_qpnp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_adc_drv),
+ GFP_KERNEL);
+ if (!adc_qpnp)
+ return -ENOMEM;
+
+ vadc->state_copy = devm_kzalloc(&spmi->dev,
+ sizeof(struct qpnp_vadc_mode_state), GFP_KERNEL);
+ if (!vadc->state_copy)
+ return -ENOMEM;
+
+ vadc->adc = adc_qpnp;
+ adc_thermal = devm_kzalloc(&spmi->dev,
+ (sizeof(struct qpnp_vadc_thermal_data) *
+ count_adc_channel_list), GFP_KERNEL);
+ if (!adc_thermal) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ vadc->vadc_therm_chan = adc_thermal;
+ if (!strcmp(id->compatible, "qcom,qpnp-vadc-hc"))
+ vadc->vadc_hc = true;
+
+ rc = qpnp_adc_get_devicetree_data(spmi, vadc->adc);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to read device tree\n");
+ return rc;
+ }
+ mutex_init(&vadc->adc->adc_lock);
+
+ rc = qpnp_vadc_init_hwmon(vadc, spmi);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to initialize qpnp hwmon adc\n");
+ return rc;
+ }
+ vadc->vadc_hwmon = hwmon_device_register(&vadc->adc->spmi->dev);
+ rc = qpnp_vadc_init_thermal(vadc, spmi);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to initialize qpnp thermal adc\n");
+ return rc;
+ }
+ vadc->vadc_init_calib = false;
+ vadc->max_channels_available = count_adc_channel_list;
+ rc = qpnp_vadc_read_reg(vadc, QPNP_INT_TEST_VAL, &fab_id, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc comp id failed with %d\n", rc);
+ goto err_setup;
+ }
+ vadc->id = fab_id;
+ pr_debug("fab_id = %d\n", fab_id);
+
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_REVISION2,
+ &vadc->revision_dig_major, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc dig_major rev read failed with %d\n", rc);
+ goto err_setup;
+ }
+
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_REVISION3,
+ &vadc->revision_ana_minor, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc ana_minor rev read failed with %d\n", rc);
+ goto err_setup;
+ }
+
+ rc = qpnp_vadc_warm_rst_configure(vadc);
+ if (rc < 0) {
+ pr_err("Setting perp reset on warm reset failed %d\n", rc);
+ goto err_setup;
+ }
+
+ INIT_WORK(&vadc->trigger_completion_work, qpnp_vadc_work);
+
+ vadc->vadc_recalib_check = of_property_read_bool(node,
+ "qcom,vadc-recalib-check");
+
+ vadc->vadc_poll_eoc = of_property_read_bool(node,
+ "qcom,vadc-poll-eoc");
+ if (!vadc->vadc_poll_eoc) {
+ rc = devm_request_irq(&spmi->dev, vadc->adc->adc_irq_eoc,
+ qpnp_vadc_isr, IRQF_TRIGGER_RISING,
+ "qpnp_vadc_interrupt", vadc);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "failed to request adc irq with error %d\n", rc);
+ goto err_setup;
+ } else {
+ enable_irq_wake(vadc->adc->adc_irq_eoc);
+ }
+ } else
+ device_init_wakeup(vadc->dev, 1);
+
+ vadc->state_copy->vadc_meas_int_enable = of_property_read_bool(node,
+ "qcom,vadc-meas-int-mode");
+ if (vadc->state_copy->vadc_meas_int_enable) {
+ vadc->adc->adc_high_thr_irq = spmi_get_irq_byname(spmi,
+ NULL, "high-thr-en-set");
+ if (vadc->adc->adc_high_thr_irq < 0) {
+ pr_err("Invalid irq\n");
+ rc = -ENXIO;
+ goto err_setup;
+ }
+
+ vadc->adc->adc_low_thr_irq = spmi_get_irq_byname(spmi,
+ NULL, "low-thr-en-set");
+ if (vadc->adc->adc_low_thr_irq < 0) {
+ pr_err("Invalid irq\n");
+ rc = -ENXIO;
+ goto err_setup;
+ }
+
+ rc = devm_request_irq(&spmi->dev, vadc->adc->adc_high_thr_irq,
+ qpnp_vadc_high_thr_isr,
+ IRQF_TRIGGER_RISING, "qpnp_vadc_high_interrupt", vadc);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to request adc irq\n");
+ goto err_setup;
+ } else {
+ enable_irq_wake(vadc->adc->adc_high_thr_irq);
+ }
+
+ rc = devm_request_irq(&spmi->dev, vadc->adc->adc_low_thr_irq,
+ qpnp_vadc_low_thr_isr,
+ IRQF_TRIGGER_RISING, "qpnp_vadc_low_interrupt", vadc);
+ if (rc) {
+ dev_err(&spmi->dev, "failed to request adc irq\n");
+ goto err_setup;
+ } else {
+ enable_irq_wake(vadc->adc->adc_low_thr_irq);
+ }
+ INIT_WORK(&vadc->trigger_high_thr_work,
+ qpnp_vadc_high_thr_fn);
+ INIT_WORK(&vadc->trigger_low_thr_work, qpnp_vadc_low_thr_fn);
+ }
+
+ vadc->vadc_iadc_sync_lock = false;
+ dev_set_drvdata(&spmi->dev, vadc);
+ list_add(&vadc->list, &qpnp_vadc_device_list);
+
+ return 0;
+
+err_setup:
+ for_each_child_of_node(node, child) {
+ device_remove_file(&spmi->dev,
+ &vadc->sens_attr[i].dev_attr);
+ if (vadc->vadc_therm_chan[i].thermal_node)
+ thermal_zone_device_unregister(
+ vadc->vadc_therm_chan[i].tz_dev);
+ i++;
+ }
+ hwmon_device_unregister(vadc->vadc_hwmon);
+
+ return rc;
+}
+
+static int qpnp_vadc_remove(struct spmi_device *spmi)
+{
+ struct qpnp_vadc_chip *vadc = dev_get_drvdata(&spmi->dev);
+ struct device_node *node = spmi->dev.of_node;
+ struct device_node *child;
+ int i = 0;
+
+ for_each_child_of_node(node, child) {
+ device_remove_file(&spmi->dev,
+ &vadc->sens_attr[i].dev_attr);
+ if (vadc->vadc_therm_chan[i].thermal_node)
+ thermal_zone_device_unregister(
+ vadc->vadc_therm_chan[i].tz_dev);
+ i++;
+ }
+ hwmon_device_unregister(vadc->vadc_hwmon);
+ list_del(&vadc->list);
+ if (vadc->adc->hkadc_ldo && vadc->adc->hkadc_ldo_ok)
+ qpnp_adc_free_voltage_resource(vadc->adc);
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return 0;
+}
+
+static int qpnp_vadc_suspend_noirq(struct device *dev)
+{
+ struct qpnp_vadc_chip *vadc = dev_get_drvdata(dev);
+ u8 status = 0;
+
+ qpnp_vadc_read_reg(vadc, QPNP_VADC_STATUS1, &status, 1);
+ if (((status & QPNP_VADC_STATUS1_OP_MODE_MASK) >>
+ QPNP_VADC_OP_MODE_SHIFT) == QPNP_VADC_MEAS_INT_MODE) {
+ pr_debug("Meas interval in progress\n");
+ } else if (vadc->vadc_poll_eoc) {
+ status &= QPNP_VADC_STATUS1_REQ_STS_EOC_MASK;
+ pr_debug("vadc conversion status=%d\n", status);
+ if (status != QPNP_VADC_STATUS1_EOC) {
+ pr_err(
+ "Aborting suspend, adc conversion requested while suspending\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops qpnp_vadc_pm_ops = {
+ .suspend_noirq = qpnp_vadc_suspend_noirq,
+};
+
+static struct spmi_driver qpnp_vadc_driver = {
+ .driver = {
+ .name = "qcom,qpnp-vadc",
+ .of_match_table = qpnp_vadc_match_table,
+ .pm = &qpnp_vadc_pm_ops,
+ },
+ .probe = qpnp_vadc_probe,
+ .remove = qpnp_vadc_remove,
+};
+
+static int __init qpnp_vadc_init(void)
+{
+ return spmi_driver_register(&qpnp_vadc_driver);
+}
+module_init(qpnp_vadc_init);
+
+static void __exit qpnp_vadc_exit(void)
+{
+ spmi_driver_unregister(&qpnp_vadc_driver);
+}
+module_exit(qpnp_vadc_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC Voltage ADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index b1ab8bdf8251..d971205be2d3 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -587,6 +587,36 @@ config LEDS_POWERNV
To compile this driver as a module, choose 'm' here: the module
will be called leds-powernv.
+config LEDS_QPNP
+ tristate "Support for QPNP LEDs"
+ depends on LEDS_CLASS && MSM_SPMI && OF_SPMI
+ help
+ This driver supports the LED functionality of the Qualcomm PNP PMIC. It
+ includes RGB LEDs, WLED and flash LED.
+
+ To compile this driver as a module, choose M here: the module will
+ be called leds-qpnp.
+
+config LEDS_QPNP_FLASH
+ tristate "Support for QPNP Flash LEDs"
+ depends on LEDS_CLASS && MSM_SPMI && OF_SPMI
+ help
+ This driver supports the flash LED functionality of the Qualcomm
+ Technologies PNP PMIC.
+
+ To compile this driver as a module, choose M here: the module will
+ be called leds-qpnp-flash.
+
+config LEDS_QPNP_WLED
+ tristate "Support for QPNP WLED"
+ depends on LEDS_CLASS && MSM_SPMI && OF_SPMI
+ help
+ This driver supports the WLED (White LED) functionality of the
+ Qualcomm Technologies PNP PMIC. WLED is used for the display backlight.
+
+ To compile this driver as a module, choose M here: the module will
+ be called leds-qpnp-wled.
+
config LEDS_SYSCON
bool "LED support for LEDs on system controllers"
depends on LEDS_CLASS=y
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e9d53092765d..04a0f62c4035 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -60,6 +60,9 @@ obj-$(CONFIG_LEDS_MAX77693) += leds-max77693.o
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
+obj-$(CONFIG_LEDS_QPNP) += leds-qpnp.o
+obj-$(CONFIG_LEDS_QPNP_FLASH) += leds-qpnp-flash.o
+obj-$(CONFIG_LEDS_QPNP_WLED) += leds-qpnp-wled.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o
obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
diff --git a/drivers/leds/leds-qpnp-flash.c b/drivers/leds/leds-qpnp-flash.c
new file mode 100644
index 000000000000..f615a6a07b98
--- /dev/null
+++ b/drivers/leds/leds-qpnp-flash.c
@@ -0,0 +1,2653 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/power_supply.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include "leds.h"
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#define FLASH_LED_PERIPHERAL_SUBTYPE(base) (base + 0x05)
+#define FLASH_SAFETY_TIMER(base) (base + 0x40)
+#define FLASH_MAX_CURRENT(base) (base + 0x41)
+#define FLASH_LED0_CURRENT(base) (base + 0x42)
+#define FLASH_LED1_CURRENT(base) (base + 0x43)
+#define FLASH_CLAMP_CURRENT(base) (base + 0x44)
+#define FLASH_MODULE_ENABLE_CTRL(base) (base + 0x46)
+#define FLASH_LED_STROBE_CTRL(base) (base + 0x47)
+#define FLASH_LED_TMR_CTRL(base) (base + 0x48)
+#define FLASH_HEADROOM(base) (base + 0x4A)
+#define FLASH_STARTUP_DELAY(base) (base + 0x4B)
+#define FLASH_MASK_ENABLE(base) (base + 0x4C)
+#define FLASH_VREG_OK_FORCE(base) (base + 0x4F)
+#define FLASH_FAULT_DETECT(base) (base + 0x51)
+#define FLASH_THERMAL_DRATE(base) (base + 0x52)
+#define FLASH_CURRENT_RAMP(base) (base + 0x54)
+#define FLASH_VPH_PWR_DROOP(base) (base + 0x5A)
+#define FLASH_HDRM_SNS_ENABLE_CTRL0(base) (base + 0x5C)
+#define FLASH_HDRM_SNS_ENABLE_CTRL1(base) (base + 0x5D)
+#define FLASH_LED_UNLOCK_SECURE(base) (base + 0xD0)
+#define FLASH_PERPH_RESET_CTRL(base) (base + 0xDA)
+#define FLASH_TORCH(base) (base + 0xE4)
+
+#define FLASH_STATUS_REG_MASK 0xFF
+#define FLASH_LED_FAULT_STATUS(base) (base + 0x08)
+#define INT_LATCHED_STS(base) (base + 0x18)
+#define IN_POLARITY_HIGH(base) (base + 0x12)
+#define INT_SET_TYPE(base) (base + 0x11)
+#define INT_EN_SET(base) (base + 0x15)
+#define INT_LATCHED_CLR(base) (base + 0x14)
+
+#define FLASH_HEADROOM_MASK 0x03
+#define FLASH_STARTUP_DLY_MASK 0x03
+#define FLASH_VREG_OK_FORCE_MASK 0xC0
+#define FLASH_FAULT_DETECT_MASK 0x80
+#define FLASH_THERMAL_DERATE_MASK 0xBF
+#define FLASH_SECURE_MASK 0xFF
+#define FLASH_TORCH_MASK 0x03
+#define FLASH_CURRENT_MASK 0x7F
+#define FLASH_TMR_MASK 0x03
+#define FLASH_TMR_SAFETY 0x00
+#define FLASH_SAFETY_TIMER_MASK 0x7F
+#define FLASH_MODULE_ENABLE_MASK 0xE0
+#define FLASH_STROBE_MASK 0xC0
+#define FLASH_CURRENT_RAMP_MASK 0xBF
+#define FLASH_VPH_PWR_DROOP_MASK 0xF3
+#define FLASH_LED_HDRM_SNS_ENABLE_MASK 0x81
+#define FLASH_MASK_MODULE_CONTRL_MASK 0xE0
+#define FLASH_FOLLOW_OTST2_RB_MASK 0x08
+
+#define FLASH_LED_TRIGGER_DEFAULT "none"
+#define FLASH_LED_HEADROOM_DEFAULT_MV 500
+#define FLASH_LED_STARTUP_DELAY_DEFAULT_US 128
+#define FLASH_LED_CLAMP_CURRENT_DEFAULT_MA 200
+#define FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C 80
+#define FLASH_LED_RAMP_UP_STEP_DEFAULT_US 3
+#define FLASH_LED_RAMP_DN_STEP_DEFAULT_US 3
+#define FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV 3200
+#define FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US 10
+#define FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT 2
+#define FLASH_RAMP_UP_DELAY_US_MIN 1000
+#define FLASH_RAMP_UP_DELAY_US_MAX 1001
+#define FLASH_RAMP_DN_DELAY_US_MIN 2160
+#define FLASH_RAMP_DN_DELAY_US_MAX 2161
+#define FLASH_BOOST_REGULATOR_PROBE_DELAY_MS 2000
+#define FLASH_TORCH_MAX_LEVEL 0x0F
+#define FLASH_MAX_LEVEL 0x4F
+#define FLASH_LED_FLASH_HW_VREG_OK 0x40
+#define FLASH_LED_FLASH_SW_VREG_OK 0x80
+#define FLASH_LED_STROBE_TYPE_HW 0x04
+#define FLASH_DURATION_DIVIDER 10
+#define FLASH_LED_HEADROOM_DIVIDER 100
+#define FLASH_LED_HEADROOM_OFFSET 2
+#define FLASH_LED_MAX_CURRENT_MA 1000
+#define FLASH_LED_THERMAL_THRESHOLD_MIN 95
+#define FLASH_LED_THERMAL_DEVIDER 10
+#define FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV 2500
+#define FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER 100
+#define FLASH_LED_HDRM_SNS_ENABLE 0x81
+#define FLASH_LED_HDRM_SNS_DISABLE 0x01
+#define FLASH_LED_UA_PER_MA 1000
+#define FLASH_LED_MASK_MODULE_MASK2_ENABLE 0x20
+#define FLASH_LED_MASK3_ENABLE_SHIFT 7
+#define FLASH_LED_MODULE_CTRL_DEFAULT 0x60
+#define FLASH_LED_CURRENT_READING_DELAY_MIN 5000
+#define FLASH_LED_CURRENT_READING_DELAY_MAX 5001
+#define PMI8996_SUBTYPE 19
+#define FLASH_LED_OPEN_FAULT_DETECTED 0xC
+
+#define FLASH_UNLOCK_SECURE 0xA5
+#define FLASH_LED_TORCH_ENABLE 0x00
+#define FLASH_LED_TORCH_DISABLE 0x03
+#define FLASH_MODULE_ENABLE 0x80
+#define FLASH_LED0_TRIGGER 0x80
+#define FLASH_LED1_TRIGGER 0x40
+#define FLASH_LED0_ENABLEMENT 0x40
+#define FLASH_LED1_ENABLEMENT 0x20
+#define FLASH_LED_DISABLE 0x00
+#define FLASH_LED_MIN_CURRENT_MA 13
+#define FLASH_SUBTYPE_DUAL 0x01
+#define FLASH_SUBTYPE_SINGLE 0x02
+
+/*
+ * Each ID represents a physical LED that can be controlled individually.
+ */
+enum flash_led_id {
+ FLASH_LED_0 = 0,
+ FLASH_LED_1,
+ FLASH_LED_SWITCH,
+};
+
+enum flash_led_type {
+ FLASH = 0,
+ TORCH,
+ SWITCH,
+};
+
+enum thermal_derate_rate {
+ RATE_1_PERCENT = 0,
+ RATE_1P25_PERCENT,
+ RATE_2_PERCENT,
+ RATE_2P5_PERCENT,
+ RATE_5_PERCENT,
+};
+
+enum current_ramp_steps {
+ RAMP_STEP_0P2_US = 0,
+ RAMP_STEP_0P4_US,
+ RAMP_STEP_0P8_US,
+ RAMP_STEP_1P6_US,
+ RAMP_STEP_3P3_US,
+ RAMP_STEP_6P7_US,
+ RAMP_STEP_13P5_US,
+ RAMP_STEP_27US,
+};
+
+struct flash_regulator_data {
+ struct regulator *regs;
+ const char *reg_name;
+ u32 max_volt_uv;
+};
+
+/*
+ * Configurations for each individual LED
+ */
+struct flash_node_data {
+ struct spmi_device *spmi_dev;
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct flash_regulator_data *reg_data;
+ u16 max_current;
+ u16 prgm_current;
+ u16 prgm_current2;
+ u16 duration;
+ u8 id;
+ u8 type;
+ u8 trigger;
+ u8 enable;
+ u8 num_regulators;
+ bool flash_on;
+};
+
+/*
+ * Flash LED configuration read from device tree
+ */
+struct flash_led_platform_data {
+ unsigned int temp_threshold_num;
+ unsigned int temp_derate_curr_num;
+ unsigned int *die_temp_derate_curr_ma;
+ unsigned int *die_temp_threshold_degc;
+ u16 ramp_up_step;
+ u16 ramp_dn_step;
+ u16 vph_pwr_droop_threshold;
+ u16 headroom;
+ u16 clamp_current;
+ u8 thermal_derate_threshold;
+ u8 vph_pwr_droop_debounce_time;
+ u8 startup_dly;
+ u8 thermal_derate_rate;
+ bool pmic_charger_support;
+ bool self_check_en;
+ bool thermal_derate_en;
+ bool current_ramp_en;
+ bool vph_pwr_droop_en;
+ bool hdrm_sns_ch0_en;
+ bool hdrm_sns_ch1_en;
+ bool power_detect_en;
+ bool mask3_en;
+ bool follow_rb_disable;
+ bool die_current_derate_en;
+};
+
+struct qpnp_flash_led_buffer {
+ size_t rpos;
+ size_t wpos;
+ size_t len;
+ char data[0];
+};
+
+/*
+ * Flash LED data structure containing flash LED attributes
+ */
+struct qpnp_flash_led {
+ struct pmic_revid_data *revid_data;
+ struct spmi_device *spmi_dev;
+ struct flash_led_platform_data *pdata;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *gpio_state_active;
+ struct pinctrl_state *gpio_state_suspend;
+ struct flash_node_data *flash_node;
+ struct power_supply *battery_psy;
+ struct workqueue_struct *ordered_workq;
+ struct qpnp_vadc_chip *vadc_dev;
+ struct mutex flash_led_lock;
+ struct qpnp_flash_led_buffer *log;
+ struct dentry *dbgfs_root;
+ int num_leds;
+ u32 buffer_cnt;
+ u16 base;
+ u16 current_addr;
+ u16 current2_addr;
+ u8 peripheral_type;
+ u8 fault_reg;
+ bool gpio_enabled;
+ bool charging_enabled;
+ bool strobe_debug;
+ bool dbg_feature_en;
+ bool open_fault;
+};
+
+static u8 qpnp_flash_led_ctrl_dbg_regs[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x4A, 0x4B, 0x4C, 0x4F, 0x51, 0x52, 0x54, 0x55, 0x5A, 0x5C, 0x5D,
+};
+
+static int flash_led_dbgfs_file_open(struct qpnp_flash_led *led,
+ struct file *file)
+{
+ struct qpnp_flash_led_buffer *log;
+ size_t logbufsize = SZ_4K;
+
+ log = kzalloc(logbufsize, GFP_KERNEL);
+ if (!log)
+ return -ENOMEM;
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+ led->log = log;
+
+ led->buffer_cnt = 1;
+ file->private_data = led;
+
+ return 0;
+}
+
+static int flash_led_dfs_open(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led *led = inode->i_private;
+
+ return flash_led_dbgfs_file_open(led, file);
+}
+
+static int flash_led_dfs_close(struct inode *inode, struct file *file)
+{
+ struct qpnp_flash_led *led = file->private_data;
+
+ if (led && led->log) {
+ file->private_data = NULL;
+ kfree(led->log);
+ }
+
+ return 0;
+}
+
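+/*
+ * Append a formatted string at the current write position of the
+ * debugfs log buffer and advance the write pointer.
+ */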
+static int print_to_log(struct qpnp_flash_led_buffer *log,
+ const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *log_buf = &log->data[log->wpos];
+ size_t size = log->len - log->wpos;
+
+ va_start(args, fmt);
+ cnt = vscnprintf(log_buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
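+/*
+ * Read the latched interrupt status register, format "<address> <value>"
+ * into the log buffer and copy the buffer contents to user space.
+ */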
+static ssize_t flash_led_dfs_latched_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos) {
+ struct qpnp_flash_led *led = fp->private_data;
+ struct qpnp_flash_led_buffer *log = led->log;
+ u8 val;
+ int rc;
+ size_t len;
+ size_t ret;
+
+ if (log->rpos >= log->wpos && led->buffer_cnt == 0)
+ return 0;
+
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl,
+ led->spmi_dev->sid, INT_LATCHED_STS(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from address %x, rc(%d)\n",
+ INT_LATCHED_STS(led->base), rc);
+ return -EINVAL;
+ }
+ led->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", INT_LATCHED_STS(led->base));
+ if (rc == 0)
+ return rc;
+
+ rc = print_to_log(log, "0x%02X ", val);
+ if (rc == 0)
+ return rc;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ return -EFAULT;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ return len;
+}
+
+static ssize_t flash_led_dfs_fault_reg_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *ppos) {
+ struct qpnp_flash_led *led = fp->private_data;
+ struct qpnp_flash_led_buffer *log = led->log;
+ int rc;
+ size_t len;
+ size_t ret;
+
+ if (log->rpos >= log->wpos && led->buffer_cnt == 0)
+ return 0;
+
+ led->buffer_cnt--;
+
+ rc = print_to_log(log, "0x%05X ", FLASH_LED_FAULT_STATUS(led->base));
+ if (rc == 0)
+ return rc;
+
+ rc = print_to_log(log, "0x%02X ", led->fault_reg);
+ if (rc == 0)
+ return rc;
+
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret) {
+ pr_err("error copy register value to user\n");
+ return -EFAULT;
+ }
+
+ len -= ret;
+ *ppos += len;
+ log->rpos += len;
+
+ return len;
+}
+
+static ssize_t flash_led_dfs_fault_reg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos) {
+
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ size_t ret = 0;
+
+ struct qpnp_flash_led *led = file->private_data;
+ char *kbuf = kmalloc(count + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i", &data) == 1) {
+ pos++;
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->strobe_debug = true;
+ else
+ led->strobe_debug = false;
+
+free_buf:
+ kfree(kbuf);
+ return ret;
+}
+
+static ssize_t flash_led_dfs_dbg_enable(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos) {
+
+ u8 *val;
+ int pos = 0;
+ int cnt = 0;
+ int data;
+ size_t ret = 0;
+ struct qpnp_flash_led *led = file->private_data;
+ char *kbuf = kmalloc(count + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+ val = kbuf;
+ while (sscanf(kbuf + pos, "%i", &data) == 1) {
+ pos++;
+ val[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ ret = count;
+ if (*val == 1)
+ led->dbg_feature_en = true;
+ else
+ led->dbg_feature_en = false;
+
+free_buf:
+ kfree(kbuf);
+ return ret;
+}
+
+static const struct file_operations flash_led_dfs_latched_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_latched_reg_read,
+};
+
+static const struct file_operations flash_led_dfs_strobe_reg_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .read = flash_led_dfs_fault_reg_read,
+ .write = flash_led_dfs_fault_reg_enable,
+};
+
+static const struct file_operations flash_led_dfs_dbg_feature_fops = {
+ .open = flash_led_dfs_open,
+ .release = flash_led_dfs_close,
+ .write = flash_led_dfs_dbg_enable,
+};
+
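+/*
+ * Read-modify-write helper: only the bits selected by 'mask' are
+ * replaced with 'val'; the remaining bits of the SPMI register at
+ * 'addr' are preserved.
+ */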
+static int
+qpnp_led_masked_write(struct spmi_device *spmi_dev, u16 addr, u8 mask, u8 val)
+{
+ int rc;
+ u8 reg;
+
+ rc = spmi_ext_register_readl(spmi_dev->ctrl, spmi_dev->sid,
+ addr, &reg, 1);
+ if (rc)
+ dev_err(&spmi_dev->dev,
+ "Unable to read from addr=%x, rc(%d)\n", addr, rc);
+
+ reg &= ~mask;
+ reg |= val;
+
+ rc = spmi_ext_register_writel(spmi_dev->ctrl, spmi_dev->sid,
+ addr, &reg, 1);
+ if (rc)
+ dev_err(&spmi_dev->dev,
+ "Unable to write to addr=%x, rc(%d)\n", addr, rc);
+
+ dev_dbg(&spmi_dev->dev, "Write 0x%02X to addr 0x%02X\n", val, addr);
+
+ return rc;
+}
+
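+/*
+ * Map the PMIC die temperature onto the device tree derating table and
+ * return the allowed flash current in mA (0 mA above the highest
+ * threshold).
+ */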
+static int qpnp_flash_led_get_allowed_die_temp_curr(struct qpnp_flash_led *led,
+ int64_t die_temp_degc)
+{
+ int die_temp_curr_ma;
+
+ if (die_temp_degc >= led->pdata->die_temp_threshold_degc[0])
+ die_temp_curr_ma = 0;
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[1])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[0];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[2])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[1];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[3])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[2];
+ else if (die_temp_degc >= led->pdata->die_temp_threshold_degc[4])
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[3];
+ else
+ die_temp_curr_ma = led->pdata->die_temp_derate_curr_ma[4];
+
+ return die_temp_curr_ma;
+}
+
+static int64_t qpnp_flash_led_get_die_temp(struct qpnp_flash_led *led)
+{
+ struct qpnp_vadc_result die_temp_result;
+ int rc;
+
+ rc = qpnp_vadc_read(led->vadc_dev, SPARE2, &die_temp_result);
+ if (rc) {
+ pr_err("failed to read the die temp\n");
+ return -EINVAL;
+ }
+
+ return die_temp_result.physical;
+}
+
+static int qpnp_get_pmic_revid(struct qpnp_flash_led *led)
+{
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(led->spmi_dev->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ dev_err(&led->spmi_dev->dev,
+ "qcom,pmic-revid property missing\n");
+ return -EINVAL;
+ }
+
+ led->revid_data = get_revid_data(revid_dev_node);
+ if (IS_ERR(led->revid_data)) {
+ pr_err("Couldn't get revid data rc = %ld\n",
+ PTR_ERR(led->revid_data));
+ return PTR_ERR(led->revid_data);
+ }
+
+ return 0;
+}
+
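+/*
+ * Compute the maximum flash current that can be drawn right now,
+ * limited by the battery capability reported by the charger
+ * (POWER_SUPPLY_PROP_FLASH_CURRENT_MAX) and, when enabled, by the
+ * die-temperature derating table.
+ */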
+static int
+qpnp_flash_led_get_max_avail_current(struct flash_node_data *flash_node,
+ struct qpnp_flash_led *led)
+{
+ union power_supply_propval prop;
+ int64_t chg_temp_milidegc, die_temp_degc;
+ int max_curr_avail_ma = 2000;
+ int allowed_die_temp_curr_ma = 2000;
+ int rc;
+
+ if (led->pdata->power_detect_en) {
+ if (!led->battery_psy) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to query power supply\n");
+ return -EINVAL;
+ }
+
+ /*
+ * When charging is enabled, follow this enablement
+ * sequence so that the fuel gauge current reading has
+ * settled before it is queried.
+ */
+ if (led->charging_enabled) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Module enable reg write failed\n");
+ return -EINVAL;
+ }
+
+ usleep_range(FLASH_LED_CURRENT_READING_DELAY_MIN,
+ FLASH_LED_CURRENT_READING_DELAY_MAX);
+ }
+
+ led->battery_psy->get_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX, &prop);
+ if (!prop.intval) {
+ dev_err(&led->spmi_dev->dev,
+ "battery too low for flash\n");
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (prop.intval / FLASH_LED_UA_PER_MA);
+ }
+
+ /*
+ * When thermal mitigation is available, derate the
+ * current based on the PMIC die temperature.
+ */
+ if (led->pdata->die_current_derate_en) {
+ chg_temp_milidegc = qpnp_flash_led_get_die_temp(led);
+ if (chg_temp_milidegc < 0)
+ return -EINVAL;
+
+ die_temp_degc = div_s64(chg_temp_milidegc, 1000);
+ allowed_die_temp_curr_ma =
+ qpnp_flash_led_get_allowed_die_temp_curr(led,
+ die_temp_degc);
+ if (allowed_die_temp_curr_ma < 0)
+ return -EINVAL;
+ }
+
+ max_curr_avail_ma = (max_curr_avail_ma >= allowed_die_temp_curr_ma)
+ ? allowed_die_temp_curr_ma : max_curr_avail_ma;
+
+ return max_curr_avail_ma;
+}
+
+static ssize_t qpnp_flash_led_die_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->spmi_dev->dev);
+
+ /* '0' disables the die_temp feature; non-zero enables it */
+ if (val == 0)
+ led->pdata->die_current_derate_en = false;
+ else
+ led->pdata->die_current_derate_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_led_strobe_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct flash_node_data *flash_node;
+ unsigned long state;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+
+ /* '0' for sw strobe; '1' for hw strobe */
+ if (state == 1)
+ flash_node->trigger |= FLASH_LED_STROBE_TYPE_HW;
+ else
+ flash_node->trigger &= ~FLASH_LED_STROBE_TYPE_HW;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_dump_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int rc, i, count = 0;
+ u16 addr;
+ u8 val;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->spmi_dev->dev);
+ for (i = 0; i < ARRAY_SIZE(qpnp_flash_led_ctrl_dbg_regs); i++) {
+ addr = led->base + qpnp_flash_led_ctrl_dbg_regs[i];
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl,
+ led->spmi_dev->sid, addr, &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ addr, rc);
+ return -EINVAL;
+ }
+
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "REG_0x%x = 0x%x\n", addr, val);
+
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
+ }
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_current_derate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ unsigned long val;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->spmi_dev->dev);
+
+ /* '0' disables the derate feature; non-zero enables it */
+ if (val == 0)
+ led->pdata->power_detect_en = false;
+ else
+ led->pdata->power_detect_en = true;
+
+ return count;
+}
+
+static ssize_t qpnp_flash_led_max_current_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_flash_led *led;
+ struct flash_node_data *flash_node;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int max_curr_avail_ma = 0;
+
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->spmi_dev->dev);
+
+ if (led->flash_node[0].flash_on)
+ max_curr_avail_ma += led->flash_node[0].max_current;
+ if (led->flash_node[1].flash_on)
+ max_curr_avail_ma += led->flash_node[1].max_current;
+
+ if (led->pdata->power_detect_en ||
+ led->pdata->die_current_derate_en) {
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current(flash_node, led);
+
+ if (max_curr_avail_ma < 0)
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", max_curr_avail_ma);
+}
+
+static struct device_attribute qpnp_flash_led_attrs[] = {
+ __ATTR(strobe, (S_IRUGO | S_IWUSR | S_IWGRP),
+ NULL,
+ qpnp_led_strobe_type_store),
+ __ATTR(reg_dump, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_flash_led_dump_regs_show,
+ NULL),
+ __ATTR(enable_current_derate, (S_IRUGO | S_IWUSR | S_IWGRP),
+ NULL,
+ qpnp_flash_led_current_derate_store),
+ __ATTR(max_allowed_current, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_flash_led_max_current_show,
+ NULL),
+ __ATTR(enable_die_temp_current_derate, (S_IRUGO | S_IWUSR | S_IWGRP),
+ NULL,
+ qpnp_flash_led_die_temp_store),
+};
+
+static int qpnp_flash_led_get_thermal_derate_rate(const char *rate)
+{
+ /*
+ * Return 5% derate as the default if the user specifies
+ * an unsupported value.
+ */
+ if (strcmp(rate, "1_PERCENT") == 0)
+ return RATE_1_PERCENT;
+ else if (strcmp(rate, "1P25_PERCENT") == 0)
+ return RATE_1P25_PERCENT;
+ else if (strcmp(rate, "2_PERCENT") == 0)
+ return RATE_2_PERCENT;
+ else if (strcmp(rate, "2P5_PERCENT") == 0)
+ return RATE_2P5_PERCENT;
+ else if (strcmp(rate, "5_PERCENT") == 0)
+ return RATE_5_PERCENT;
+ else
+ return RATE_5_PERCENT;
+}
+
+static int qpnp_flash_led_get_ramp_step(const char *step)
+{
+ /*
+ * Return the 27 us step as the default if the user specifies
+ * an unsupported value.
+ */
+ if (strcmp(step, "0P2_US") == 0)
+ return RAMP_STEP_0P2_US;
+ else if (strcmp(step, "0P4_US") == 0)
+ return RAMP_STEP_0P4_US;
+ else if (strcmp(step, "0P8_US") == 0)
+ return RAMP_STEP_0P8_US;
+ else if (strcmp(step, "1P6_US") == 0)
+ return RAMP_STEP_1P6_US;
+ else if (strcmp(step, "3P3_US") == 0)
+ return RAMP_STEP_3P3_US;
+ else if (strcmp(step, "6P7_US") == 0)
+ return RAMP_STEP_6P7_US;
+ else if (strcmp(step, "13P5_US") == 0)
+ return RAMP_STEP_13P5_US;
+ else
+ return RAMP_STEP_27US;
+}
+
+static u8 qpnp_flash_led_get_droop_debounce_time(u8 val)
+{
+ /*
+ * Return 10 us debounce as the default if the user specifies
+ * an unsupported value.
+ */
+ switch (val) {
+ case 0:
+ return 0;
+ case 10:
+ return 1;
+ case 32:
+ return 2;
+ case 64:
+ return 3;
+ default:
+ return 1;
+ }
+}
+
+static u8 qpnp_flash_led_get_startup_dly(u8 val)
+{
+ /*
+ * Return a 128 us delay as the default if the user specifies
+ * an unsupported value.
+ */
+ switch (val) {
+ case 10:
+ return 0;
+ case 32:
+ return 1;
+ case 64:
+ return 2;
+ case 128:
+ return 3;
+ default:
+ return 3;
+ }
+}
+
+static int
+qpnp_flash_led_get_peripheral_type(struct qpnp_flash_led *led)
+{
+ int rc;
+ u8 val;
+
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ FLASH_LED_PERIPHERAL_SUBTYPE(led->base),
+ &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read peripheral subtype\n");
+ return -EINVAL;
+ }
+
+ return val;
+}
+
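+/*
+ * Disable the flash module once no LED channel remains triggered:
+ * restore torch and module control registers, notify the charger,
+ * release the strobe GPIO and zero the per-LED current registers.
+ */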
+static int qpnp_flash_led_module_disable(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ union power_supply_propval psy_prop;
+ int rc;
+ u8 val, tmp;
+
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ FLASH_LED_STROBE_CTRL(led->base),
+ &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read strobe reg\n");
+ return -EINVAL;
+ }
+
+ tmp = (~flash_node->trigger) & val;
+ if (!tmp) {
+ if (flash_node->type == TORCH) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_TORCH(led->base),
+ FLASH_TORCH_MASK, FLASH_LED_TORCH_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Torch reg write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (led->battery_psy &&
+ led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+ !led->revid_data->rev3) {
+ psy_prop.intval = false;
+ rc = led->battery_psy->set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to enble charger i/p current limit\n");
+ return -EINVAL;
+ }
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK,
+ FLASH_LED_MODULE_CTRL_DEFAULT);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Module disable failed\n");
+ return -EINVAL;
+ }
+
+ if (led->pinctrl) {
+ rc = pinctrl_select_state(led->pinctrl,
+ led->gpio_state_suspend);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "failed to disable GPIO\n");
+ return -EINVAL;
+ }
+ led->gpio_enabled = false;
+ }
+
+ if (led->battery_psy) {
+ psy_prop.intval = false;
+ rc = led->battery_psy->set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to setup OTG pulse skip enable\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (flash_node->trigger & FLASH_LED0_TRIGGER) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ led->current_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+
+ }
+
+ if (flash_node->trigger & FLASH_LED1_TRIGGER) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, 0x00);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "current register write failed\n");
+ return -EINVAL;
+ }
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH)
+ flash_node->trigger &= FLASH_LED_STROBE_TYPE_HW;
+
+ return 0;
+}
+
+static enum
+led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
+
+static int flash_regulator_parse_dt(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node) {
+
+ int i = 0, rc;
+ struct device_node *node = flash_node->cdev.dev->of_node;
+ struct device_node *temp = NULL;
+ const char *temp_string;
+ u32 val;
+
+ flash_node->reg_data = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct flash_regulator_data) *
+ flash_node->num_regulators,
+ GFP_KERNEL);
+ if (!flash_node->reg_data) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ rc = of_property_read_string(temp, "regulator-name",
+ &temp_string);
+ if (!rc)
+ flash_node->reg_data[i].reg_name = temp_string;
+ else {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read regulator name\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(temp, "max-voltage", &val);
+ if (!rc) {
+ flash_node->reg_data[i].max_volt_uv = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read max voltage\n");
+ return rc;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
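+/*
+ * Acquire the regulators named in device tree and program their maximum
+ * voltage; when called with on == false, release them again.
+ */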
+static int flash_regulator_setup(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node, bool on)
+{
+ int i, rc = 0;
+
+ if (on == false) {
+ i = flash_node->num_regulators;
+ goto error_regulator_setup;
+ }
+
+ for (i = 0; i < flash_node->num_regulators; i++) {
+ flash_node->reg_data[i].regs =
+ regulator_get(flash_node->cdev.dev,
+ flash_node->reg_data[i].reg_name);
+ if (IS_ERR(flash_node->reg_data[i].regs)) {
+ rc = PTR_ERR(flash_node->reg_data[i].regs);
+ dev_err(&led->spmi_dev->dev,
+ "Failed to get regulator\n");
+ goto error_regulator_setup;
+ }
+
+ if (regulator_count_voltages(flash_node->reg_data[i].regs)
+ > 0) {
+ rc = regulator_set_voltage(flash_node->reg_data[i].regs,
+ flash_node->reg_data[i].max_volt_uv,
+ flash_node->reg_data[i].max_volt_uv);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "regulator set voltage failed\n");
+ regulator_put(flash_node->reg_data[i].regs);
+ goto error_regulator_setup;
+ }
+ }
+ }
+
+ return rc;
+
+error_regulator_setup:
+ while (i--) {
+ if (regulator_count_voltages(flash_node->reg_data[i].regs)
+ > 0) {
+ regulator_set_voltage(flash_node->reg_data[i].regs,
+ 0, flash_node->reg_data[i].max_volt_uv);
+ }
+
+ regulator_put(flash_node->reg_data[i].regs);
+ }
+
+ return rc;
+}
+
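+/*
+ * Enable (or, with on == false, disable) every regulator attached to
+ * this flash node, unwinding already enabled ones on failure.
+ */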
+static int flash_regulator_enable(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node, bool on)
+{
+ int i, rc = 0;
+
+ if (on == false) {
+ i = flash_node->num_regulators;
+ goto error_regulator_enable;
+ }
+
+ for (i = 0; i < flash_node->num_regulators; i++) {
+ rc = regulator_enable(flash_node->reg_data[i].regs);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "regulator enable failed\n");
+ goto error_regulator_enable;
+ }
+ }
+
+ return rc;
+
+error_regulator_enable:
+ while (i--)
+ regulator_disable(flash_node->reg_data[i].regs);
+
+ return rc;
+}
+
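+/*
+ * Deferred work that programs the hardware for a brightness request:
+ * sets torch or flash currents, safety timer, headroom sense and strobe
+ * for the node, and tears everything down when brightness is zero or a
+ * register access fails.
+ */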
+static void qpnp_flash_led_work(struct work_struct *work)
+{
+ struct flash_node_data *flash_node = container_of(work,
+ struct flash_node_data, work);
+ struct qpnp_flash_led *led =
+ dev_get_drvdata(&flash_node->spmi_dev->dev);
+ union power_supply_propval psy_prop;
+ int rc, brightness = flash_node->cdev.brightness;
+ int max_curr_avail_ma = 0;
+ int total_curr_ma = 0;
+ int i;
+ u8 val;
+
+ mutex_lock(&led->flash_led_lock);
+
+ if (!brightness)
+ goto turn_off;
+
+ if (led->open_fault) {
+ dev_err(&led->spmi_dev->dev, "Open fault detected\n");
+ mutex_unlock(&led->flash_led_lock);
+ return;
+ }
+
+ if (!flash_node->flash_on && flash_node->num_regulators > 0) {
+ rc = flash_regulator_enable(led, flash_node, true);
+ if (rc) {
+ mutex_unlock(&led->flash_led_lock);
+ return;
+ }
+ }
+
+ if (!led->gpio_enabled && led->pinctrl) {
+ rc = pinctrl_select_state(led->pinctrl,
+ led->gpio_state_active);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "failed to enable GPIO\n");
+ goto error_enable_gpio;
+ }
+ led->gpio_enabled = true;
+ }
+
+ if (led->dbg_feature_en) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ INT_SET_TYPE(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "INT_SET_TYPE write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ IN_POLARITY_HIGH(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "IN_POLARITY_HIGH write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ INT_EN_SET(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "INT_EN_SET write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ INT_LATCHED_CLR(led->base),
+ FLASH_STATUS_REG_MASK, 0x1F);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "INT_LATCHED_CLR write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (flash_node->type == TORCH) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Secure reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_TORCH(led->base),
+ FLASH_TORCH_MASK, FLASH_LED_TORCH_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ val = (u8)(flash_node->prgm_current *
+ FLASH_TORCH_MAX_LEVEL
+ / flash_node->max_current);
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ val = (u8)(flash_node->prgm_current2 *
+ FLASH_TORCH_MAX_LEVEL
+ / flash_node->max_current);
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Torch reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ val = (u8)(flash_node->prgm_current *
+ FLASH_TORCH_MAX_LEVEL /
+ flash_node->max_current);
+ if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_MAX_CURRENT(led->base),
+ FLASH_CURRENT_MASK, FLASH_TORCH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Max current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Module enable reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->pdata->hdrm_sns_ch0_en ||
+ led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ flash_node->trigger &
+ FLASH_LED0_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ flash_node->trigger &
+ FLASH_LED1_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger |
+ FLASH_LED_STROBE_TYPE_HW),
+ flash_node->trigger);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Strobe reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->type == FLASH) {
+ if (flash_node->trigger & FLASH_LED0_TRIGGER)
+ max_curr_avail_ma += flash_node->max_current;
+ if (flash_node->trigger & FLASH_LED1_TRIGGER)
+ max_curr_avail_ma += flash_node->max_current;
+
+ psy_prop.intval = true;
+ rc = led->battery_psy->set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to setup OTG pulse skip enable\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->pdata->power_detect_en ||
+ led->pdata->die_current_derate_en) {
+ if (led->battery_psy) {
+ led->battery_psy->get_property(led->battery_psy,
+ POWER_SUPPLY_PROP_STATUS,
+ &psy_prop);
+ if (psy_prop.intval < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Invalid battery status\n");
+ goto exit_flash_led_work;
+ }
+
+ if (psy_prop.intval ==
+ POWER_SUPPLY_STATUS_CHARGING)
+ led->charging_enabled = true;
+ else if (psy_prop.intval ==
+ POWER_SUPPLY_STATUS_DISCHARGING
+ || psy_prop.intval ==
+ POWER_SUPPLY_STATUS_NOT_CHARGING)
+ led->charging_enabled = false;
+ }
+ max_curr_avail_ma =
+ qpnp_flash_led_get_max_avail_current
+ (flash_node, led);
+ if (max_curr_avail_ma < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to get max avail curr\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ if (flash_node->trigger & FLASH_LED0_TRIGGER)
+ total_curr_ma += flash_node->prgm_current;
+ if (flash_node->trigger & FLASH_LED1_TRIGGER)
+ total_curr_ma += flash_node->prgm_current2;
+
+ if (max_curr_avail_ma < total_curr_ma) {
+ flash_node->prgm_current =
+ (flash_node->prgm_current *
+ max_curr_avail_ma) / total_curr_ma;
+ flash_node->prgm_current2 =
+ (flash_node->prgm_current2 *
+ max_curr_avail_ma) / total_curr_ma;
+ }
+
+ val = (u8)(flash_node->prgm_current *
+ FLASH_MAX_LEVEL / flash_node->max_current);
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ led->current_addr, FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Current register write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ val = (u8)(flash_node->prgm_current2 *
+ FLASH_MAX_LEVEL / flash_node->max_current);
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ led->current2_addr, FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Current register write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else {
+ if (max_curr_avail_ma <
+ flash_node->prgm_current) {
+ dev_err(&led->spmi_dev->dev,
+ "battery only supprots %d mA\n",
+ max_curr_avail_ma);
+ flash_node->prgm_current =
+ (u16)max_curr_avail_ma;
+ }
+
+ val = (u8)(flash_node->prgm_current *
+ FLASH_MAX_LEVEL
+ / flash_node->max_current);
+ if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(
+ led->spmi_dev,
+ led->current_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(
+ led->spmi_dev,
+ led->current2_addr,
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ val = (u8)((flash_node->duration - FLASH_DURATION_DIVIDER)
+ / FLASH_DURATION_DIVIDER);
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_SAFETY_TIMER(led->base),
+ FLASH_SAFETY_TIMER_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Safety timer reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_MAX_CURRENT(led->base),
+ FLASH_CURRENT_MASK, FLASH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Max current reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (!led->charging_enabled) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE, FLASH_MODULE_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Module enable reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ usleep_range(FLASH_RAMP_UP_DELAY_US_MIN,
+ FLASH_RAMP_UP_DELAY_US_MAX);
+ }
+
+ if (led->revid_data->pmic_subtype == PMI8996_SUBTYPE &&
+ !led->revid_data->rev3) {
+ rc = led->battery_psy->set_property(led->battery_psy,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ &psy_prop);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to disable charger i/p curr limit\n");
+ goto exit_flash_led_work;
+ }
+ }
+
+ if (led->pdata->hdrm_sns_ch0_en ||
+ led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ (flash_node->trigger &
+ FLASH_LED0_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE));
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ (flash_node->trigger &
+ FLASH_LED1_TRIGGER ?
+ FLASH_LED_HDRM_SNS_ENABLE :
+ FLASH_LED_HDRM_SNS_DISABLE));
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense enable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_0) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ } else if (flash_node->id == FLASH_LED_1) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_led_work;
+ }
+ }
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger |
+ FLASH_LED_STROBE_TYPE_HW),
+ flash_node->trigger);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Strobe reg write failed\n");
+ goto exit_flash_led_work;
+ }
+
+ if (led->strobe_debug && led->dbg_feature_en) {
+ udelay(2000);
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ FLASH_LED_FAULT_STATUS(led->base),
+ &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from addr= %x, rc(%d)\n",
+ FLASH_LED_FAULT_STATUS(led->base), rc);
+ goto exit_flash_led_work;
+ }
+ led->fault_reg = val;
+ }
+ } else {
+ pr_err("Both Torch and Flash cannot be select at same time\n");
+ for (i = 0; i < led->num_leds; i++)
+ led->flash_node[i].flash_on = false;
+ goto turn_off;
+ }
+
+ flash_node->flash_on = true;
+ mutex_unlock(&led->flash_led_lock);
+
+ return;
+
+turn_off:
+ if (flash_node->type == TORCH) {
+ /*
+ * Checking LED fault status detects hardware open fault.
+ * If a fault occurs, all subsequent LED enablement requests
+ * will be rejected to protect the hardware.
+ */
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ FLASH_LED_FAULT_STATUS(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to read out fault status register\n");
+ goto exit_flash_led_work;
+ }
+
+ led->open_fault = (val & FLASH_LED_OPEN_FAULT_DETECTED);
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_LED_STROBE_CTRL(led->base),
+ (flash_node->id == FLASH_LED_SWITCH ? FLASH_STROBE_MASK
+ | FLASH_LED_STROBE_TYPE_HW
+ : flash_node->trigger
+ | FLASH_LED_STROBE_TYPE_HW),
+ FLASH_LED_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "Strobe disable failed\n");
+ goto exit_flash_led_work;
+ }
+
+ usleep_range(FLASH_RAMP_DN_DELAY_US_MIN, FLASH_RAMP_DN_DELAY_US_MAX);
+exit_flash_hdrm_sns:
+ if (led->pdata->hdrm_sns_ch0_en) {
+ if (flash_node->id == FLASH_LED_0 ||
+ flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL0(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_hdrm_sns;
+ }
+ }
+ }
+
+ if (led->pdata->hdrm_sns_ch1_en) {
+ if (flash_node->id == FLASH_LED_1 ||
+ flash_node->id == FLASH_LED_SWITCH) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HDRM_SNS_ENABLE_CTRL1(led->base),
+ FLASH_LED_HDRM_SNS_ENABLE_MASK,
+ FLASH_LED_HDRM_SNS_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom sense disable failed\n");
+ goto exit_flash_hdrm_sns;
+ }
+ }
+ }
+exit_flash_led_work:
+ rc = qpnp_flash_led_module_disable(led, flash_node);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "Module disable failed\n");
+ goto exit_flash_led_work;
+ }
+error_enable_gpio:
+ if (flash_node->flash_on && flash_node->num_regulators > 0)
+ flash_regulator_enable(led, flash_node, false);
+
+ flash_node->flash_on = false;
+ mutex_unlock(&led->flash_led_lock);
+
+ return;
+}
+
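+/*
+ * led_classdev brightness hook. The brightness value is treated as a
+ * current request in mA, cached in the flash node (and mirrored into
+ * the switch node for dual-LED control), and the actual register
+ * programming is deferred to the ordered workqueue.
+ */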
+static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct flash_node_data *flash_node;
+ struct qpnp_flash_led *led;
+ flash_node = container_of(led_cdev, struct flash_node_data, cdev);
+ led = dev_get_drvdata(&flash_node->spmi_dev->dev);
+
+ if (value < LED_OFF) {
+ pr_err("Invalid brightness value\n");
+ return;
+ }
+
+ if (value > flash_node->cdev.max_brightness)
+ value = flash_node->cdev.max_brightness;
+
+ flash_node->cdev.brightness = value;
+ if (led->flash_node[led->num_leds - 1].id ==
+ FLASH_LED_SWITCH) {
+ if (flash_node->type == TORCH)
+ led->flash_node[led->num_leds - 1].type = TORCH;
+ else if (flash_node->type == FLASH)
+ led->flash_node[led->num_leds - 1].type = FLASH;
+
+ led->flash_node[led->num_leds - 1].max_current
+ = flash_node->max_current;
+
+ if (flash_node->id == FLASH_LED_0 ||
+ flash_node->id == FLASH_LED_1) {
+ if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+ value = FLASH_LED_MIN_CURRENT_MA;
+
+ flash_node->prgm_current = value;
+ flash_node->flash_on = value ? true : false;
+ if (value)
+ led->flash_node[led->num_leds - 1].trigger |=
+ (0x80 >> flash_node->id);
+ else
+ led->flash_node[led->num_leds - 1].trigger &=
+ ~(0x80 >> flash_node->id);
+
+ if (flash_node->id == FLASH_LED_0)
+ led->flash_node[led->num_leds - 1].
+ prgm_current = flash_node->prgm_current;
+ else if (flash_node->id == FLASH_LED_1)
+ led->flash_node[led->num_leds - 1].
+ prgm_current2 =
+ flash_node->prgm_current;
+
+ return;
+ } else if (flash_node->id == FLASH_LED_SWITCH) {
+ if (!value) {
+ flash_node->prgm_current = 0;
+ flash_node->prgm_current2 = 0;
+ }
+ }
+ } else {
+ if (value < FLASH_LED_MIN_CURRENT_MA && value != 0)
+ value = FLASH_LED_MIN_CURRENT_MA;
+ flash_node->prgm_current = value;
+ }
+
+ queue_work(led->ordered_workq, &flash_node->work);
+
+ return;
+}
+
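+/*
+ * One-time hardware setup at probe: disable the module and strobe,
+ * select the safety timer, and program headroom, startup delay, clamp
+ * current, VREG_OK source, fault detection, OTST2_RB behavior, thermal
+ * derating, current ramp and VPH power droop from platform data.
+ */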
+static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
+{
+ int rc;
+ u8 val, temp_val;
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_MODULE_ENABLE_CTRL(led->base),
+ FLASH_MODULE_ENABLE_MASK,
+ FLASH_LED_MODULE_CTRL_DEFAULT);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "Module disable failed\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_LED_STROBE_CTRL(led->base),
+ FLASH_STROBE_MASK, FLASH_LED_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "Strobe disable failed\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_LED_TMR_CTRL(led->base),
+ FLASH_TMR_MASK, FLASH_TMR_SAFETY);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "LED timer ctrl reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ val = (u8)(led->pdata->headroom / FLASH_LED_HEADROOM_DIVIDER -
+ FLASH_LED_HEADROOM_OFFSET);
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_HEADROOM(led->base),
+ FLASH_HEADROOM_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "Headroom reg write failed\n");
+ return rc;
+ }
+
+ val = qpnp_flash_led_get_startup_dly(led->pdata->startup_dly);
+
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_STARTUP_DELAY(led->base),
+ FLASH_STARTUP_DLY_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Startup delay reg write failed\n");
+ return rc;
+ }
+
+ val = (u8)(led->pdata->clamp_current * FLASH_MAX_LEVEL /
+ FLASH_LED_MAX_CURRENT_MA);
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_CLAMP_CURRENT(led->base),
+ FLASH_CURRENT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Clamp current reg write failed\n");
+ return rc;
+ }
+
+ if (led->pdata->pmic_charger_support)
+ val = FLASH_LED_FLASH_HW_VREG_OK;
+ else
+ val = FLASH_LED_FLASH_SW_VREG_OK;
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_VREG_OK_FORCE(led->base),
+ FLASH_VREG_OK_FORCE_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "VREG OK force reg write failed\n");
+ return rc;
+ }
+
+ if (led->pdata->self_check_en)
+ val = FLASH_MODULE_ENABLE;
+ else
+ val = FLASH_LED_DISABLE;
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_FAULT_DETECT(led->base),
+ FLASH_FAULT_DETECT_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Fault detect reg write failed\n");
+ return rc;
+ }
+
+ val = 0x0;
+ val |= led->pdata->mask3_en << FLASH_LED_MASK3_ENABLE_SHIFT;
+ val |= FLASH_LED_MASK_MODULE_MASK2_ENABLE;
+ rc = qpnp_led_masked_write(led->spmi_dev, FLASH_MASK_ENABLE(led->base),
+ FLASH_MASK_MODULE_CONTRL_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "Mask module enable failed\n");
+ return rc;
+ }
+
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ FLASH_PERPH_RESET_CTRL(led->base),
+ &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from address %x, rc(%d)\n",
+ FLASH_PERPH_RESET_CTRL(led->base), rc);
+ return -EINVAL;
+ }
+
+ if (led->pdata->follow_rb_disable) {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ val |= FLASH_FOLLOW_OTST2_RB_MASK;
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_PERPH_RESET_CTRL(led->base),
+ FLASH_FOLLOW_OTST2_RB_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "failed to reset OTST2_RB bit\n");
+ return rc;
+ }
+ } else {
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_LED_UNLOCK_SECURE(led->base),
+ FLASH_SECURE_MASK, FLASH_UNLOCK_SECURE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Secure reg write failed\n");
+ return -EINVAL;
+ }
+
+ val &= ~FLASH_FOLLOW_OTST2_RB_MASK;
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_PERPH_RESET_CTRL(led->base),
+ FLASH_FOLLOW_OTST2_RB_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "failed to reset OTST2_RB bit\n");
+ return rc;
+ }
+ }
+
+ if (!led->pdata->thermal_derate_en)
+ val = 0x0;
+ else {
+ val = led->pdata->thermal_derate_en << 7;
+ val |= led->pdata->thermal_derate_rate << 3;
+ val |= (led->pdata->thermal_derate_threshold -
+ FLASH_LED_THERMAL_THRESHOLD_MIN) /
+ FLASH_LED_THERMAL_DEVIDER;
+ }
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_THERMAL_DRATE(led->base),
+ FLASH_THERMAL_DERATE_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "Thermal derate reg write failed\n");
+ return rc;
+ }
+
+ if (!led->pdata->current_ramp_en)
+ val = 0x0;
+ else {
+ val = led->pdata->current_ramp_en << 7;
+ val |= led->pdata->ramp_up_step << 3;
+ val |= led->pdata->ramp_dn_step;
+ }
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_CURRENT_RAMP(led->base),
+ FLASH_CURRENT_RAMP_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "Current ramp reg write failed\n");
+ return rc;
+ }
+
+ if (!led->pdata->vph_pwr_droop_en)
+ val = 0x0;
+ else {
+ val = led->pdata->vph_pwr_droop_en << 7;
+ val |= ((led->pdata->vph_pwr_droop_threshold -
+ FLASH_LED_VPH_DROOP_THRESHOLD_MIN_MV) /
+ FLASH_LED_VPH_DROOP_THRESHOLD_DIVIDER) << 4;
+ temp_val =
+ qpnp_flash_led_get_droop_debounce_time(
+ led->pdata->vph_pwr_droop_debounce_time);
+ if (temp_val == 0xFF) {
+ dev_err(&led->spmi_dev->dev, "Invalid debounce time\n");
+ return temp_val;
+ }
+
+ val |= temp_val;
+ }
+ rc = qpnp_led_masked_write(led->spmi_dev,
+ FLASH_VPH_PWR_DROOP(led->base),
+ FLASH_VPH_PWR_DROOP_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "VPH PWR droop reg write failed\n");
+ return rc;
+ }
+
+ led->battery_psy = power_supply_get_by_name("battery");
+ if (!led->battery_psy) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to get battery power supply\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
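+/*
+ * Parse one flash LED child node: 'label' selects flash/torch/switch,
+ * followed by current, id and duration; the strobe trigger bit is
+ * derived from the peripheral subtype and child regulator entries are
+ * counted.
+ */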
+static int qpnp_flash_led_parse_each_led_dt(struct qpnp_flash_led *led,
+ struct flash_node_data *flash_node)
+{
+ const char *temp_string;
+ struct device_node *node = flash_node->cdev.dev->of_node;
+ struct device_node *temp = NULL;
+ int rc = 0, num_regs = 0;
+ u32 val;
+
+ rc = of_property_read_string(node, "label", &temp_string);
+ if (!rc) {
+ if (strcmp(temp_string, "flash") == 0)
+ flash_node->type = FLASH;
+ else if (strcmp(temp_string, "torch") == 0)
+ flash_node->type = TORCH;
+ else if (strcmp(temp_string, "switch") == 0)
+ flash_node->type = SWITCH;
+ else {
+ dev_err(&led->spmi_dev->dev,
+ "Wrong flash LED type\n");
+ return -EINVAL;
+ }
+ } else if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read flash type\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ flash_node->prgm_current = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read current\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,id", &val);
+ if (!rc)
+ flash_node->id = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev, "Unable to read led ID\n");
+ return rc;
+ }
+
+ if (flash_node->type == SWITCH || flash_node->type == FLASH) {
+ rc = of_property_read_u32(node, "qcom,duration", &val);
+ if (!rc)
+ flash_node->duration = (u16)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read duration\n");
+ return rc;
+ }
+ }
+
+ switch (led->peripheral_type) {
+ case FLASH_SUBTYPE_SINGLE:
+ flash_node->trigger = FLASH_LED0_TRIGGER;
+ break;
+ case FLASH_SUBTYPE_DUAL:
+ if (flash_node->id == FLASH_LED_0)
+ flash_node->trigger = FLASH_LED0_TRIGGER;
+ else if (flash_node->id == FLASH_LED_1)
+ flash_node->trigger = FLASH_LED1_TRIGGER;
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev, "Invalid peripheral type\n");
+ }
+
+ while ((temp = of_get_next_child(node, temp))) {
+ if (of_find_property(temp, "regulator-name", NULL))
+ num_regs++;
+ }
+
+ if (num_regs)
+ flash_node->num_regulators = num_regs;
+
+ return rc;
+}
+
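+/*
+ * Parse the properties shared by the whole flash peripheral: headroom,
+ * startup delay, clamp current, charger support, self check, thermal
+ * derating, current ramp, VPH power droop, headroom sense, die-temp
+ * derating tables and pinctrl states.
+ */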
+static int qpnp_flash_led_parse_common_dt(
+ struct qpnp_flash_led *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val, temp_val;
+ const char *temp;
+
+ led->pdata->headroom = FLASH_LED_HEADROOM_DEFAULT_MV;
+ rc = of_property_read_u32(node, "qcom,headroom", &val);
+ if (!rc)
+ led->pdata->headroom = (u16)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev, "Unable to read headroom\n");
+ return rc;
+ }
+
+ led->pdata->startup_dly = FLASH_LED_STARTUP_DELAY_DEFAULT_US;
+ rc = of_property_read_u32(node, "qcom,startup-dly", &val);
+ if (!rc)
+ led->pdata->startup_dly = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read startup delay\n");
+ return rc;
+ }
+
+ led->pdata->clamp_current = FLASH_LED_CLAMP_CURRENT_DEFAULT_MA;
+ rc = of_property_read_u32(node, "qcom,clamp-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->pdata->clamp_current = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read clamp current\n");
+ return rc;
+ }
+
+ led->pdata->pmic_charger_support =
+ of_property_read_bool(node,
+ "qcom,pmic-charger-support");
+
+ led->pdata->self_check_en =
+ of_property_read_bool(node, "qcom,self-check-enabled");
+
+ led->pdata->thermal_derate_en =
+ of_property_read_bool(node,
+ "qcom,thermal-derate-enabled");
+
+ if (led->pdata->thermal_derate_en) {
+ led->pdata->thermal_derate_rate =
+ FLASH_LED_THERMAL_DERATE_RATE_DEFAULT_PERCENT;
+ rc = of_property_read_string(node, "qcom,thermal-derate-rate",
+ &temp);
+ if (!rc) {
+ temp_val =
+ qpnp_flash_led_get_thermal_derate_rate(temp);
+ if (temp_val < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Invalid thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_rate = (u8)temp_val;
+ } else {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read thermal derate rate\n");
+ return -EINVAL;
+ }
+
+ led->pdata->thermal_derate_threshold =
+ FLASH_LED_THERMAL_DERATE_THRESHOLD_DEFAULT_C;
+ rc = of_property_read_u32(node, "qcom,thermal-derate-threshold",
+ &val);
+ if (!rc)
+ led->pdata->thermal_derate_threshold = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read thermal derate threshold\n");
+ return rc;
+ }
+ }
+
+ led->pdata->current_ramp_en =
+ of_property_read_bool(node,
+ "qcom,current-ramp-enabled");
+ if (led->pdata->current_ramp_en) {
+ led->pdata->ramp_up_step = FLASH_LED_RAMP_UP_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_up_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Invalid ramp up step values\n");
+ return -EINVAL;
+ }
+ led->pdata->ramp_up_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read ramp up steps\n");
+ return rc;
+ }
+
+ led->pdata->ramp_dn_step = FLASH_LED_RAMP_DN_STEP_DEFAULT_US;
+ rc = of_property_read_string(node, "qcom,ramp_dn_step", &temp);
+ if (!rc) {
+ temp_val = qpnp_flash_led_get_ramp_step(temp);
+ if (temp_val < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Invalid ramp down step values\n");
+ return rc;
+ }
+ led->pdata->ramp_dn_step = (u8)temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read ramp down steps\n");
+ return rc;
+ }
+ }
+
+ led->pdata->vph_pwr_droop_en = of_property_read_bool(node,
+ "qcom,vph-pwr-droop-enabled");
+ if (led->pdata->vph_pwr_droop_en) {
+ led->pdata->vph_pwr_droop_threshold =
+ FLASH_LED_VPH_PWR_DROOP_THRESHOLD_DEFAULT_MV;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-threshold", &val);
+ if (!rc) {
+ led->pdata->vph_pwr_droop_threshold = (u16)val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read VPH PWR droop threshold\n");
+ return rc;
+ }
+
+ led->pdata->vph_pwr_droop_debounce_time =
+ FLASH_LED_VPH_PWR_DROOP_DEBOUNCE_TIME_DEFAULT_US;
+ rc = of_property_read_u32(node,
+ "qcom,vph-pwr-droop-debounce-time", &val);
+ if (!rc)
+ led->pdata->vph_pwr_droop_debounce_time = (u8)val;
+ else if (rc != -EINVAL) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read VPH PWR droop debounce time\n");
+ return rc;
+ }
+ }
+
+ led->pdata->hdrm_sns_ch0_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch0-enabled");
+
+ led->pdata->hdrm_sns_ch1_en = of_property_read_bool(node,
+ "qcom,headroom-sense-ch1-enabled");
+
+ led->pdata->power_detect_en = of_property_read_bool(node,
+ "qcom,power-detect-enabled");
+
+ led->pdata->mask3_en = of_property_read_bool(node,
+ "qcom,otst2-module-enabled");
+
+ led->pdata->follow_rb_disable = of_property_read_bool(node,
+ "qcom,follow-otst2-rb-disabled");
+
+ led->pdata->die_current_derate_en = of_property_read_bool(node,
+ "qcom,die-current-derate-enabled");
+
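+	/*
+	 * Die-temp derating uses two equal-length arrays: temperature
+	 * thresholds in degC and the corresponding current limits in mA.
+	 */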
+ if (led->pdata->die_current_derate_en) {
+ led->vadc_dev = qpnp_get_vadc(&led->spmi_dev->dev,
+ "die-temp");
+ if (IS_ERR(led->vadc_dev)) {
+ pr_err("VADC channel property Missing\n");
+ return -EINVAL;
+ }
+
+ if (of_find_property(node, "qcom,die-temp-threshold",
+ &led->pdata->temp_threshold_num)) {
+
+ if (led->pdata->temp_threshold_num > 0) {
+ led->pdata->die_temp_threshold_degc =
+ devm_kzalloc(&led->spmi_dev->dev,
+ led->pdata->temp_threshold_num,
+ GFP_KERNEL);
+
+ if (led->pdata->die_temp_threshold_degc
+ == NULL) {
+ dev_err(&led->spmi_dev->dev,
+ "failed to allocate die temp array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_threshold_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-threshold",
+ led->pdata->die_temp_threshold_degc,
+ led->pdata->temp_threshold_num);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "couldn't read temp threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ if (of_find_property(node, "qcom,die-temp-derate-current",
+ &led->pdata->temp_derate_curr_num)) {
+ if (led->pdata->temp_derate_curr_num > 0) {
+ led->pdata->die_temp_derate_curr_ma =
+ devm_kzalloc(&led->spmi_dev->dev,
+ led->pdata->temp_derate_curr_num,
+ GFP_KERNEL);
+ if (led->pdata->die_temp_derate_curr_ma
+ == NULL) {
+ dev_err(&led->spmi_dev->dev,
+ "failed to allocate die derate current array\n");
+ return -ENOMEM;
+ }
+ led->pdata->temp_derate_curr_num /=
+ sizeof(unsigned int);
+
+ rc = of_property_read_u32_array(node,
+ "qcom,die-temp-derate-current",
+ led->pdata->die_temp_derate_curr_ma,
+ led->pdata->temp_derate_curr_num);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "couldn't read temp limits rc =%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+ if (led->pdata->temp_threshold_num !=
+ led->pdata->temp_derate_curr_num) {
+			pr_err("die temp threshold and derate current arrays must have the same size\n");
+ return -EINVAL;
+ }
+ }
+
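+	/* Pinctrl states are optional; probe continues without them if absent */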
+ led->pinctrl = devm_pinctrl_get(&led->spmi_dev->dev);
+ if (IS_ERR_OR_NULL(led->pinctrl)) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to acquire pinctrl\n");
+ led->pinctrl = NULL;
+ return 0;
+ } else {
+ led->gpio_state_active =
+ pinctrl_lookup_state(led->pinctrl, "flash_led_enable");
+ if (IS_ERR_OR_NULL(led->gpio_state_active)) {
+ dev_err(&led->spmi_dev->dev,
+ "Can not lookup LED active state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_active);
+ }
+ led->gpio_state_suspend =
+ pinctrl_lookup_state(led->pinctrl,
+ "flash_led_disable");
+ if (IS_ERR_OR_NULL(led->gpio_state_suspend)) {
+ dev_err(&led->spmi_dev->dev,
+ "Can not lookup LED disable state\n");
+ devm_pinctrl_put(led->pinctrl);
+ led->pinctrl = NULL;
+ return PTR_ERR(led->gpio_state_suspend);
+ }
+ }
+
+ return 0;
+}
+
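+/*
+ * Probe: allocate driver data, parse the common and per-child devicetree
+ * properties, initialize the hardware and register one LED class device
+ * per child node.
+ */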
+static int qpnp_flash_led_probe(struct spmi_device *spmi)
+{
+ struct qpnp_flash_led *led;
+ struct resource *flash_resource;
+ struct device_node *node, *temp;
+ struct dentry *root, *file;
+	int rc, i = 0, j = -1, num_leds = 0;
+ u32 val;
+
+ node = spmi->dev.of_node;
+ if (node == NULL) {
+ dev_info(&spmi->dev, "No flash device defined\n");
+ return -ENODEV;
+ }
+
+ flash_resource = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+ if (!flash_resource) {
+ dev_err(&spmi->dev, "Unable to get flash LED base address\n");
+ return -EINVAL;
+ }
+
+ led = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_flash_led),
+ GFP_KERNEL);
+ if (!led) {
+ dev_err(&spmi->dev,
+ "Unable to allocate memory for flash LED\n");
+ return -ENOMEM;
+ }
+
+ led->base = flash_resource->start;
+ led->spmi_dev = spmi;
+ led->current_addr = FLASH_LED0_CURRENT(led->base);
+ led->current2_addr = FLASH_LED1_CURRENT(led->base);
+
+ led->pdata = devm_kzalloc(&spmi->dev,
+ sizeof(struct flash_led_platform_data), GFP_KERNEL);
+ if (!led->pdata) {
+ dev_err(&spmi->dev,
+ "Unable to allocate memory for platform data\n");
+ return -ENOMEM;
+ }
+
+	rc = qpnp_flash_led_get_peripheral_type(led);
+	if (rc < 0) {
+		dev_err(&spmi->dev, "Failed to get peripheral type\n");
+		return rc;
+	}
+	led->peripheral_type = (u8)rc;
+
+ rc = qpnp_flash_led_parse_common_dt(led, node);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Failed to get common config for flash LEDs\n");
+ return rc;
+ }
+
+ rc = qpnp_flash_led_init_settings(led);
+ if (rc) {
+ dev_err(&spmi->dev, "Failed to initialize flash LED\n");
+ return rc;
+ }
+
+ rc = qpnp_get_pmic_revid(led);
+ if (rc)
+ return rc;
+
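+	/* Count child nodes; each one is registered as a separate LED device */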
+ temp = NULL;
+ while ((temp = of_get_next_child(node, temp)))
+ num_leds++;
+
+ if (!num_leds)
+ return -ECHILD;
+
+ led->flash_node = devm_kzalloc(&spmi->dev,
+ (sizeof(struct flash_node_data) * num_leds),
+ GFP_KERNEL);
+ if (!led->flash_node) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&led->flash_led_lock);
+
+ led->ordered_workq = alloc_ordered_workqueue("flash_led_workqueue", 0);
+ if (!led->ordered_workq) {
+ dev_err(&spmi->dev,
+ "Failed to allocate ordered workqueue\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ led->flash_node[i].cdev.brightness_set =
+ qpnp_flash_led_brightness_set;
+ led->flash_node[i].cdev.brightness_get =
+ qpnp_flash_led_brightness_get;
+ led->flash_node[i].spmi_dev = spmi;
+
+ INIT_WORK(&led->flash_node[i].work, qpnp_flash_led_work);
+ rc = of_property_read_string(temp, "qcom,led-name",
+ &led->flash_node[i].cdev.name);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read flash name\n");
+ return rc;
+ }
+
+ rc = of_property_read_string(temp, "qcom,default-led-trigger",
+ &led->flash_node[i].cdev.default_trigger);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read trigger name\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(temp, "qcom,max-current", &val);
+ if (!rc) {
+ if (val < FLASH_LED_MIN_CURRENT_MA)
+ val = FLASH_LED_MIN_CURRENT_MA;
+ led->flash_node[i].max_current = (u16)val;
+ led->flash_node[i].cdev.max_brightness = val;
+ } else {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read max current\n");
+ return rc;
+ }
+ rc = led_classdev_register(&spmi->dev,
+ &led->flash_node[i].cdev);
+ if (rc) {
+ dev_err(&spmi->dev, "Unable to register led\n");
+ goto error_led_register;
+ }
+
+ led->flash_node[i].cdev.dev->of_node = temp;
+
+ rc = qpnp_flash_led_parse_each_led_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Failed to parse config for each LED\n");
+ goto error_led_register;
+ }
+
+ if (led->flash_node[i].num_regulators) {
+ rc = flash_regulator_parse_dt(led, &led->flash_node[i]);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to parse regulator data\n");
+ goto error_led_register;
+ }
+
+ rc = flash_regulator_setup(led, &led->flash_node[i],
+ true);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to set up regulator\n");
+ goto error_led_register;
+ }
+
+ }
+
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++) {
+ rc =
+ sysfs_create_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ if (rc)
+ goto error_led_register;
+ }
+
+ i++;
+ }
+
+ led->num_leds = i;
+
+ root = debugfs_create_dir("flashLED", NULL);
+ if (IS_ERR_OR_NULL(root)) {
+		pr_err("Error creating top level directory err%ld\n",
+						(long)root);
+		if (PTR_ERR(root) == -ENODEV)
+			pr_err("debugfs is not enabled in kernel\n");
+ goto error_led_debugfs;
+ }
+
+ led->dbgfs_root = root;
+ file = debugfs_create_file("enable_debug", S_IRUSR | S_IWUSR, root,
+ led, &flash_led_dfs_dbg_feature_fops);
+ if (!file) {
+ pr_err("error creating 'enable_debug' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("latched", S_IRUSR | S_IWUSR, root, led,
+ &flash_led_dfs_latched_reg_fops);
+ if (!file) {
+ pr_err("error creating 'latched' entry\n");
+ goto error_led_debugfs;
+ }
+
+ file = debugfs_create_file("strobe", S_IRUSR | S_IWUSR, root, led,
+ &flash_led_dfs_strobe_reg_fops);
+ if (!file) {
+ pr_err("error creating 'strobe' entry\n");
+ goto error_led_debugfs;
+ }
+
+ dev_set_drvdata(&spmi->dev, led);
+
+ return 0;
+
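+/*
+ * Error unwind: remove the sysfs attributes and LED class devices registered
+ * so far, newest first, then tear down debugfs, the mutex and the workqueue.
+ */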
+error_led_debugfs:
+ i = led->num_leds - 1;
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
+error_led_register:
+ for (; i >= 0; i--) {
+ for (; j >= 0; j--)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ j = ARRAY_SIZE(qpnp_flash_led_attrs) - 1;
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return rc;
+}
+
+static int qpnp_flash_led_remove(struct spmi_device *spmi)
+{
+ struct qpnp_flash_led *led = dev_get_drvdata(&spmi->dev);
+ int i, j;
+
+ for (i = led->num_leds - 1; i >= 0; i--) {
+ if (led->flash_node[i].reg_data) {
+ if (led->flash_node[i].flash_on)
+ flash_regulator_enable(led,
+ &led->flash_node[i], false);
+ flash_regulator_setup(led, &led->flash_node[i],
+ false);
+ }
+ for (j = 0; j < ARRAY_SIZE(qpnp_flash_led_attrs); j++)
+ sysfs_remove_file(&led->flash_node[i].cdev.dev->kobj,
+ &qpnp_flash_led_attrs[j].attr);
+ led_classdev_unregister(&led->flash_node[i].cdev);
+ }
+ debugfs_remove_recursive(led->dbgfs_root);
+ mutex_destroy(&led->flash_led_lock);
+ destroy_workqueue(led->ordered_workq);
+
+ return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-flash-led",},
+ { },
+};
+
+static struct spmi_driver qpnp_flash_led_driver = {
+ .driver = {
+ .name = "qcom,qpnp-flash-led",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_flash_led_probe,
+ .remove = qpnp_flash_led_remove,
+};
+
+static int __init qpnp_flash_led_init(void)
+{
+ return spmi_driver_register(&qpnp_flash_led_driver);
+}
+late_initcall(qpnp_flash_led_init);
+
+static void __exit qpnp_flash_led_exit(void)
+{
+ spmi_driver_unregister(&qpnp_flash_led_driver);
+}
+module_exit(qpnp_flash_led_exit);
+
+MODULE_DESCRIPTION("QPNP Flash LED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp-flash");
diff --git a/drivers/leds/leds-qpnp-wled.c b/drivers/leds/leds-qpnp-wled.c
new file mode 100644
index 000000000000..7d4e394d6be1
--- /dev/null
+++ b/drivers/leds/leds-qpnp-wled.c
@@ -0,0 +1,1743 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/leds-qpnp-wled.h>
+
+#define QPNP_IRQ_FLAGS (IRQF_TRIGGER_RISING | \
+ IRQF_TRIGGER_FALLING | \
+ IRQF_ONESHOT)
+
+/* base addresses */
+#define QPNP_WLED_CTRL_BASE "qpnp-wled-ctrl-base"
+#define QPNP_WLED_SINK_BASE "qpnp-wled-sink-base"
+
+/* ctrl registers */
+#define QPNP_WLED_INT_EN_SET(b) (b + 0x15)
+#define QPNP_WLED_EN_REG(b) (b + 0x46)
+#define QPNP_WLED_FDBK_OP_REG(b) (b + 0x48)
+#define QPNP_WLED_VREF_REG(b) (b + 0x49)
+#define QPNP_WLED_BOOST_DUTY_REG(b) (b + 0x4B)
+#define QPNP_WLED_SWITCH_FREQ_REG(b) (b + 0x4C)
+#define QPNP_WLED_OVP_REG(b) (b + 0x4D)
+#define QPNP_WLED_ILIM_REG(b) (b + 0x4E)
+#define QPNP_WLED_SOFTSTART_RAMP_DLY(b) (b + 0x53)
+#define QPNP_WLED_VLOOP_COMP_RES_REG(b) (b + 0x55)
+#define QPNP_WLED_VLOOP_COMP_GM_REG(b) (b + 0x56)
+#define QPNP_WLED_PSM_CTRL_REG(b) (b + 0x5B)
+#define QPNP_WLED_SC_PRO_REG(b) (b + 0x5E)
+#define QPNP_WLED_TEST1_REG(b) (b + 0xE2)
+#define QPNP_WLED_TEST4_REG(b) (b + 0xE5)
+#define QPNP_WLED_REF_7P7_TRIM_REG(b) (b + 0xF2)
+
+#define QPNP_WLED_EN_MASK 0x7F
+#define QPNP_WLED_EN_SHIFT 7
+#define QPNP_WLED_FDBK_OP_MASK 0xF8
+#define QPNP_WLED_VREF_MASK 0xF0
+#define QPNP_WLED_VREF_STEP_MV 25
+#define QPNP_WLED_VREF_MIN_MV 300
+#define QPNP_WLED_VREF_MAX_MV 675
+#define QPNP_WLED_DFLT_VREF_MV 350
+
+#define QPNP_WLED_VLOOP_COMP_RES_MASK 0xF0
+#define QPNP_WLED_VLOOP_COMP_RES_OVERWRITE 0x80
+#define QPNP_WLED_LOOP_COMP_RES_DFLT_AMOLED_KOHM 320
+#define QPNP_WLED_LOOP_COMP_RES_STEP_KOHM 20
+#define QPNP_WLED_LOOP_COMP_RES_MIN_KOHM 20
+#define QPNP_WLED_LOOP_COMP_RES_MAX_KOHM 320
+#define QPNP_WLED_VLOOP_COMP_GM_MASK 0xF0
+#define QPNP_WLED_VLOOP_COMP_GM_OVERWRITE 0x80
+#define QPNP_WLED_LOOP_EA_GM_DFLT_AMOLED 0x03
+#define QPNP_WLED_LOOP_EA_GM_MIN 0x0
+#define QPNP_WLED_LOOP_EA_GM_MAX 0xF
+#define QPNP_WLED_VREF_PSM_MASK 0xF8
+#define QPNP_WLED_VREF_PSM_STEP_MV 50
+#define QPNP_WLED_VREF_PSM_MIN_MV 400
+#define QPNP_WLED_VREF_PSM_MAX_MV 750
+#define QPNP_WLED_VREF_PSM_DFLT_AMOLED_MV 450
+#define QPNP_WLED_PSM_CTRL_OVERWRITE 0x80
+#define QPNP_WLED_AVDD_MIN_TRIM_VALUE -7
+#define QPNP_WLED_AVDD_MAX_TRIM_VALUE 8
+#define QPNP_WLED_AVDD_TRIM_CENTER_VALUE 7
+
+#define QPNP_WLED_ILIM_MASK 0xF8
+#define QPNP_WLED_ILIM_MIN_MA 105
+#define QPNP_WLED_ILIM_MAX_MA 1980
+#define QPNP_WLED_ILIM_STEP_MA 280
+#define QPNP_WLED_DFLT_ILIM_MA 980
+#define QPNP_WLED_ILIM_OVERWRITE 0x80
+#define QPNP_WLED_BOOST_DUTY_MASK 0xFC
+#define QPNP_WLED_BOOST_DUTY_STEP_NS 52
+#define QPNP_WLED_BOOST_DUTY_MIN_NS 26
+#define QPNP_WLED_BOOST_DUTY_MAX_NS 156
+#define QPNP_WLED_DEF_BOOST_DUTY_NS 104
+#define QPNP_WLED_SWITCH_FREQ_MASK 0xF0
+#define QPNP_WLED_SWITCH_FREQ_800_KHZ 800
+#define QPNP_WLED_SWITCH_FREQ_1600_KHZ 1600
+#define QPNP_WLED_OVP_MASK 0xFC
+#define QPNP_WLED_OVP_17800_MV 17800
+#define QPNP_WLED_OVP_19400_MV 19400
+#define QPNP_WLED_OVP_29500_MV 29500
+#define QPNP_WLED_OVP_31000_MV 31000
+#define QPNP_WLED_TEST4_EN_VREF_UP 0x32
+#define QPNP_WLED_INT_EN_SET_OVP_DIS 0x00
+#define QPNP_WLED_INT_EN_SET_OVP_EN 0x02
+#define QPNP_WLED_OVP_FLT_SLEEP_US 10
+#define QPNP_WLED_TEST4_EN_IIND_UP 0x1
+
+/* sink registers */
+#define QPNP_WLED_CURR_SINK_REG(b) (b + 0x46)
+#define QPNP_WLED_SYNC_REG(b) (b + 0x47)
+#define QPNP_WLED_MOD_REG(b) (b + 0x4A)
+#define QPNP_WLED_HYB_THRES_REG(b) (b + 0x4B)
+#define QPNP_WLED_MOD_EN_REG(b, n) (b + 0x50 + (n * 0x10))
+#define QPNP_WLED_SYNC_DLY_REG(b, n) (QPNP_WLED_MOD_EN_REG(b, n) + 0x01)
+#define QPNP_WLED_FS_CURR_REG(b, n) (QPNP_WLED_MOD_EN_REG(b, n) + 0x02)
+#define QPNP_WLED_CABC_REG(b, n) (QPNP_WLED_MOD_EN_REG(b, n) + 0x06)
+#define QPNP_WLED_BRIGHT_LSB_REG(b, n) (QPNP_WLED_MOD_EN_REG(b, n) + 0x07)
+#define QPNP_WLED_BRIGHT_MSB_REG(b, n) (QPNP_WLED_MOD_EN_REG(b, n) + 0x08)
+#define QPNP_WLED_SINK_TEST5_REG(b) (b + 0xE6)
+
+#define QPNP_WLED_MOD_FREQ_1200_KHZ 1200
+#define QPNP_WLED_MOD_FREQ_2400_KHZ 2400
+#define QPNP_WLED_MOD_FREQ_9600_KHZ 9600
+#define QPNP_WLED_MOD_FREQ_19200_KHZ 19200
+#define QPNP_WLED_MOD_FREQ_MASK 0x3F
+#define QPNP_WLED_MOD_FREQ_SHIFT 6
+#define QPNP_WLED_ACC_CLK_FREQ_MASK 0xE7
+#define QPNP_WLED_ACC_CLK_FREQ_SHIFT 3
+#define QPNP_WLED_PHASE_STAG_MASK 0xDF
+#define QPNP_WLED_PHASE_STAG_SHIFT 5
+#define QPNP_WLED_DIM_RES_MASK 0xFD
+#define QPNP_WLED_DIM_RES_SHIFT 1
+#define QPNP_WLED_DIM_HYB_MASK 0xFB
+#define QPNP_WLED_DIM_HYB_SHIFT 2
+#define QPNP_WLED_DIM_ANA_MASK 0xFE
+#define QPNP_WLED_HYB_THRES_MASK 0xF8
+#define QPNP_WLED_HYB_THRES_MIN 78
+#define QPNP_WLED_DEF_HYB_THRES 625
+#define QPNP_WLED_HYB_THRES_MAX 10000
+#define QPNP_WLED_MOD_EN_MASK 0x7F
+#define QPNP_WLED_MOD_EN_SHFT 7
+#define QPNP_WLED_MOD_EN 1
+#define QPNP_WLED_GATE_DRV_MASK 0xFE
+#define QPNP_WLED_SYNC_DLY_MASK 0xF8
+#define QPNP_WLED_SYNC_DLY_MIN_US 0
+#define QPNP_WLED_SYNC_DLY_MAX_US 1400
+#define QPNP_WLED_SYNC_DLY_STEP_US 200
+#define QPNP_WLED_DEF_SYNC_DLY_US 400
+#define QPNP_WLED_FS_CURR_MASK 0xF0
+#define QPNP_WLED_FS_CURR_MIN_UA 0
+#define QPNP_WLED_FS_CURR_MAX_UA 30000
+#define QPNP_WLED_FS_CURR_STEP_UA 2500
+#define QPNP_WLED_CABC_MASK 0x7F
+#define QPNP_WLED_CABC_SHIFT 7
+#define QPNP_WLED_CURR_SINK_SHIFT 4
+#define QPNP_WLED_BRIGHT_LSB_MASK 0xFF
+#define QPNP_WLED_BRIGHT_MSB_SHIFT 8
+#define QPNP_WLED_BRIGHT_MSB_MASK 0x0F
+#define QPNP_WLED_SYNC 0x0F
+#define QPNP_WLED_SYNC_RESET 0x00
+
+#define QPNP_WLED_SINK_TEST5_HYB 0x14
+#define QPNP_WLED_SINK_TEST5_DIG 0x1E
+
+#define QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE 0x0B
+#define QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE 0x05
+
+#define QPNP_WLED_DISP_SEL_REG(b) (b + 0x44)
+#define QPNP_WLED_MODULE_RDY_REG(b) (b + 0x45)
+#define QPNP_WLED_MODULE_EN_REG(b) (b + 0x46)
+#define QPNP_WLED_MODULE_RDY_MASK 0x7F
+#define QPNP_WLED_MODULE_RDY_SHIFT 7
+#define QPNP_WLED_MODULE_EN_MASK 0x7F
+#define QPNP_WLED_MODULE_EN_SHIFT 7
+#define QPNP_WLED_DISP_SEL_MASK 0x7F
+#define QPNP_WLED_DISP_SEL_SHIFT 7
+#define QPNP_WLED_EN_SC_MASK 0x7F
+#define QPNP_WLED_EN_SC_SHIFT 7
+#define QPNP_WLED_SC_PRO_EN_DSCHGR 0x8
+#define QPNP_WLED_SC_DEB_CYCLES_MIN 2
+#define QPNP_WLED_SC_DEB_CYCLES_MAX 16
+#define QPNP_WLED_SC_DEB_SUB 2
+#define QPNP_WLED_SC_DEB_CYCLES_DFLT_AMOLED 4
+#define QPNP_WLED_EXT_FET_DTEST2 0x09
+
+#define QPNP_WLED_SEC_ACCESS_REG(b) (b + 0xD0)
+#define QPNP_WLED_SEC_UNLOCK 0xA5
+
+#define QPNP_WLED_MAX_STRINGS 4
+#define WLED_MAX_LEVEL_4095 4095
+#define QPNP_WLED_RAMP_DLY_MS 20
+#define QPNP_WLED_TRIGGER_NONE "none"
+#define QPNP_WLED_STR_SIZE 20
+#define QPNP_WLED_MIN_MSLEEP 20
+#define QPNP_WLED_SC_DLY_MS 20
+
+/* output feedback mode */
+enum qpnp_wled_fdbk_op {
+ QPNP_WLED_FDBK_AUTO,
+ QPNP_WLED_FDBK_WLED1,
+ QPNP_WLED_FDBK_WLED2,
+ QPNP_WLED_FDBK_WLED3,
+ QPNP_WLED_FDBK_WLED4,
+};
+
+/* dimming modes */
+enum qpnp_wled_dim_mode {
+ QPNP_WLED_DIM_ANALOG,
+ QPNP_WLED_DIM_DIGITAL,
+ QPNP_WLED_DIM_HYBRID,
+};
+
+/* wled ctrl debug registers */
+static u8 qpnp_wled_ctrl_dbg_regs[] = {
+ 0x44, 0x46, 0x48, 0x49, 0x4b, 0x4c, 0x4d, 0x4e, 0x50, 0x51, 0x52, 0x53,
+ 0x54, 0x55, 0x56, 0x57, 0x58, 0x5a, 0x5b, 0x5d, 0x5e, 0xe2
+};
+
+/* wled sink debug registers */
+static u8 qpnp_wled_sink_dbg_regs[] = {
+ 0x46, 0x47, 0x48, 0x4a, 0x4b,
+ 0x50, 0x51, 0x52, 0x53, 0x56, 0x57, 0x58,
+ 0x60, 0x61, 0x62, 0x63, 0x66, 0x67, 0x68,
+ 0x70, 0x71, 0x72, 0x73, 0x76, 0x77, 0x78,
+ 0x80, 0x81, 0x82, 0x83, 0x86, 0x87, 0x88,
+ 0xe6,
+};
+
+/**
+ * qpnp_wled - wled data structure
+ * @ cdev - led class device
+ * @ spmi - spmi device
+ * @ work - worker for led operation
+ * @ lock - mutex lock for exclusive access
+ * @ fdbk_op - output feedback mode
+ * @ dim_mode - dimming mode
+ * @ ovp_irq - over voltage protection irq
+ * @ sc_irq - short circuit irq
+ * @ sc_cnt - short circuit irq count
+ * @ avdd_trim_steps_from_center - number of steps to trim from center value
+ * @ ctrl_base - base address for wled ctrl
+ * @ sink_base - base address for wled sink
+ * @ mod_freq_khz - modulator frequency in KHZ
+ * @ hyb_thres - threshold for hybrid dimming
+ * @ sync_dly_us - sync delay in us
+ * @ vref_mv - ref voltage in mv
+ * @ vref_psm_mv - ref psm voltage in mv
+ * @ loop_comp_res_kohm - control to select the compensation resistor
+ * @ loop_ea_gm - control to select the gm for the gm stage in control loop
+ * @ sc_deb_cycles - debounce time for short circuit detection
+ * @ switch_freq_khz - switching frequency in KHZ
+ * @ ovp_mv - over voltage protection in mv
+ * @ ilim_ma - current limiter in ma
+ * @ boost_duty_ns - boost duty cycle in ns
+ * @ fs_curr_ua - full scale current in ua
+ * @ ramp_ms - delay between ramp steps in ms
+ * @ ramp_step - ramp step size
+ * @ cons_sync_write_delay_us - delay between two consecutive writes to SYNC
+ * @ strings - supported list of strings
+ * @ num_strings - number of strings
+ * @ en_9b_dim_res - enable or disable 9bit dimming
+ * @ en_phase_stag - enable or disable phase staggering
+ * @ en_cabc - enable or disable cabc
+ * @ disp_type_amoled - type of display: LCD/AMOLED
+ * @ en_ext_pfet_sc_pro - enable sc protection on external pfet
+ * @ prev_state - module enable state from the previous brightness update
+ */
+struct qpnp_wled {
+ struct led_classdev cdev;
+ struct spmi_device *spmi;
+ struct work_struct work;
+ struct mutex lock;
+ enum qpnp_wled_fdbk_op fdbk_op;
+ enum qpnp_wled_dim_mode dim_mode;
+ int ovp_irq;
+ int sc_irq;
+ u32 sc_cnt;
+ u32 avdd_trim_steps_from_center;
+ u16 ctrl_base;
+ u16 sink_base;
+ u16 mod_freq_khz;
+ u16 hyb_thres;
+ u16 sync_dly_us;
+ u16 vref_mv;
+ u16 vref_psm_mv;
+ u16 loop_comp_res_kohm;
+ u16 loop_ea_gm;
+ u16 sc_deb_cycles;
+ u16 switch_freq_khz;
+ u16 ovp_mv;
+ u16 ilim_ma;
+ u16 boost_duty_ns;
+ u16 fs_curr_ua;
+ u16 ramp_ms;
+ u16 ramp_step;
+ u16 cons_sync_write_delay_us;
+ u8 strings[QPNP_WLED_MAX_STRINGS];
+ u8 num_strings;
+ bool en_9b_dim_res;
+ bool en_phase_stag;
+ bool en_cabc;
+ bool disp_type_amoled;
+ bool en_ext_pfet_sc_pro;
+ bool prev_state;
+};
+
+/* helper to read a pmic register */
+static int qpnp_wled_read_reg(struct qpnp_wled *wled, u8 *data, u16 addr)
+{
+ int rc;
+
+ rc = spmi_ext_register_readl(wled->spmi->ctrl, wled->spmi->sid,
+ addr, data, 1);
+ if (rc < 0)
+ dev_err(&wled->spmi->dev,
+ "Error reading address: %x(%d)\n", addr, rc);
+
+ return rc;
+}
+
+/* helper to write a pmic register */
+static int qpnp_wled_write_reg(struct qpnp_wled *wled, u8 *data, u16 addr)
+{
+ int rc;
+
+ rc = spmi_ext_register_writel(wled->spmi->ctrl, wled->spmi->sid,
+ addr, data, 1);
+ if (rc < 0)
+ dev_err(&wled->spmi->dev,
+ "Error writing address: %x(%d)\n", addr, rc);
+
+ dev_dbg(&wled->spmi->dev, "write: WLED_0x%x = 0x%x\n", addr, *data);
+
+ return rc;
+}
+
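+/* unlock secured registers of a peripheral before writing to them */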
+static int qpnp_wled_sec_access(struct qpnp_wled *wled, u16 base_addr)
+{
+ int rc;
+ u8 reg = QPNP_WLED_SEC_UNLOCK;
+
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_SEC_ACCESS_REG(base_addr));
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
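+/* pulse the SYNC register so that queued brightness updates take effect */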
+static int qpnp_wled_sync_reg_toggle(struct qpnp_wled *wled)
+{
+ int rc;
+ u8 reg;
+
+ /* sync */
+ reg = QPNP_WLED_SYNC;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_SYNC_REG(wled->sink_base));
+ if (rc < 0)
+ return rc;
+
+ if (wled->cons_sync_write_delay_us)
+ usleep_range(wled->cons_sync_write_delay_us,
+ wled->cons_sync_write_delay_us + 1);
+
+ reg = QPNP_WLED_SYNC_RESET;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_SYNC_REG(wled->sink_base));
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+/* set wled to a level of brightness */
+static int qpnp_wled_set_level(struct qpnp_wled *wled, int level)
+{
+ int i, rc;
+ u8 reg;
+
+ /* set brightness registers */
+ for (i = 0; i < wled->num_strings; i++) {
+ reg = level & QPNP_WLED_BRIGHT_LSB_MASK;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_BRIGHT_LSB_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc < 0)
+ return rc;
+
+ reg = level >> QPNP_WLED_BRIGHT_MSB_SHIFT;
+ reg = reg & QPNP_WLED_BRIGHT_MSB_MASK;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_BRIGHT_MSB_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc < 0)
+ return rc;
+ }
+
+ rc = qpnp_wled_sync_reg_toggle(wled);
+ if (rc < 0) {
+ dev_err(&wled->spmi->dev, "Failed to toggle sync reg %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
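+/* enable/disable the module; the OVP fault irq is masked during enable */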
+static int qpnp_wled_module_en(struct qpnp_wled *wled,
+ u16 base_addr, bool state)
+{
+ int rc;
+ u8 reg;
+
+ /* disable OVP fault interrupt */
+ if (state) {
+ reg = QPNP_WLED_INT_EN_SET_OVP_DIS;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_INT_EN_SET(base_addr));
+ if (rc)
+ return rc;
+ }
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_MODULE_EN_REG(base_addr));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_MODULE_EN_MASK;
+ reg |= (state << QPNP_WLED_MODULE_EN_SHIFT);
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_MODULE_EN_REG(base_addr));
+ if (rc)
+ return rc;
+
+ /* enable OVP fault interrupt */
+ if (state) {
+ udelay(QPNP_WLED_OVP_FLT_SLEEP_US);
+ reg = QPNP_WLED_INT_EN_SET_OVP_EN;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_INT_EN_SET(base_addr));
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/* sysfs store function for ramp */
+static ssize_t qpnp_wled_ramp_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+ int i, rc;
+
+ mutex_lock(&wled->lock);
+
+ if (!wled->cdev.brightness) {
+ rc = qpnp_wled_module_en(wled, wled->ctrl_base, true);
+ if (rc) {
+ dev_err(&wled->spmi->dev, "wled enable failed\n");
+ goto unlock_mutex;
+ }
+ }
+
+ /* ramp up */
+ for (i = 0; i <= wled->cdev.max_brightness;) {
+ rc = qpnp_wled_set_level(wled, i);
+ if (rc) {
+ dev_err(&wled->spmi->dev, "wled set level failed\n");
+ goto restore_brightness;
+ }
+
+ if (wled->ramp_ms < QPNP_WLED_MIN_MSLEEP)
+ usleep_range(wled->ramp_ms * USEC_PER_MSEC,
+ wled->ramp_ms * USEC_PER_MSEC);
+ else
+ msleep(wled->ramp_ms);
+
+ if (i == wled->cdev.max_brightness)
+ break;
+
+ i += wled->ramp_step;
+ if (i > wled->cdev.max_brightness)
+ i = wled->cdev.max_brightness;
+ }
+
+ /* ramp down */
+ for (i = wled->cdev.max_brightness; i >= 0;) {
+ rc = qpnp_wled_set_level(wled, i);
+ if (rc) {
+ dev_err(&wled->spmi->dev, "wled set level failed\n");
+ goto restore_brightness;
+ }
+
+ if (wled->ramp_ms < QPNP_WLED_MIN_MSLEEP)
+ usleep_range(wled->ramp_ms * USEC_PER_MSEC,
+ wled->ramp_ms * USEC_PER_MSEC);
+ else
+ msleep(wled->ramp_ms);
+
+ if (i == 0)
+ break;
+
+ i -= wled->ramp_step;
+ if (i < 0)
+ i = 0;
+ }
+
+ dev_info(&wled->spmi->dev, "wled ramp complete\n");
+
+restore_brightness:
+ /* restore the old brightness */
+ qpnp_wled_set_level(wled, wled->cdev.brightness);
+ if (!wled->cdev.brightness) {
+ rc = qpnp_wled_module_en(wled, wled->ctrl_base, false);
+ if (rc)
+ dev_err(&wled->spmi->dev, "wled enable failed\n");
+ }
+unlock_mutex:
+ mutex_unlock(&wled->lock);
+
+ return count;
+}
+
+static int qpnp_wled_dump_regs(struct qpnp_wled *wled, u16 base_addr,
+ u8 dbg_regs[], u8 size, char *label,
+ int count, char *buf)
+{
+ int i, rc;
+ u8 reg;
+
+ for (i = 0; i < size; i++) {
+ rc = qpnp_wled_read_reg(wled, &reg,
+ base_addr + dbg_regs[i]);
+ if (rc < 0)
+ return rc;
+
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%s: REG_0x%x = 0x%x\n", label,
+ base_addr + dbg_regs[i], reg);
+
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
+ }
+
+ return count;
+}
+
+/* sysfs show function for debug registers */
+static ssize_t qpnp_wled_dump_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+ int count = 0;
+
+ count = qpnp_wled_dump_regs(wled, wled->ctrl_base,
+ qpnp_wled_ctrl_dbg_regs,
+ ARRAY_SIZE(qpnp_wled_ctrl_dbg_regs),
+ "wled_ctrl", count, buf);
+
+ if (count < 0 || count == PAGE_SIZE - 1)
+ return count;
+
+ count = qpnp_wled_dump_regs(wled, wled->sink_base,
+ qpnp_wled_sink_dbg_regs,
+ ARRAY_SIZE(qpnp_wled_sink_dbg_regs),
+ "wled_sink", count, buf);
+
+	return count;
+}
+
+/* sysfs show function for ramp delay in each step */
+static ssize_t qpnp_wled_ramp_ms_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", wled->ramp_ms);
+}
+
+/* sysfs store function for ramp delay in each step */
+static ssize_t qpnp_wled_ramp_ms_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+ int data;
+
+ if (sscanf(buf, "%d", &data) != 1)
+ return -EINVAL;
+
+ wled->ramp_ms = data;
+ return count;
+}
+
+/* sysfs show function for ramp step */
+static ssize_t qpnp_wled_ramp_step_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", wled->ramp_step);
+}
+
+/* sysfs store function for ramp step */
+static ssize_t qpnp_wled_ramp_step_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+ int data;
+
+ if (sscanf(buf, "%d", &data) != 1)
+ return -EINVAL;
+
+ wled->ramp_step = data;
+ return count;
+}
+
+/* sysfs show function for dim mode */
+static ssize_t qpnp_wled_dim_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+ char *str;
+
+ if (wled->dim_mode == QPNP_WLED_DIM_ANALOG)
+ str = "analog";
+ else if (wled->dim_mode == QPNP_WLED_DIM_DIGITAL)
+ str = "digital";
+ else
+ str = "hybrid";
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
+/* sysfs store function for dim mode*/
+static ssize_t qpnp_wled_dim_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+ char str[QPNP_WLED_STR_SIZE + 1];
+ int rc, temp;
+ u8 reg;
+
+	if (sscanf(buf, "%20s", str) != 1)
+		return -EINVAL;
+
+ if (strcmp(str, "analog") == 0)
+ temp = QPNP_WLED_DIM_ANALOG;
+ else if (strcmp(str, "digital") == 0)
+ temp = QPNP_WLED_DIM_DIGITAL;
+ else
+ temp = QPNP_WLED_DIM_HYBRID;
+
+ if (temp == wled->dim_mode)
+ return count;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_MOD_REG(wled->sink_base));
+ if (rc < 0)
+ return rc;
+
+ if (temp == QPNP_WLED_DIM_HYBRID) {
+ reg &= QPNP_WLED_DIM_HYB_MASK;
+ reg |= (1 << QPNP_WLED_DIM_HYB_SHIFT);
+ } else {
+ reg &= QPNP_WLED_DIM_HYB_MASK;
+ reg |= (0 << QPNP_WLED_DIM_HYB_SHIFT);
+ reg &= QPNP_WLED_DIM_ANA_MASK;
+ reg |= temp;
+ }
+
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_MOD_REG(wled->sink_base));
+ if (rc)
+ return rc;
+
+ wled->dim_mode = temp;
+
+ return count;
+}
+
+/* sysfs show function for full scale current in ua*/
+static ssize_t qpnp_wled_fs_curr_ua_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", wled->fs_curr_ua);
+}
+
+/* sysfs store function for full scale current in ua*/
+static ssize_t qpnp_wled_fs_curr_ua_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(dev);
+ int data, i, rc, temp;
+ u8 reg;
+
+ if (sscanf(buf, "%d", &data) != 1)
+ return -EINVAL;
+
+ for (i = 0; i < wled->num_strings; i++) {
+ if (data < QPNP_WLED_FS_CURR_MIN_UA)
+ data = QPNP_WLED_FS_CURR_MIN_UA;
+ else if (data > QPNP_WLED_FS_CURR_MAX_UA)
+ data = QPNP_WLED_FS_CURR_MAX_UA;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_FS_CURR_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_FS_CURR_MASK;
+ temp = data / QPNP_WLED_FS_CURR_STEP_UA;
+ reg |= temp;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_FS_CURR_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc)
+ return rc;
+ }
+
+ wled->fs_curr_ua = data;
+
+ rc = qpnp_wled_sync_reg_toggle(wled);
+ if (rc < 0) {
+ dev_err(&wled->spmi->dev, "Failed to toggle sync reg %d\n", rc);
+ return rc;
+ }
+
+ return count;
+}
+
+/* sysfs attributes exported by wled */
+static struct device_attribute qpnp_wled_attrs[] = {
+ __ATTR(dump_regs, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_wled_dump_regs_show,
+ NULL),
+ __ATTR(dim_mode, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_wled_dim_mode_show,
+ qpnp_wled_dim_mode_store),
+ __ATTR(fs_curr_ua, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_wled_fs_curr_ua_show,
+ qpnp_wled_fs_curr_ua_store),
+ __ATTR(start_ramp, (S_IRUGO | S_IWUSR | S_IWGRP),
+ NULL,
+ qpnp_wled_ramp_store),
+ __ATTR(ramp_ms, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_wled_ramp_ms_show,
+ qpnp_wled_ramp_ms_store),
+ __ATTR(ramp_step, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_wled_ramp_step_show,
+ qpnp_wled_ramp_step_store),
+};
+
+/* worker for setting wled brightness */
+static void qpnp_wled_work(struct work_struct *work)
+{
+ struct qpnp_wled *wled;
+ int level, rc;
+
+ wled = container_of(work, struct qpnp_wled, work);
+
+ level = wled->cdev.brightness;
+
+ mutex_lock(&wled->lock);
+
+ if (level) {
+ rc = qpnp_wled_set_level(wled, level);
+ if (rc) {
+ dev_err(&wled->spmi->dev, "wled set level failed\n");
+ goto unlock_mutex;
+ }
+ }
+
+ if (!!level != wled->prev_state) {
+ rc = qpnp_wled_module_en(wled, wled->ctrl_base, !!level);
+
+ if (rc) {
+ dev_err(&wled->spmi->dev, "wled %sable failed\n",
+ level ? "en" : "dis");
+ goto unlock_mutex;
+ }
+ }
+
+ wled->prev_state = !!level;
+unlock_mutex:
+ mutex_unlock(&wled->lock);
+}
+
+/* get api registered with led classdev for wled brightness */
+static enum led_brightness qpnp_wled_get(struct led_classdev *led_cdev)
+{
+ struct qpnp_wled *wled;
+
+ wled = container_of(led_cdev, struct qpnp_wled, cdev);
+
+ return wled->cdev.brightness;
+}
+
+/* set api registered with led classdev for wled brightness */
+static void qpnp_wled_set(struct led_classdev *led_cdev,
+ enum led_brightness level)
+{
+ struct qpnp_wled *wled;
+
+ wled = container_of(led_cdev, struct qpnp_wled, cdev);
+
+ if (level < LED_OFF)
+ level = LED_OFF;
+ else if (level > wled->cdev.max_brightness)
+ level = wled->cdev.max_brightness;
+
+ wled->cdev.brightness = level;
+ schedule_work(&wled->work);
+}
+
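+/* configure the display-type dependent settings (LCD vs AMOLED) */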
+static int qpnp_wled_set_disp(struct qpnp_wled *wled, u16 base_addr)
+{
+ int rc;
+ u8 reg;
+
+ /* display type */
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_DISP_SEL_REG(base_addr));
+ if (rc < 0)
+ return rc;
+
+ reg &= QPNP_WLED_DISP_SEL_MASK;
+ reg |= (wled->disp_type_amoled << QPNP_WLED_DISP_SEL_SHIFT);
+
+ rc = qpnp_wled_sec_access(wled, base_addr);
+ if (rc)
+ return rc;
+
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_DISP_SEL_REG(base_addr));
+ if (rc)
+ return rc;
+
+ if (wled->disp_type_amoled) {
+ /* Configure the PSM CTRL register for AMOLED */
+ if (wled->vref_psm_mv < QPNP_WLED_VREF_PSM_MIN_MV)
+ wled->vref_psm_mv = QPNP_WLED_VREF_PSM_MIN_MV;
+ else if (wled->vref_psm_mv > QPNP_WLED_VREF_PSM_MAX_MV)
+ wled->vref_psm_mv = QPNP_WLED_VREF_PSM_MAX_MV;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+
+ reg &= QPNP_WLED_VREF_PSM_MASK;
+ reg |= ((wled->vref_psm_mv - QPNP_WLED_VREF_PSM_MIN_MV)/
+ QPNP_WLED_VREF_PSM_STEP_MV);
+ reg |= QPNP_WLED_PSM_CTRL_OVERWRITE;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_PSM_CTRL_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ /* Configure the VLOOP COMP RES register for AMOLED */
+ if (wled->loop_comp_res_kohm < QPNP_WLED_LOOP_COMP_RES_MIN_KOHM)
+ wled->loop_comp_res_kohm =
+ QPNP_WLED_LOOP_COMP_RES_MIN_KOHM;
+ else if (wled->loop_comp_res_kohm >
+ QPNP_WLED_LOOP_COMP_RES_MAX_KOHM)
+ wled->loop_comp_res_kohm =
+ QPNP_WLED_LOOP_COMP_RES_MAX_KOHM;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_VLOOP_COMP_RES_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+
+ reg &= QPNP_WLED_VLOOP_COMP_RES_MASK;
+ reg |= ((wled->loop_comp_res_kohm -
+ QPNP_WLED_LOOP_COMP_RES_MIN_KOHM)/
+ QPNP_WLED_LOOP_COMP_RES_STEP_KOHM);
+ reg |= QPNP_WLED_VLOOP_COMP_RES_OVERWRITE;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_VLOOP_COMP_RES_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ /* Configure the LOOP COMP GM register for AMOLED */
+ if (wled->loop_ea_gm < QPNP_WLED_LOOP_EA_GM_MIN)
+ wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_MIN;
+ else if (wled->loop_ea_gm > QPNP_WLED_LOOP_EA_GM_MAX)
+ wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_MAX;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_VLOOP_COMP_GM_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+
+ reg &= QPNP_WLED_VLOOP_COMP_GM_MASK;
+ reg |= (wled->loop_ea_gm | QPNP_WLED_VLOOP_COMP_GM_OVERWRITE);
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_VLOOP_COMP_GM_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ /* Configure the Soft start Ramp delay for AMOLED */
+ reg = 0;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_SOFTSTART_RAMP_DLY(base_addr));
+ if (rc)
+ return rc;
+
+ /* Configure the CTRL TEST4 register for AMOLED */
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_TEST4_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_wled_sec_access(wled, base_addr);
+ if (rc)
+ return rc;
+
+ reg |= QPNP_WLED_TEST4_EN_IIND_UP;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_TEST4_REG(base_addr));
+ if (rc)
+ return rc;
+ } else {
+ /*
+ * enable VREF_UP to avoid false ovp on low brightness for LCD
+ */
+ rc = qpnp_wled_sec_access(wled, base_addr);
+ if (rc)
+ return rc;
+
+ reg = QPNP_WLED_TEST4_EN_VREF_UP;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_TEST4_REG(base_addr));
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/* ovp irq handler */
+static irqreturn_t qpnp_wled_ovp_irq(int irq, void *_wled)
+{
+ struct qpnp_wled *wled = _wled;
+
+ dev_dbg(&wled->spmi->dev, "ovp detected\n");
+
+ return IRQ_HANDLED;
+}
+
+/* short circuit irq handler */
+static irqreturn_t qpnp_wled_sc_irq(int irq, void *_wled)
+{
+ struct qpnp_wled *wled = _wled;
+
+ dev_err(&wled->spmi->dev,
+ "Short circuit detected %d times\n", ++wled->sc_cnt);
+
+ qpnp_wled_module_en(wled, wled->ctrl_base, false);
+ msleep(QPNP_WLED_SC_DLY_MS);
+ qpnp_wled_module_en(wled, wled->ctrl_base, true);
+
+ return IRQ_HANDLED;
+}
+
+/* Configure WLED registers */
+static int qpnp_wled_config(struct qpnp_wled *wled)
+{
+ int rc, i, temp;
+ u8 reg = 0;
+
+ /* Configure display type */
+ rc = qpnp_wled_set_disp(wled, wled->ctrl_base);
+ if (rc < 0)
+ return rc;
+
+ /* Configure the FEEDBACK OUTPUT register */
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_FDBK_OP_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_FDBK_OP_MASK;
+ reg |= wled->fdbk_op;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_FDBK_OP_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ /* Configure the VREF register */
+ if (wled->vref_mv < QPNP_WLED_VREF_MIN_MV)
+ wled->vref_mv = QPNP_WLED_VREF_MIN_MV;
+ else if (wled->vref_mv > QPNP_WLED_VREF_MAX_MV)
+ wled->vref_mv = QPNP_WLED_VREF_MAX_MV;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_VREF_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_VREF_MASK;
+ temp = wled->vref_mv - QPNP_WLED_VREF_MIN_MV;
+ reg |= (temp / QPNP_WLED_VREF_STEP_MV);
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_VREF_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ /* Configure the ILIM register */
+ if (wled->ilim_ma < QPNP_WLED_ILIM_MIN_MA)
+ wled->ilim_ma = QPNP_WLED_ILIM_MIN_MA;
+ else if (wled->ilim_ma > QPNP_WLED_ILIM_MAX_MA)
+ wled->ilim_ma = QPNP_WLED_ILIM_MAX_MA;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_ILIM_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+ temp = (wled->ilim_ma / QPNP_WLED_ILIM_STEP_MA);
+ if (temp != (reg & ~QPNP_WLED_ILIM_MASK)) {
+ reg &= QPNP_WLED_ILIM_MASK;
+ reg |= temp;
+ reg |= QPNP_WLED_ILIM_OVERWRITE;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_ILIM_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+ }
+
+ /* Configure the MAX BOOST DUTY register */
+ if (wled->boost_duty_ns < QPNP_WLED_BOOST_DUTY_MIN_NS)
+ wled->boost_duty_ns = QPNP_WLED_BOOST_DUTY_MIN_NS;
+ else if (wled->boost_duty_ns > QPNP_WLED_BOOST_DUTY_MAX_NS)
+ wled->boost_duty_ns = QPNP_WLED_BOOST_DUTY_MAX_NS;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_BOOST_DUTY_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_BOOST_DUTY_MASK;
+ reg |= (wled->boost_duty_ns / QPNP_WLED_BOOST_DUTY_STEP_NS);
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_BOOST_DUTY_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ /* Configure the SWITCHING FREQ register */
+ if (wled->switch_freq_khz == QPNP_WLED_SWITCH_FREQ_1600_KHZ)
+ temp = QPNP_WLED_SWITCH_FREQ_1600_KHZ_CODE;
+ else
+ temp = QPNP_WLED_SWITCH_FREQ_800_KHZ_CODE;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_SWITCH_FREQ_MASK;
+ reg |= temp;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_SWITCH_FREQ_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ /* Configure the OVP register */
+ if (wled->ovp_mv <= QPNP_WLED_OVP_17800_MV) {
+ wled->ovp_mv = QPNP_WLED_OVP_17800_MV;
+ temp = 3;
+ } else if (wled->ovp_mv <= QPNP_WLED_OVP_19400_MV) {
+ wled->ovp_mv = QPNP_WLED_OVP_19400_MV;
+ temp = 2;
+ } else if (wled->ovp_mv <= QPNP_WLED_OVP_29500_MV) {
+ wled->ovp_mv = QPNP_WLED_OVP_29500_MV;
+ temp = 1;
+ } else {
+ wled->ovp_mv = QPNP_WLED_OVP_31000_MV;
+ temp = 0;
+ }
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_OVP_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_OVP_MASK;
+ reg |= temp;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_OVP_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ if (wled->disp_type_amoled) {
+ /* Configure avdd trim register */
+ rc = qpnp_wled_sec_access(wled, wled->ctrl_base);
+ if (rc)
+ return rc;
+
+ /* Check if wled->avdd_trim_steps_from_center is negative */
+ if ((s32)wled->avdd_trim_steps_from_center <
+ QPNP_WLED_AVDD_MIN_TRIM_VALUE) {
+ wled->avdd_trim_steps_from_center =
+ QPNP_WLED_AVDD_MIN_TRIM_VALUE;
+ } else if ((s32)wled->avdd_trim_steps_from_center >
+ QPNP_WLED_AVDD_MAX_TRIM_VALUE) {
+ wled->avdd_trim_steps_from_center =
+ QPNP_WLED_AVDD_MAX_TRIM_VALUE;
+ }
+ reg = wled->avdd_trim_steps_from_center +
+ QPNP_WLED_AVDD_TRIM_CENTER_VALUE;
+
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_REF_7P7_TRIM_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+ }
+
+ /* Configure the MODULATION register */
+ if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_1200_KHZ) {
+ wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_1200_KHZ;
+ temp = 3;
+ } else if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_2400_KHZ) {
+ wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_2400_KHZ;
+ temp = 2;
+ } else if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_9600_KHZ) {
+ wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_9600_KHZ;
+ temp = 1;
+ } else if (wled->mod_freq_khz <= QPNP_WLED_MOD_FREQ_19200_KHZ) {
+ wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_19200_KHZ;
+ temp = 0;
+ } else {
+ wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_9600_KHZ;
+ temp = 1;
+ }
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_MOD_REG(wled->sink_base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_MOD_FREQ_MASK;
+ reg |= (temp << QPNP_WLED_MOD_FREQ_SHIFT);
+
+ reg &= QPNP_WLED_PHASE_STAG_MASK;
+ reg |= (wled->en_phase_stag << QPNP_WLED_PHASE_STAG_SHIFT);
+
+ reg &= QPNP_WLED_ACC_CLK_FREQ_MASK;
+ reg |= (temp << QPNP_WLED_ACC_CLK_FREQ_SHIFT);
+
+ reg &= QPNP_WLED_DIM_RES_MASK;
+ reg |= (wled->en_9b_dim_res << QPNP_WLED_DIM_RES_SHIFT);
+
+ if (wled->dim_mode == QPNP_WLED_DIM_HYBRID) {
+ reg &= QPNP_WLED_DIM_HYB_MASK;
+ reg |= (1 << QPNP_WLED_DIM_HYB_SHIFT);
+ } else {
+ reg &= QPNP_WLED_DIM_HYB_MASK;
+ reg |= (0 << QPNP_WLED_DIM_HYB_SHIFT);
+ reg &= QPNP_WLED_DIM_ANA_MASK;
+ reg |= wled->dim_mode;
+ }
+
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_MOD_REG(wled->sink_base));
+ if (rc)
+ return rc;
+
+ /* Configure the HYBRID THRESHOLD register */
+ if (wled->hyb_thres < QPNP_WLED_HYB_THRES_MIN)
+ wled->hyb_thres = QPNP_WLED_HYB_THRES_MIN;
+ else if (wled->hyb_thres > QPNP_WLED_HYB_THRES_MAX)
+ wled->hyb_thres = QPNP_WLED_HYB_THRES_MAX;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_HYB_THRES_REG(wled->sink_base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_HYB_THRES_MASK;
+ temp = fls(wled->hyb_thres / QPNP_WLED_HYB_THRES_MIN) - 1;
+ reg |= temp;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_HYB_THRES_REG(wled->sink_base));
+ if (rc)
+ return rc;
+
+ /* Configure TEST5 register */
+ if (wled->dim_mode == QPNP_WLED_DIM_DIGITAL)
+ reg = QPNP_WLED_SINK_TEST5_DIG;
+ else
+ reg = QPNP_WLED_SINK_TEST5_HYB;
+
+ rc = qpnp_wled_sec_access(wled, wled->sink_base);
+ if (rc)
+ return rc;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_SINK_TEST5_REG(wled->sink_base));
+ if (rc)
+ return rc;
+
+ /* disable all current sinks and enable selected strings */
+ reg = 0x00;
+	rc = qpnp_wled_write_reg(wled, &reg,
+			QPNP_WLED_CURR_SINK_REG(wled->sink_base));
+	if (rc)
+		return rc;
+
+ for (i = 0; i < wled->num_strings; i++) {
+ if (wled->strings[i] >= QPNP_WLED_MAX_STRINGS) {
+ dev_err(&wled->spmi->dev, "Invalid string number\n");
+ return -EINVAL;
+ }
+
+ /* MODULATOR */
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_MOD_EN_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_MOD_EN_MASK;
+ reg |= (QPNP_WLED_MOD_EN << QPNP_WLED_MOD_EN_SHFT);
+
+ if (wled->dim_mode == QPNP_WLED_DIM_HYBRID)
+ reg &= QPNP_WLED_GATE_DRV_MASK;
+ else
+ reg |= ~QPNP_WLED_GATE_DRV_MASK;
+
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_MOD_EN_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc)
+ return rc;
+
+ /* SYNC DELAY */
+ if (wled->sync_dly_us > QPNP_WLED_SYNC_DLY_MAX_US)
+ wled->sync_dly_us = QPNP_WLED_SYNC_DLY_MAX_US;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_SYNC_DLY_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_SYNC_DLY_MASK;
+ temp = wled->sync_dly_us / QPNP_WLED_SYNC_DLY_STEP_US;
+ reg |= temp;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_SYNC_DLY_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc)
+ return rc;
+
+ /* FULL SCALE CURRENT */
+ if (wled->fs_curr_ua > QPNP_WLED_FS_CURR_MAX_UA)
+ wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA;
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_FS_CURR_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_FS_CURR_MASK;
+ temp = wled->fs_curr_ua / QPNP_WLED_FS_CURR_STEP_UA;
+ reg |= temp;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_FS_CURR_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc)
+ return rc;
+
+ /* CABC */
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_CABC_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_CABC_MASK;
+ reg |= (wled->en_cabc << QPNP_WLED_CABC_SHIFT);
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_CABC_REG(wled->sink_base,
+ wled->strings[i]));
+ if (rc)
+ return rc;
+
+ /* Enable CURRENT SINK */
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base));
+ if (rc < 0)
+ return rc;
+ temp = wled->strings[i] + QPNP_WLED_CURR_SINK_SHIFT;
+ reg |= (1 << temp);
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_CURR_SINK_REG(wled->sink_base));
+ if (rc)
+ return rc;
+ }
+
+ rc = qpnp_wled_sync_reg_toggle(wled);
+ if (rc < 0) {
+ dev_err(&wled->spmi->dev, "Failed to toggle sync reg %d\n", rc);
+ return rc;
+ }
+
+ /* setup ovp and sc irqs */
+ if (wled->ovp_irq >= 0) {
+ rc = devm_request_threaded_irq(&wled->spmi->dev, wled->ovp_irq,
+ NULL, qpnp_wled_ovp_irq,
+ QPNP_IRQ_FLAGS,
+ "qpnp_wled_ovp_irq", wled);
+ if (rc < 0) {
+ dev_err(&wled->spmi->dev,
+ "Unable to request ovp(%d) IRQ(err:%d)\n",
+ wled->ovp_irq, rc);
+ return rc;
+ }
+ }
+
+ if (wled->sc_irq >= 0) {
+ wled->sc_cnt = 0;
+ rc = devm_request_threaded_irq(&wled->spmi->dev, wled->sc_irq,
+ NULL, qpnp_wled_sc_irq,
+ QPNP_IRQ_FLAGS,
+ "qpnp_wled_sc_irq", wled);
+ if (rc < 0) {
+ dev_err(&wled->spmi->dev,
+ "Unable to request sc(%d) IRQ(err:%d)\n",
+ wled->sc_irq, rc);
+ return rc;
+ }
+
+ rc = qpnp_wled_read_reg(wled, &reg,
+ QPNP_WLED_SC_PRO_REG(wled->ctrl_base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_WLED_EN_SC_MASK;
+ reg |= 1 << QPNP_WLED_EN_SC_SHIFT;
+
+ if (wled->disp_type_amoled) {
+ if (wled->sc_deb_cycles < QPNP_WLED_SC_DEB_CYCLES_MIN)
+ wled->sc_deb_cycles =
+ QPNP_WLED_SC_DEB_CYCLES_MIN;
+ else if (wled->sc_deb_cycles >
+ QPNP_WLED_SC_DEB_CYCLES_MAX)
+ wled->sc_deb_cycles =
+ QPNP_WLED_SC_DEB_CYCLES_MAX;
+
+ temp = fls(wled->sc_deb_cycles) - QPNP_WLED_SC_DEB_SUB;
+ reg |= ((temp << 1) | QPNP_WLED_SC_PRO_EN_DSCHGR);
+ }
+
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_SC_PRO_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+
+ if (wled->en_ext_pfet_sc_pro) {
+ rc = qpnp_wled_sec_access(wled, wled->ctrl_base);
+ if (rc)
+ return rc;
+
+ reg = QPNP_WLED_EXT_FET_DTEST2;
+ rc = qpnp_wled_write_reg(wled, &reg,
+ QPNP_WLED_TEST1_REG(wled->ctrl_base));
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* parse wled dtsi parameters */
+static int qpnp_wled_parse_dt(struct qpnp_wled *wled)
+{
+ struct spmi_device *spmi = wled->spmi;
+ struct property *prop;
+ const char *temp_str;
+ u32 temp_val;
+ int rc, i;
+ u8 *strings;
+
+ wled->cdev.name = "wled";
+ rc = of_property_read_string(spmi->dev.of_node,
+ "linux,name", &wled->cdev.name);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(&spmi->dev, "Unable to read led name\n");
+ return rc;
+ }
+
+ wled->cdev.default_trigger = QPNP_WLED_TRIGGER_NONE;
+ rc = of_property_read_string(spmi->dev.of_node, "linux,default-trigger",
+ &wled->cdev.default_trigger);
+ if (rc && (rc != -EINVAL)) {
+ dev_err(&spmi->dev, "Unable to read led trigger\n");
+ return rc;
+ }
+
+ wled->disp_type_amoled = of_property_read_bool(spmi->dev.of_node,
+ "qcom,disp-type-amoled");
+ if (wled->disp_type_amoled) {
+ wled->vref_psm_mv = QPNP_WLED_VREF_PSM_DFLT_AMOLED_MV;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,vref-psm-mv", &temp_val);
+ if (!rc) {
+ wled->vref_psm_mv = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read vref-psm\n");
+ return rc;
+ }
+
+ wled->loop_comp_res_kohm =
+ QPNP_WLED_LOOP_COMP_RES_DFLT_AMOLED_KOHM;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,loop-comp-res-kohm", &temp_val);
+ if (!rc) {
+ wled->loop_comp_res_kohm = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read loop-comp-res-kohm\n");
+ return rc;
+ }
+
+ wled->loop_ea_gm = QPNP_WLED_LOOP_EA_GM_DFLT_AMOLED;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,loop-ea-gm", &temp_val);
+ if (!rc) {
+ wled->loop_ea_gm = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read loop-ea-gm\n");
+ return rc;
+ }
+
+ wled->sc_deb_cycles = QPNP_WLED_SC_DEB_CYCLES_DFLT_AMOLED;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,sc-deb-cycles", &temp_val);
+ if (!rc) {
+ wled->sc_deb_cycles = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read sc debounce cycles\n");
+ return rc;
+ }
+
+ wled->avdd_trim_steps_from_center = 0;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,avdd-trim-steps-from-center", &temp_val);
+ if (!rc) {
+ wled->avdd_trim_steps_from_center = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read avdd trim steps from center value\n");
+ return rc;
+ }
+ }
+
+ wled->fdbk_op = QPNP_WLED_FDBK_AUTO;
+ rc = of_property_read_string(spmi->dev.of_node,
+ "qcom,fdbk-output", &temp_str);
+ if (!rc) {
+ if (strcmp(temp_str, "wled1") == 0)
+ wled->fdbk_op = QPNP_WLED_FDBK_WLED1;
+ else if (strcmp(temp_str, "wled2") == 0)
+ wled->fdbk_op = QPNP_WLED_FDBK_WLED2;
+ else if (strcmp(temp_str, "wled3") == 0)
+ wled->fdbk_op = QPNP_WLED_FDBK_WLED3;
+ else if (strcmp(temp_str, "wled4") == 0)
+ wled->fdbk_op = QPNP_WLED_FDBK_WLED4;
+ else
+ wled->fdbk_op = QPNP_WLED_FDBK_AUTO;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read feedback output\n");
+ return rc;
+ }
+
+ wled->vref_mv = QPNP_WLED_DFLT_VREF_MV;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,vref-mv", &temp_val);
+ if (!rc) {
+ wled->vref_mv = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read vref\n");
+ return rc;
+ }
+
+ wled->switch_freq_khz = QPNP_WLED_SWITCH_FREQ_800_KHZ;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,switch-freq-khz", &temp_val);
+ if (!rc) {
+ wled->switch_freq_khz = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read switch freq\n");
+ return rc;
+ }
+
+ wled->ovp_mv = QPNP_WLED_OVP_29500_MV;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,ovp-mv", &temp_val);
+ if (!rc) {
+ wled->ovp_mv = temp_val;
+ } else if (rc != -EINVAL) {
+		dev_err(&spmi->dev, "Unable to read ovp\n");
+ return rc;
+ }
+
+ wled->ilim_ma = QPNP_WLED_DFLT_ILIM_MA;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,ilim-ma", &temp_val);
+ if (!rc) {
+ wled->ilim_ma = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read ilim\n");
+ return rc;
+ }
+
+ wled->boost_duty_ns = QPNP_WLED_DEF_BOOST_DUTY_NS;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,boost-duty-ns", &temp_val);
+ if (!rc) {
+ wled->boost_duty_ns = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read boost duty\n");
+ return rc;
+ }
+
+ wled->mod_freq_khz = QPNP_WLED_MOD_FREQ_9600_KHZ;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,mod-freq-khz", &temp_val);
+ if (!rc) {
+ wled->mod_freq_khz = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read modulation freq\n");
+ return rc;
+ }
+
+ wled->dim_mode = QPNP_WLED_DIM_HYBRID;
+ rc = of_property_read_string(spmi->dev.of_node,
+ "qcom,dim-mode", &temp_str);
+ if (!rc) {
+ if (strcmp(temp_str, "analog") == 0)
+ wled->dim_mode = QPNP_WLED_DIM_ANALOG;
+ else if (strcmp(temp_str, "digital") == 0)
+ wled->dim_mode = QPNP_WLED_DIM_DIGITAL;
+ else
+ wled->dim_mode = QPNP_WLED_DIM_HYBRID;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read dim mode\n");
+ return rc;
+ }
+
+ if (wled->dim_mode == QPNP_WLED_DIM_HYBRID) {
+ wled->hyb_thres = QPNP_WLED_DEF_HYB_THRES;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,hyb-thres", &temp_val);
+ if (!rc) {
+ wled->hyb_thres = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read hyb threshold\n");
+ return rc;
+ }
+ }
+
+ wled->sync_dly_us = QPNP_WLED_DEF_SYNC_DLY_US;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,sync-dly-us", &temp_val);
+ if (!rc) {
+ wled->sync_dly_us = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read sync delay\n");
+ return rc;
+ }
+
+ wled->fs_curr_ua = QPNP_WLED_FS_CURR_MAX_UA;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,fs-curr-ua", &temp_val);
+ if (!rc) {
+ wled->fs_curr_ua = temp_val;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read full scale current\n");
+ return rc;
+ }
+
+ wled->cons_sync_write_delay_us = 0;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,cons-sync-write-delay-us", &temp_val);
+ if (!rc)
+ wled->cons_sync_write_delay_us = temp_val;
+
+ wled->en_9b_dim_res = of_property_read_bool(spmi->dev.of_node,
+ "qcom,en-9b-dim-res");
+ wled->en_phase_stag = of_property_read_bool(spmi->dev.of_node,
+ "qcom,en-phase-stag");
+ wled->en_cabc = of_property_read_bool(spmi->dev.of_node,
+ "qcom,en-cabc");
+
+ prop = of_find_property(spmi->dev.of_node,
+ "qcom,led-strings-list", &temp_val);
+ if (!prop || !temp_val || temp_val > QPNP_WLED_MAX_STRINGS) {
+ dev_err(&spmi->dev, "Invalid strings info, use default");
+ wled->num_strings = QPNP_WLED_MAX_STRINGS;
+ for (i = 0; i < wled->num_strings; i++)
+ wled->strings[i] = i;
+ } else {
+ wled->num_strings = temp_val;
+ strings = prop->value;
+ for (i = 0; i < wled->num_strings; ++i)
+ wled->strings[i] = strings[i];
+ }
+
+ wled->ovp_irq = spmi_get_irq_byname(spmi, NULL, "ovp-irq");
+ if (wled->ovp_irq < 0)
+ dev_dbg(&spmi->dev, "ovp irq is not used\n");
+
+ wled->sc_irq = spmi_get_irq_byname(spmi, NULL, "sc-irq");
+ if (wled->sc_irq < 0)
+ dev_dbg(&spmi->dev, "sc irq is not used\n");
+
+ wled->en_ext_pfet_sc_pro = of_property_read_bool(spmi->dev.of_node,
+ "qcom,en-ext-pfet-sc-pro");
+
+ return 0;
+}
+
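+/*
+ * Probe: map the ctrl/sink register bases, parse devicetree, program the
+ * hardware and register the WLED as an LED class device.
+ */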
+static int qpnp_wled_probe(struct spmi_device *spmi)
+{
+ struct qpnp_wled *wled;
+ struct resource *wled_resource;
+ int rc, i;
+
+ wled = devm_kzalloc(&spmi->dev, sizeof(*wled), GFP_KERNEL);
+ if (!wled)
+ return -ENOMEM;
+
+ wled->spmi = spmi;
+
+ wled_resource = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
+ QPNP_WLED_SINK_BASE);
+ if (!wled_resource) {
+ dev_err(&spmi->dev, "Unable to get wled sink base address\n");
+ return -EINVAL;
+ }
+
+ wled->sink_base = wled_resource->start;
+
+ wled_resource = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
+ QPNP_WLED_CTRL_BASE);
+ if (!wled_resource) {
+ dev_err(&spmi->dev, "Unable to get wled ctrl base address\n");
+ return -EINVAL;
+ }
+
+ wled->ctrl_base = wled_resource->start;
+
+ dev_set_drvdata(&spmi->dev, wled);
+
+ rc = qpnp_wled_parse_dt(wled);
+ if (rc) {
+ dev_err(&spmi->dev, "DT parsing failed\n");
+ return rc;
+ }
+
+ rc = qpnp_wled_config(wled);
+ if (rc) {
+ dev_err(&spmi->dev, "wled config failed\n");
+ return rc;
+ }
+
+ mutex_init(&wled->lock);
+ INIT_WORK(&wled->work, qpnp_wled_work);
+ wled->ramp_ms = QPNP_WLED_RAMP_DLY_MS;
+ wled->ramp_step = 1;
+
+ wled->cdev.brightness_set = qpnp_wled_set;
+ wled->cdev.brightness_get = qpnp_wled_get;
+
+ wled->cdev.max_brightness = WLED_MAX_LEVEL_4095;
+
+ rc = led_classdev_register(&spmi->dev, &wled->cdev);
+ if (rc) {
+ dev_err(&spmi->dev, "wled registration failed(%d)\n", rc);
+ goto wled_register_fail;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qpnp_wled_attrs); i++) {
+ rc = sysfs_create_file(&wled->cdev.dev->kobj,
+ &qpnp_wled_attrs[i].attr);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "sysfs creation failed\n");
+ goto sysfs_fail;
+ }
+ }
+
+ return 0;
+
+sysfs_fail:
+ for (i--; i >= 0; i--)
+ sysfs_remove_file(&wled->cdev.dev->kobj,
+ &qpnp_wled_attrs[i].attr);
+ led_classdev_unregister(&wled->cdev);
+wled_register_fail:
+ cancel_work_sync(&wled->work);
+ mutex_destroy(&wled->lock);
+ return rc;
+}
+
+static int qpnp_wled_remove(struct spmi_device *spmi)
+{
+ struct qpnp_wled *wled = dev_get_drvdata(&spmi->dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qpnp_wled_attrs); i++)
+ sysfs_remove_file(&wled->cdev.dev->kobj,
+ &qpnp_wled_attrs[i].attr);
+
+ led_classdev_unregister(&wled->cdev);
+ cancel_work_sync(&wled->work);
+ mutex_destroy(&wled->lock);
+
+ return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-wled",},
+ { },
+};
+
+static struct spmi_driver qpnp_wled_driver = {
+ .driver = {
+ .name = "qcom,qpnp-wled",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_wled_probe,
+ .remove = qpnp_wled_remove,
+};
+
+static int __init qpnp_wled_init(void)
+{
+ return spmi_driver_register(&qpnp_wled_driver);
+}
+module_init(qpnp_wled_init);
+
+static void __exit qpnp_wled_exit(void)
+{
+ spmi_driver_unregister(&qpnp_wled_driver);
+}
+module_exit(qpnp_wled_exit);
+
+MODULE_DESCRIPTION("QPNP WLED driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp-wled");
diff --git a/drivers/leds/leds-qpnp.c b/drivers/leds/leds-qpnp.c
new file mode 100644
index 000000000000..466dfef62a2c
--- /dev/null
+++ b/drivers/leds/leds-qpnp.c
@@ -0,0 +1,4260 @@
+
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+
+#define WLED_MOD_EN_REG(base, n) (base + 0x60 + n*0x10)
+#define WLED_IDAC_DLY_REG(base, n) (WLED_MOD_EN_REG(base, n) + 0x01)
+#define WLED_FULL_SCALE_REG(base, n) (WLED_IDAC_DLY_REG(base, n) + 0x01)
+#define WLED_MOD_SRC_SEL_REG(base, n) (WLED_FULL_SCALE_REG(base, n) + 0x01)
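+/*
+ * Per-string registers are spaced 0x10 apart, e.g. string 0 uses
+ * base + 0x60..0x63 and string 1 uses base + 0x70..0x73.
+ */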
+
+/* wled control registers */
+#define WLED_OVP_INT_STATUS(base) (base + 0x10)
+#define WLED_BRIGHTNESS_CNTL_LSB(base, n) (base + 0x40 + 2*n)
+#define WLED_BRIGHTNESS_CNTL_MSB(base, n) (base + 0x41 + 2*n)
+#define WLED_MOD_CTRL_REG(base) (base + 0x46)
+#define WLED_SYNC_REG(base) (base + 0x47)
+#define WLED_FDBCK_CTRL_REG(base) (base + 0x48)
+#define WLED_SWITCHING_FREQ_REG(base) (base + 0x4C)
+#define WLED_OVP_CFG_REG(base) (base + 0x4D)
+#define WLED_BOOST_LIMIT_REG(base) (base + 0x4E)
+#define WLED_CURR_SINK_REG(base) (base + 0x4F)
+#define WLED_HIGH_POLE_CAP_REG(base) (base + 0x58)
+#define WLED_CURR_SINK_MASK 0xE0
+#define WLED_CURR_SINK_SHFT 0x05
+#define WLED_DISABLE_ALL_SINKS 0x00
+#define WLED_DISABLE_1_2_SINKS 0x80
+#define WLED_SWITCH_FREQ_MASK 0x0F
+#define WLED_OVP_VAL_MASK 0x03
+#define WLED_OVP_INT_MASK 0x02
+#define WLED_OVP_VAL_BIT_SHFT 0x00
+#define WLED_BOOST_LIMIT_MASK 0x07
+#define WLED_BOOST_LIMIT_BIT_SHFT 0x00
+#define WLED_BOOST_ON 0x80
+#define WLED_BOOST_OFF 0x00
+#define WLED_EN_MASK 0x80
+#define WLED_NO_MASK 0x00
+#define WLED_CP_SELECT_MAX 0x03
+#define WLED_CP_SELECT_MASK 0x02
+#define WLED_USE_EXT_GEN_MOD_SRC 0x01
+#define WLED_CTL_DLY_STEP 200
+#define WLED_CTL_DLY_MAX 1400
+#define WLED_MAX_CURR 25
+#define WLED_NO_CURRENT 0x00
+#define WLED_OVP_DELAY 1000
+#define WLED_OVP_DELAY_INT 200
+#define WLED_OVP_DELAY_LOOP 100
+#define WLED_MSB_MASK 0x0F
+#define WLED_MAX_CURR_MASK 0x1F
+#define WLED_OP_FDBCK_MASK 0x07
+#define WLED_OP_FDBCK_BIT_SHFT 0x00
+#define WLED_OP_FDBCK_DEFAULT 0x00
+
+#define WLED_SET_ILIM_CODE 0x01
+
+#define WLED_MAX_LEVEL 4095
+#define WLED_8_BIT_MASK 0xFF
+#define WLED_4_BIT_MASK 0x0F
+#define WLED_8_BIT_SHFT 0x08
+#define WLED_MAX_DUTY_CYCLE 0xFFF
+
+#define WLED_SYNC_VAL 0x07
+#define WLED_SYNC_RESET_VAL 0x00
+
+#define PMIC_VER_8026 0x04
+#define PMIC_VER_8941 0x01
+#define PMIC_VERSION_REG 0x0105
+
+#define WLED_DEFAULT_STRINGS 0x01
+#define WLED_THREE_STRINGS 0x03
+#define WLED_MAX_TRIES 5
+#define WLED_DEFAULT_OVP_VAL 0x02
+#define WLED_BOOST_LIM_DEFAULT 0x03
+#define WLED_CP_SEL_DEFAULT 0x00
+#define WLED_CTRL_DLY_DEFAULT 0x00
+#define WLED_SWITCH_FREQ_DEFAULT 0x0B
+
+#define FLASH_SAFETY_TIMER(base) (base + 0x40)
+#define FLASH_MAX_CURR(base) (base + 0x41)
+#define FLASH_LED_0_CURR(base) (base + 0x42)
+#define FLASH_LED_1_CURR(base) (base + 0x43)
+#define FLASH_CLAMP_CURR(base) (base + 0x44)
+#define FLASH_LED_TMR_CTRL(base) (base + 0x48)
+#define FLASH_HEADROOM(base) (base + 0x4A)
+#define FLASH_STARTUP_DELAY(base) (base + 0x4B)
+#define FLASH_MASK_ENABLE(base) (base + 0x4C)
+#define FLASH_VREG_OK_FORCE(base) (base + 0x4F)
+#define FLASH_ENABLE_CONTROL(base) (base + 0x46)
+#define FLASH_LED_STROBE_CTRL(base) (base + 0x47)
+#define FLASH_WATCHDOG_TMR(base) (base + 0x49)
+#define FLASH_FAULT_DETECT(base) (base + 0x51)
+#define FLASH_PERIPHERAL_SUBTYPE(base) (base + 0x05)
+#define FLASH_CURRENT_RAMP(base) (base + 0x54)
+
+#define FLASH_MAX_LEVEL 0x4F
+#define TORCH_MAX_LEVEL 0x0F
+#define FLASH_NO_MASK 0x00
+
+#define FLASH_MASK_1 0x20
+#define FLASH_MASK_REG_MASK 0xE0
+#define FLASH_HEADROOM_MASK 0x03
+#define FLASH_SAFETY_TIMER_MASK 0x7F
+#define FLASH_CURRENT_MASK 0xFF
+#define FLASH_MAX_CURRENT_MASK 0x7F
+#define FLASH_TMR_MASK 0x03
+#define FLASH_TMR_WATCHDOG 0x03
+#define FLASH_TMR_SAFETY 0x00
+#define FLASH_FAULT_DETECT_MASK 0X80
+#define FLASH_HW_VREG_OK 0x40
+#define FLASH_SW_VREG_OK 0x80
+#define FLASH_VREG_MASK 0xC0
+#define FLASH_STARTUP_DLY_MASK 0x02
+#define FLASH_CURRENT_RAMP_MASK 0xBF
+
+#define FLASH_ENABLE_ALL 0xE0
+#define FLASH_ENABLE_MODULE 0x80
+#define FLASH_ENABLE_MODULE_MASK 0x80
+#define FLASH_DISABLE_ALL 0x00
+#define FLASH_ENABLE_MASK 0xE0
+#define FLASH_ENABLE_LED_0 0xC0
+#define FLASH_ENABLE_LED_1 0xA0
+#define FLASH_INIT_MASK 0xE0
+#define FLASH_SELFCHECK_ENABLE 0x80
+#define FLASH_WATCHDOG_MASK 0x1F
+#define FLASH_RAMP_STEP_27US 0xBF
+
+#define FLASH_HW_SW_STROBE_SEL_MASK 0x04
+#define FLASH_STROBE_MASK 0xC7
+#define FLASH_LED_0_OUTPUT 0x80
+#define FLASH_LED_1_OUTPUT 0x40
+#define FLASH_TORCH_OUTPUT 0xC0
+
+#define FLASH_CURRENT_PRGM_MIN 1
+#define FLASH_CURRENT_PRGM_SHIFT 1
+#define FLASH_CURRENT_MAX 0x4F
+#define FLASH_CURRENT_TORCH 0x07
+
+#define FLASH_DURATION_200ms 0x13
+#define TORCH_DURATION_12s 0x0A
+#define FLASH_CLAMP_200mA 0x0F
+
+#define FLASH_SUBTYPE_DUAL 0x01
+#define FLASH_SUBTYPE_SINGLE 0x02
+
+#define FLASH_RAMP_UP_DELAY_US 1000
+#define FLASH_RAMP_DN_DELAY_US 2160
+
+#define LED_TRIGGER_DEFAULT "none"
+
+#define RGB_LED_SRC_SEL(base) (base + 0x45)
+#define RGB_LED_EN_CTL(base) (base + 0x46)
+#define RGB_LED_ATC_CTL(base) (base + 0x47)
+
+#define RGB_MAX_LEVEL LED_FULL
+#define RGB_LED_ENABLE_RED 0x80
+#define RGB_LED_ENABLE_GREEN 0x40
+#define RGB_LED_ENABLE_BLUE 0x20
+#define RGB_LED_SOURCE_VPH_PWR 0x01
+#define RGB_LED_ENABLE_MASK 0xE0
+#define RGB_LED_SRC_MASK 0x03
+#define QPNP_LED_PWM_FLAGS (PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP)
+#define QPNP_LUT_RAMP_STEP_DEFAULT 255
+#define PWM_LUT_MAX_SIZE 63
+#define PWM_GPLED_LUT_MAX_SIZE 31
+#define RGB_LED_DISABLE 0x00
+
+#define MPP_MAX_LEVEL LED_FULL
+#define LED_MPP_MODE_CTRL(base) (base + 0x40)
+#define LED_MPP_VIN_CTRL(base) (base + 0x41)
+#define LED_MPP_EN_CTRL(base) (base + 0x46)
+#define LED_MPP_SINK_CTRL(base) (base + 0x4C)
+
+#define LED_MPP_CURRENT_MIN 5
+#define LED_MPP_CURRENT_MAX 40
+#define LED_MPP_VIN_CTRL_DEFAULT 0
+#define LED_MPP_CURRENT_PER_SETTING 5
+#define LED_MPP_SOURCE_SEL_DEFAULT LED_MPP_MODE_ENABLE
+
+#define LED_MPP_SINK_MASK 0x07
+#define LED_MPP_MODE_MASK 0x7F
+#define LED_MPP_VIN_MASK 0x03
+#define LED_MPP_EN_MASK 0x80
+#define LED_MPP_SRC_MASK 0x0F
+#define LED_MPP_MODE_CTRL_MASK 0x70
+
+#define LED_MPP_MODE_SINK (0x06 << 4)
+#define LED_MPP_MODE_ENABLE 0x01
+#define LED_MPP_MODE_OUTPUT 0x10
+#define LED_MPP_MODE_DISABLE 0x00
+#define LED_MPP_EN_ENABLE 0x80
+#define LED_MPP_EN_DISABLE 0x00
+
+#define MPP_SOURCE_DTEST1 0x08
+
+#define GPIO_MAX_LEVEL LED_FULL
+#define LED_GPIO_MODE_CTRL(base) (base + 0x40)
+#define LED_GPIO_VIN_CTRL(base) (base + 0x41)
+#define LED_GPIO_EN_CTRL(base) (base + 0x46)
+
+#define LED_GPIO_VIN_CTRL_DEFAULT 0
+#define LED_GPIO_SOURCE_SEL_DEFAULT LED_GPIO_MODE_ENABLE
+
+#define LED_GPIO_MODE_MASK 0x3F
+#define LED_GPIO_VIN_MASK 0x0F
+#define LED_GPIO_EN_MASK 0x80
+#define LED_GPIO_SRC_MASK 0x0F
+#define LED_GPIO_MODE_CTRL_MASK 0x30
+
+#define LED_GPIO_MODE_ENABLE 0x01
+#define LED_GPIO_MODE_DISABLE 0x00
+#define LED_GPIO_MODE_OUTPUT 0x10
+#define LED_GPIO_EN_ENABLE 0x80
+#define LED_GPIO_EN_DISABLE 0x00
+
+#define KPDBL_MAX_LEVEL LED_FULL
+#define KPDBL_ROW_SRC_SEL(base) (base + 0x40)
+#define KPDBL_ENABLE(base) (base + 0x46)
+#define KPDBL_ROW_SRC(base) (base + 0xE5)
+
+#define KPDBL_ROW_SRC_SEL_VAL_MASK 0x0F
+#define KPDBL_ROW_SCAN_EN_MASK 0x80
+#define KPDBL_ROW_SCAN_VAL_MASK 0x0F
+#define KPDBL_ROW_SCAN_EN_SHIFT 7
+#define KPDBL_MODULE_EN 0x80
+#define KPDBL_MODULE_DIS 0x00
+#define KPDBL_MODULE_EN_MASK 0x80
+#define NUM_KPDBL_LEDS 4
+#define KPDBL_MASTER_BIT_INDEX 0
+
+/**
+ * enum qpnp_leds - QPNP supported led ids
+ * @QPNP_ID_WLED - White led backlight
+ */
+enum qpnp_leds {
+ QPNP_ID_WLED = 0,
+ QPNP_ID_FLASH1_LED0,
+ QPNP_ID_FLASH1_LED1,
+ QPNP_ID_RGB_RED,
+ QPNP_ID_RGB_GREEN,
+ QPNP_ID_RGB_BLUE,
+ QPNP_ID_LED_MPP,
+ QPNP_ID_KPDBL,
+ QPNP_ID_LED_GPIO,
+ QPNP_ID_MAX,
+};
+
+/* current boost limit */
+enum wled_current_boost_limit {
+ WLED_CURR_LIMIT_105mA,
+ WLED_CURR_LIMIT_385mA,
+ WLED_CURR_LIMIT_525mA,
+ WLED_CURR_LIMIT_805mA,
+ WLED_CURR_LIMIT_980mA,
+ WLED_CURR_LIMIT_1260mA,
+ WLED_CURR_LIMIT_1400mA,
+ WLED_CURR_LIMIT_1680mA,
+};
+
+/* over voltage protection threshold */
+enum wled_ovp_threshold {
+ WLED_OVP_35V,
+ WLED_OVP_32V,
+ WLED_OVP_29V,
+ WLED_OVP_27V,
+};
+
+enum flash_headroom {
+ HEADROOM_250mV = 0,
+ HEADROOM_300mV,
+ HEADROOM_400mV,
+ HEADROOM_500mV,
+};
+
+enum flash_startup_dly {
+ DELAY_10us = 0,
+ DELAY_32us,
+ DELAY_64us,
+ DELAY_128us,
+};
+
+enum led_mode {
+ PWM_MODE = 0,
+ LPG_MODE,
+ MANUAL_MODE,
+};
+
+static u8 wled_debug_regs[] = {
+ /* brightness registers */
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
+ /* common registers */
+ 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ /* LED1 */
+ 0x60, 0x61, 0x62, 0x63, 0x66,
+ /* LED2 */
+ 0x70, 0x71, 0x72, 0x73, 0x76,
+ /* LED3 */
+ 0x80, 0x81, 0x82, 0x83, 0x86,
+};
+
+static u8 flash_debug_regs[] = {
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x48, 0x49, 0x4b, 0x4c,
+ 0x4f, 0x46, 0x47,
+};
+
+static u8 rgb_pwm_debug_regs[] = {
+ 0x45, 0x46, 0x47,
+};
+
+static u8 mpp_debug_regs[] = {
+ 0x40, 0x41, 0x42, 0x45, 0x46, 0x4c,
+};
+
+static u8 kpdbl_debug_regs[] = {
+ 0x40, 0x46, 0xb1, 0xb3, 0xb4, 0xe5,
+};
+
+static u8 gpio_debug_regs[] = {
+ 0x40, 0x41, 0x42, 0x45, 0x46,
+};
+
+/**
+ * pwm_config_data - pwm configuration data
+ * @lut_params - lut parameters to be used by pwm driver
+ * @pwm_device - pwm device
+ * @pwm_period_us - period for pwm, in us
+ * @mode - mode the led operates in
+ * @old_duty_pcts - storage for duty pcts that may need to be reused
+ * @default_mode - default mode of LED as set in device tree
+ * @use_blink - use blink sysfs entry
+ * @blinking - device is currently blinking w/LPG mode
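+ * @duty_cycles - duty cycle and duty pct data used for LUT configuration
+ * @pwm_enabled - true when the pwm device is currently enabled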
+ */
+struct pwm_config_data {
+ struct lut_params lut_params;
+ struct pwm_device *pwm_dev;
+ u32 pwm_period_us;
+ struct pwm_duty_cycles *duty_cycles;
+ int *old_duty_pcts;
+ u8 mode;
+ u8 default_mode;
+ bool pwm_enabled;
+ bool use_blink;
+ bool blinking;
+};
+
+/**
+ * wled_config_data - wled configuration data
+ * @num_strings - number of wled strings to be configured
+ * @num_physical_strings - physical number of strings supported
+ * @ovp_val - over voltage protection threshold
+ * @boost_curr_lim - boost current limit
+ * @cp_select - high pole capacitance
+ * @ctrl_delay_us - delay in activation of led
+ * @dig_mod_gen_en - digital module generator
+ * @cs_out_en - current sink output enable
+ * @op_fdbck - selection of output as feedback for the boost
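+ * @switch_freq - switching frequency selection
+ * @pmic_version - PMIC version, used to select the sink disable sequence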
+ */
+struct wled_config_data {
+ u8 num_strings;
+ u8 num_physical_strings;
+ u8 ovp_val;
+ u8 boost_curr_lim;
+ u8 cp_select;
+ u8 ctrl_delay_us;
+ u8 switch_freq;
+ u8 op_fdbck;
+ u8 pmic_version;
+ bool dig_mod_gen_en;
+ bool cs_out_en;
+};
+
+/**
+ * mpp_config_data - mpp configuration data
+ * @pwm_cfg - device pwm configuration
+ * @current_setting - current setting, 5ma-40ma in 5ma increments
+ * @source_sel - source selection
+ * @mode_ctrl - mode control
+ * @vin_ctrl - input control
+ * @min_brightness - minimum brightness supported
+ * @pwm_mode - pwm mode in use
+ * @max_uV - maximum regulator voltage
+ * @min_uV - minimum regulator voltage
+ * @mpp_reg - regulator to power mpp based LED
+ * @enable - flag indicating LED on or off
+ */
+struct mpp_config_data {
+ struct pwm_config_data *pwm_cfg;
+ u8 current_setting;
+ u8 source_sel;
+ u8 mode_ctrl;
+ u8 vin_ctrl;
+ u8 min_brightness;
+ u8 pwm_mode;
+ u32 max_uV;
+ u32 min_uV;
+ struct regulator *mpp_reg;
+ bool enable;
+};
+
+/**
+ * flash_config_data - flash configuration data
+ * @current_prgm - current to be programmed, scaled by max level
+ * @clamp_curr - clamp current to use
+ * @headroom - headroom value to use
+ * @duration - duration of the flash
+ * @enable_module - enable address for particular flash
+ * @trigger_flash - trigger flash
+ * @startup_dly - startup delay for flash
+ * @strobe_type - select between sw and hw strobe
+ * @peripheral_subtype - module peripheral subtype
+ * @current_addr - address to write for current
+ * @second_addr - address of secondary flash to be written
+ * @safety_timer - enable safety timer or watchdog timer
+ * @torch_enable - enable flash LED torch mode
+ * @flash_reg_get - flash regulator attached or not
+ * @flash_wa_reg_get - workaround regulator attached or not
+ * @flash_on - flash status, on or off
+ * @torch_on - torch status, on or off
+ * @vreg_ok - specifies strobe type, sw or hw
+ * @no_smbb_support - specifies if smbb boost is not required and there is a
+ * single regulator for both flash and torch
+ * @flash_boost_reg - boost regulator for flash
+ * @torch_boost_reg - boost regulator for torch
+ * @flash_wa_reg - flash regulator for wa
+ */
+struct flash_config_data {
+ u8 current_prgm;
+ u8 clamp_curr;
+ u8 headroom;
+ u8 duration;
+ u8 enable_module;
+ u8 trigger_flash;
+ u8 startup_dly;
+ u8 strobe_type;
+ u8 peripheral_subtype;
+ u16 current_addr;
+ u16 second_addr;
+ bool safety_timer;
+ bool torch_enable;
+ bool flash_reg_get;
+ bool flash_wa_reg_get;
+ bool flash_on;
+ bool torch_on;
+ bool vreg_ok;
+ bool no_smbb_support;
+ struct regulator *flash_boost_reg;
+ struct regulator *torch_boost_reg;
+ struct regulator *flash_wa_reg;
+};
+
+/**
+ * kpdbl_config_data - kpdbl configuration data
+ * @pwm_cfg - device pwm configuration
+ * @mode - running mode: pwm or lut
+ * @row_id - row id of the led
+ * @row_src_vbst - 0 for vph_pwr and 1 for vbst
+ * @row_src_en - enable row source
+ * @always_on - always on row
+ * @lut_params - lut parameters to be used by pwm driver
+ * @duty_cycles - duty cycles for lut
+ * @pwm_mode - pwm mode in use
+ */
+struct kpdbl_config_data {
+ struct pwm_config_data *pwm_cfg;
+ u32 row_id;
+ bool row_src_vbst;
+ bool row_src_en;
+ bool always_on;
+ struct pwm_duty_cycles *duty_cycles;
+ struct lut_params lut_params;
+ u8 pwm_mode;
+};
+
+/**
+ * rgb_config_data - rgb configuration data
+ * @pwm_cfg - device pwm configuration
+ * @enable - bits to enable led
+ */
+struct rgb_config_data {
+ struct pwm_config_data *pwm_cfg;
+ u8 enable;
+};
+
+/**
+ * gpio_config_data - gpio configuration data
+ * @source_sel - source selection
+ * @mode_ctrl - mode control
+ * @vin_ctrl - input control
+ * @enable - flag indicating LED on or off
+ */
+struct gpio_config_data {
+ u8 source_sel;
+ u8 mode_ctrl;
+ u8 vin_ctrl;
+ bool enable;
+};
+
+/**
+ * struct qpnp_led_data - internal led data structure
+ * @led_classdev - led class device
+ * @delayed_work - delayed work for turning off the LED
+ * @workqueue - dedicated workqueue to handle concurrency
+ * @work - workqueue for led
+ * @id - led index
+ * @base_reg - base register given in device tree
+ * @lock - to protect the transactions
+ * @reg - cached value of led register
+ * @num_leds - number of leds in the module
+ * @max_current - maximum current supported by LED
+ * @default_on - true: default state is max brightness, false: default state is off
+ * @turn_off_delay_ms - number of msec before turning off the LED
+ */
+struct qpnp_led_data {
+ struct led_classdev cdev;
+ struct spmi_device *spmi_dev;
+ struct delayed_work dwork;
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ int id;
+ u16 base;
+ u8 reg;
+ u8 num_leds;
+ struct mutex lock;
+ struct wled_config_data *wled_cfg;
+ struct flash_config_data *flash_cfg;
+ struct kpdbl_config_data *kpdbl_cfg;
+ struct rgb_config_data *rgb_cfg;
+ struct mpp_config_data *mpp_cfg;
+ struct gpio_config_data *gpio_cfg;
+ int max_current;
+ bool default_on;
+ bool in_order_command_processing;
+ int turn_off_delay_ms;
+};
+
+static DEFINE_MUTEX(flash_lock);
+static struct pwm_device *kpdbl_master;
+static u32 kpdbl_master_period_us;
+DECLARE_BITMAP(kpdbl_leds_in_use, NUM_KPDBL_LEDS);
+static bool is_kpdbl_master_turn_on;
+
+static int
+qpnp_led_masked_write(struct qpnp_led_data *led, u16 addr, u8 mask, u8 val)
+{
+ int rc;
+ u8 reg;
+
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ addr, &reg, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from addr=%x, rc(%d)\n", addr, rc);
+ return rc;
+ }
+
+ reg &= ~mask;
+ reg |= val;
+
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ addr, &reg, 1);
+ if (rc)
+ dev_err(&led->spmi_dev->dev,
+ "Unable to write to addr=%x, rc(%d)\n", addr, rc);
+ return rc;
+}
+
+static void qpnp_dump_regs(struct qpnp_led_data *led, u8 regs[], u8 array_size)
+{
+ int i;
+ u8 val;
+
+ pr_debug("===== %s LED register dump start =====\n", led->cdev.name);
+ for (i = 0; i < array_size; i++) {
+ spmi_ext_register_readl(led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ led->base + regs[i],
+ &val, sizeof(val));
+ pr_debug("%s: 0x%x = 0x%x\n", led->cdev.name,
+ led->base + regs[i], val);
+ }
+ pr_debug("===== %s LED register dump end =====\n", led->cdev.name);
+}
+
+static int qpnp_wled_sync(struct qpnp_led_data *led)
+{
+ int rc;
+ u8 val;
+
+ /* sync */
+ val = WLED_SYNC_VAL;
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ WLED_SYNC_REG(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED set sync reg failed(%d)\n", rc);
+ return rc;
+ }
+
+ val = WLED_SYNC_RESET_VAL;
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ WLED_SYNC_REG(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED reset sync reg failed(%d)\n", rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int qpnp_wled_set(struct qpnp_led_data *led)
+{
+ int rc, duty, level, tries = 0;
+ u8 val, i, num_wled_strings, sink_val, ilim_val, ovp_val;
+
+ num_wled_strings = led->wled_cfg->num_strings;
+
+ level = led->cdev.brightness;
+
+ if (level > WLED_MAX_LEVEL)
+ level = WLED_MAX_LEVEL;
+ if (level == 0) {
+ for (i = 0; i < num_wled_strings; i++) {
+ rc = qpnp_led_masked_write(led,
+ WLED_FULL_SCALE_REG(led->base, i),
+ WLED_MAX_CURR_MASK, WLED_NO_CURRENT);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Write max current failure (%d)\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = qpnp_wled_sync(led);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED sync failed(%d)\n", rc);
+ return rc;
+ }
+
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl,
+ led->spmi_dev->sid, WLED_CURR_SINK_REG(led->base),
+ &sink_val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED read sink reg failed(%d)\n", rc);
+ return rc;
+ }
+
+ if (led->wled_cfg->pmic_version == PMIC_VER_8026) {
+ val = WLED_DISABLE_ALL_SINKS;
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ WLED_CURR_SINK_REG(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write sink reg failed(%d)\n", rc);
+ return rc;
+ }
+
+ usleep_range(WLED_OVP_DELAY, WLED_OVP_DELAY);
+ } else if (led->wled_cfg->pmic_version == PMIC_VER_8941) {
+ if (led->wled_cfg->num_physical_strings <=
+ WLED_THREE_STRINGS) {
+ val = WLED_DISABLE_1_2_SINKS;
+ rc = spmi_ext_register_writel(
+ led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ WLED_CURR_SINK_REG(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write sink reg failed");
+ return rc;
+ }
+
+ rc = spmi_ext_register_readl(
+ led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ WLED_BOOST_LIMIT_REG(led->base),
+ &ilim_val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read boost reg");
+ }
+ val = WLED_SET_ILIM_CODE;
+ rc = spmi_ext_register_writel(
+ led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ WLED_BOOST_LIMIT_REG(led->base),
+ &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write sink reg failed");
+ return rc;
+ }
+ usleep_range(WLED_OVP_DELAY, WLED_OVP_DELAY);
+ } else {
+ val = WLED_DISABLE_ALL_SINKS;
+ rc = spmi_ext_register_writel(
+ led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ WLED_CURR_SINK_REG(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write sink reg failed");
+ return rc;
+ }
+
+ msleep(WLED_OVP_DELAY_INT);
+ while (tries < WLED_MAX_TRIES) {
+ rc = spmi_ext_register_readl(
+ led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ WLED_OVP_INT_STATUS(led->base),
+ &ovp_val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read boost reg");
+ }
+
+ if (ovp_val & WLED_OVP_INT_MASK)
+ break;
+
+ msleep(WLED_OVP_DELAY_LOOP);
+ tries++;
+ }
+ usleep_range(WLED_OVP_DELAY, WLED_OVP_DELAY);
+ }
+ }
+
+ val = WLED_BOOST_OFF;
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl,
+ led->spmi_dev->sid, WLED_MOD_CTRL_REG(led->base),
+ &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write ctrl reg failed(%d)\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < num_wled_strings; i++) {
+ rc = qpnp_led_masked_write(led,
+ WLED_FULL_SCALE_REG(led->base, i),
+ WLED_MAX_CURR_MASK, (u8)led->max_current);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Write max current failure (%d)\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = qpnp_wled_sync(led);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED sync failed(%d)\n", rc);
+ return rc;
+ }
+
+ if (led->wled_cfg->pmic_version == PMIC_VER_8941) {
+ if (led->wled_cfg->num_physical_strings <=
+ WLED_THREE_STRINGS) {
+ rc = spmi_ext_register_writel(
+ led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ WLED_BOOST_LIMIT_REG(led->base),
+ &ilim_val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write sink reg failed");
+ return rc;
+ }
+ } else {
+ /* restore OVP to original value */
+ rc = spmi_ext_register_writel(
+ led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ WLED_OVP_CFG_REG(led->base),
+ &led->wled_cfg->ovp_val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write sink reg failed");
+ return rc;
+ }
+ }
+ }
+
+ /* re-enable all sinks */
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl,
+ led->spmi_dev->sid, WLED_CURR_SINK_REG(led->base),
+ &sink_val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write sink reg failed(%d)\n", rc);
+ return rc;
+ }
+
+ } else {
+ val = WLED_BOOST_ON;
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl,
+ led->spmi_dev->sid, WLED_MOD_CTRL_REG(led->base),
+ &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write ctrl reg failed(%d)\n", rc);
+ return rc;
+ }
+ }
+
+ duty = (WLED_MAX_DUTY_CYCLE * level) / WLED_MAX_LEVEL;
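+ /*
+ * WLED_MAX_DUTY_CYCLE (0xFFF) equals WLED_MAX_LEVEL (4095), so level
+ * maps 1:1 to a 12-bit duty value (e.g. level 2048 -> duty 0x800)
+ * that is split across the MSB/LSB registers below.
+ */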
+
+ /* program brightness control registers */
+ for (i = 0; i < num_wled_strings; i++) {
+ rc = qpnp_led_masked_write(led,
+ WLED_BRIGHTNESS_CNTL_MSB(led->base, i), WLED_MSB_MASK,
+ (duty >> WLED_8_BIT_SHFT) & WLED_4_BIT_MASK);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED set brightness MSB failed(%d)\n", rc);
+ return rc;
+ }
+ val = duty & WLED_8_BIT_MASK;
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl,
+ led->spmi_dev->sid,
+ WLED_BRIGHTNESS_CNTL_LSB(led->base, i), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED set brightness LSB failed(%d)\n", rc);
+ return rc;
+ }
+ }
+
+ rc = qpnp_wled_sync(led);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev, "WLED sync failed(%d)\n", rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int qpnp_mpp_set(struct qpnp_led_data *led)
+{
+ int rc;
+ u8 val;
+ int duty_us, duty_ns, period_us;
+
+ if (led->cdev.brightness) {
+ if (led->mpp_cfg->mpp_reg && !led->mpp_cfg->enable) {
+ rc = regulator_set_voltage(led->mpp_cfg->mpp_reg,
+ led->mpp_cfg->min_uV,
+ led->mpp_cfg->max_uV);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Regulator voltage set failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = regulator_enable(led->mpp_cfg->mpp_reg);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Regulator enable failed(%d)\n", rc);
+ goto err_reg_enable;
+ }
+ }
+
+ led->mpp_cfg->enable = true;
+
+ if (led->cdev.brightness < led->mpp_cfg->min_brightness) {
+ dev_warn(&led->spmi_dev->dev,
+ "brightness is less than supported..." \
+ "set to minimum supported\n");
+ led->cdev.brightness = led->mpp_cfg->min_brightness;
+ }
+
+ if (led->mpp_cfg->pwm_mode != MANUAL_MODE) {
+ if (!led->mpp_cfg->pwm_cfg->blinking) {
+ led->mpp_cfg->pwm_cfg->mode =
+ led->mpp_cfg->pwm_cfg->default_mode;
+ led->mpp_cfg->pwm_mode =
+ led->mpp_cfg->pwm_cfg->default_mode;
+ }
+ }
+ if (led->mpp_cfg->pwm_mode == PWM_MODE) {
+ /*config pwm for brightness scaling*/
+ period_us = led->mpp_cfg->pwm_cfg->pwm_period_us;
+ if (period_us > INT_MAX / NSEC_PER_USEC) {
+ duty_us = (period_us * led->cdev.brightness) /
+ LED_FULL;
+ rc = pwm_config_us(
+ led->mpp_cfg->pwm_cfg->pwm_dev,
+ duty_us,
+ period_us);
+ } else {
+ duty_ns = ((period_us * NSEC_PER_USEC) /
+ LED_FULL) * led->cdev.brightness;
+ rc = pwm_config(
+ led->mpp_cfg->pwm_cfg->pwm_dev,
+ duty_ns,
+ period_us * NSEC_PER_USEC);
+ }
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev, "Failed to " \
+ "configure pwm for new values\n");
+ goto err_mpp_reg_write;
+ }
+ }
+
+ if (led->mpp_cfg->pwm_mode != MANUAL_MODE)
+ pwm_enable(led->mpp_cfg->pwm_cfg->pwm_dev);
+ else {
+ if (led->cdev.brightness < LED_MPP_CURRENT_MIN)
+ led->cdev.brightness = LED_MPP_CURRENT_MIN;
+ else {
+ /*
+ * PMIC supports LED intensity from 5mA - 40mA
+ * in steps of 5mA. Brightness is rounded to
+ * 5mA or nearest lower supported values
+ */
+ led->cdev.brightness /= LED_MPP_CURRENT_MIN;
+ led->cdev.brightness *= LED_MPP_CURRENT_MIN;
+ }
+
+ val = (led->cdev.brightness / LED_MPP_CURRENT_MIN) - 1;
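+ /*
+ * e.g. a requested brightness of 23 is rounded down to 20 mA
+ * above, giving val = 20/5 - 1 = 3.
+ */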
+
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_SINK_CTRL(led->base),
+ LED_MPP_SINK_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write sink control reg\n");
+ goto err_mpp_reg_write;
+ }
+ }
+
+ val = (led->mpp_cfg->source_sel & LED_MPP_SRC_MASK) |
+ (led->mpp_cfg->mode_ctrl & LED_MPP_MODE_CTRL_MASK);
+
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_MODE_CTRL(led->base), LED_MPP_MODE_MASK,
+ val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led mode reg\n");
+ goto err_mpp_reg_write;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_EN_CTRL(led->base), LED_MPP_EN_MASK,
+ LED_MPP_EN_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable " \
+ "reg\n");
+ goto err_mpp_reg_write;
+ }
+ } else {
+ if (led->mpp_cfg->pwm_mode != MANUAL_MODE) {
+ led->mpp_cfg->pwm_cfg->mode =
+ led->mpp_cfg->pwm_cfg->default_mode;
+ led->mpp_cfg->pwm_mode =
+ led->mpp_cfg->pwm_cfg->default_mode;
+ pwm_disable(led->mpp_cfg->pwm_cfg->pwm_dev);
+ }
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_MODE_CTRL(led->base),
+ LED_MPP_MODE_MASK,
+ LED_MPP_MODE_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led mode reg\n");
+ goto err_mpp_reg_write;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ LED_MPP_EN_CTRL(led->base),
+ LED_MPP_EN_MASK,
+ LED_MPP_EN_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable reg\n");
+ goto err_mpp_reg_write;
+ }
+
+ if (led->mpp_cfg->mpp_reg && led->mpp_cfg->enable) {
+ rc = regulator_disable(led->mpp_cfg->mpp_reg);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "MPP regulator disable failed(%d)\n",
+ rc);
+ return rc;
+ }
+
+ rc = regulator_set_voltage(led->mpp_cfg->mpp_reg,
+ 0, led->mpp_cfg->max_uV);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "MPP regulator voltage set failed(%d)\n",
+ rc);
+ return rc;
+ }
+ }
+
+ led->mpp_cfg->enable = false;
+ }
+
+ if (led->mpp_cfg->pwm_mode != MANUAL_MODE)
+ led->mpp_cfg->pwm_cfg->blinking = false;
+ qpnp_dump_regs(led, mpp_debug_regs, ARRAY_SIZE(mpp_debug_regs));
+
+ return 0;
+
+err_mpp_reg_write:
+ if (led->mpp_cfg->mpp_reg)
+ regulator_disable(led->mpp_cfg->mpp_reg);
+err_reg_enable:
+ if (led->mpp_cfg->mpp_reg)
+ regulator_set_voltage(led->mpp_cfg->mpp_reg, 0,
+ led->mpp_cfg->max_uV);
+ led->mpp_cfg->enable = false;
+
+ return rc;
+}
+
+static int qpnp_gpio_set(struct qpnp_led_data *led)
+{
+ int rc, val;
+
+ if (led->cdev.brightness) {
+ val = (led->gpio_cfg->source_sel & LED_GPIO_SRC_MASK) |
+ (led->gpio_cfg->mode_ctrl & LED_GPIO_MODE_CTRL_MASK);
+
+ rc = qpnp_led_masked_write(led,
+ LED_GPIO_MODE_CTRL(led->base),
+ LED_GPIO_MODE_MASK,
+ val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led mode reg\n");
+ goto err_gpio_reg_write;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ LED_GPIO_EN_CTRL(led->base),
+ LED_GPIO_EN_MASK,
+ LED_GPIO_EN_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable reg\n");
+ goto err_gpio_reg_write;
+ }
+
+ led->gpio_cfg->enable = true;
+ } else {
+ rc = qpnp_led_masked_write(led,
+ LED_GPIO_MODE_CTRL(led->base),
+ LED_GPIO_MODE_MASK,
+ LED_GPIO_MODE_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led mode reg\n");
+ goto err_gpio_reg_write;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ LED_GPIO_EN_CTRL(led->base),
+ LED_GPIO_EN_MASK,
+ LED_GPIO_EN_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable reg\n");
+ goto err_gpio_reg_write;
+ }
+
+ led->gpio_cfg->enable = false;
+ }
+
+ qpnp_dump_regs(led, gpio_debug_regs, ARRAY_SIZE(gpio_debug_regs));
+
+ return 0;
+
+err_gpio_reg_write:
+ led->gpio_cfg->enable = false;
+
+ return rc;
+}
+
+static int qpnp_flash_regulator_operate(struct qpnp_led_data *led, bool on)
+{
+ int rc, i;
+ struct qpnp_led_data *led_array;
+ bool regulator_on = false;
+
+ led_array = dev_get_drvdata(&led->spmi_dev->dev);
+ if (!led_array) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to get LED array\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < led->num_leds; i++)
+ regulator_on |= led_array[i].flash_cfg->flash_on;
+
+ if (!on)
+ goto regulator_turn_off;
+
+ if (!regulator_on && !led->flash_cfg->flash_on) {
+ for (i = 0; i < led->num_leds; i++) {
+ if (led_array[i].flash_cfg->flash_reg_get) {
+ if (led_array[i].flash_cfg->flash_wa_reg_get) {
+ rc = regulator_enable(
+ led_array[i].flash_cfg->
+ flash_wa_reg);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash wa regulator"
+ "enable failed(%d)\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = regulator_enable(
+ led_array[i].flash_cfg->\
+ flash_boost_reg);
+ if (rc) {
+ if (led_array[i].flash_cfg->
+ flash_wa_reg_get)
+ /* Disable flash wa regulator
+ * when flash boost regulator
+ * enable fails
+ */
+ regulator_disable(
+ led_array[i].flash_cfg->
+ flash_wa_reg);
+ dev_err(&led->spmi_dev->dev,
+ "Flash boost regulator enable"
+ "failed(%d)\n", rc);
+ return rc;
+ }
+ led->flash_cfg->flash_on = true;
+ }
+ break;
+ }
+ }
+
+ return 0;
+
+regulator_turn_off:
+ if (regulator_on && led->flash_cfg->flash_on) {
+ for (i = 0; i < led->num_leds; i++) {
+ if (led_array[i].flash_cfg->flash_reg_get) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_ENABLE_CONTROL(led->base),
+ FLASH_ENABLE_MASK,
+ FLASH_DISABLE_ALL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Enable reg write failed(%d)\n",
+ rc);
+ }
+
+ rc = regulator_disable(led_array[i].flash_cfg->\
+ flash_boost_reg);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash boost regulator disable"
+ "failed(%d)\n", rc);
+ return rc;
+ }
+ if (led_array[i].flash_cfg->flash_wa_reg_get) {
+ rc = regulator_disable(
+ led_array[i].flash_cfg->
+ flash_wa_reg);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash_wa regulator"
+ "disable failed(%d)\n",
+ rc);
+ return rc;
+ }
+ }
+ led->flash_cfg->flash_on = false;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int qpnp_torch_regulator_operate(struct qpnp_led_data *led, bool on)
+{
+ int rc;
+
+ if (!on)
+ goto regulator_turn_off;
+
+ if (!led->flash_cfg->torch_on) {
+ rc = regulator_enable(led->flash_cfg->torch_boost_reg);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Regulator enable failed(%d)\n", rc);
+ return rc;
+ }
+ led->flash_cfg->torch_on = true;
+ }
+ return 0;
+
+regulator_turn_off:
+ if (led->flash_cfg->torch_on) {
+ rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base),
+ FLASH_ENABLE_MODULE_MASK, FLASH_DISABLE_ALL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Enable reg write failed(%d)\n", rc);
+ }
+
+ rc = regulator_disable(led->flash_cfg->torch_boost_reg);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Regulator disable failed(%d)\n", rc);
+ return rc;
+ }
+ led->flash_cfg->torch_on = false;
+ }
+ return 0;
+}
+
+static int qpnp_flash_set(struct qpnp_led_data *led)
+{
+ int rc, error;
+ int val = led->cdev.brightness;
+
+ if (led->flash_cfg->torch_enable)
+ led->flash_cfg->current_prgm =
+ (val * TORCH_MAX_LEVEL / led->max_current);
+ else
+ led->flash_cfg->current_prgm =
+ (val * FLASH_MAX_LEVEL / led->max_current);
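+ /*
+ * Scales linearly with max_current, e.g. brightness == max_current
+ * programs FLASH_MAX_LEVEL (0x4F), or TORCH_MAX_LEVEL (0x0F) in
+ * torch mode.
+ */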
+
+ /* Set led current */
+ if (val > 0) {
+ if (led->flash_cfg->torch_enable) {
+ if (led->flash_cfg->peripheral_subtype ==
+ FLASH_SUBTYPE_DUAL) {
+ if (!led->flash_cfg->no_smbb_support)
+ rc = qpnp_torch_regulator_operate(led,
+ true);
+ else
+ rc = qpnp_flash_regulator_operate(led,
+ true);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Torch regulator operate failed(%d)\n",
+ rc);
+ return rc;
+ }
+ } else if (led->flash_cfg->peripheral_subtype ==
+ FLASH_SUBTYPE_SINGLE) {
+ rc = qpnp_flash_regulator_operate(led, true);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash regulator operate failed(%d)\n",
+ rc);
+ goto error_flash_set;
+ }
+ }
+
+ rc = qpnp_led_masked_write(led, FLASH_MAX_CURR(led->base),
+ FLASH_CURRENT_MASK,
+ TORCH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Max current reg write failed(%d)\n",
+ rc);
+ goto error_reg_write;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_TMR_CTRL(led->base),
+ FLASH_TMR_MASK,
+ FLASH_TMR_WATCHDOG);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Timer control reg write failed(%d)\n",
+ rc);
+ goto error_reg_write;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ led->flash_cfg->current_addr,
+ FLASH_CURRENT_MASK,
+ led->flash_cfg->current_prgm);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Current reg write failed(%d)\n", rc);
+ goto error_reg_write;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ led->flash_cfg->second_addr,
+ FLASH_CURRENT_MASK,
+ led->flash_cfg->current_prgm);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "2nd Current reg write failed(%d)\n",
+ rc);
+ goto error_reg_write;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_WATCHDOG_TMR(led->base),
+ FLASH_WATCHDOG_MASK,
+ led->flash_cfg->duration);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Watchdog timer reg write failed(%d)\n",
+ rc);
+ goto error_reg_write;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_ENABLE_CONTROL(led->base),
+ FLASH_ENABLE_MASK,
+ led->flash_cfg->enable_module);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Enable reg write failed(%d)\n",
+ rc);
+ goto error_reg_write;
+ }
+
+ if (!led->flash_cfg->strobe_type)
+ led->flash_cfg->trigger_flash &=
+ ~FLASH_HW_SW_STROBE_SEL_MASK;
+ else
+ led->flash_cfg->trigger_flash |=
+ FLASH_HW_SW_STROBE_SEL_MASK;
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ led->flash_cfg->trigger_flash,
+ led->flash_cfg->trigger_flash);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "LED %d strobe reg write failed(%d)\n",
+ led->id, rc);
+ goto error_reg_write;
+ }
+ } else {
+ rc = qpnp_flash_regulator_operate(led, true);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash regulator operate failed(%d)\n",
+ rc);
+ goto error_flash_set;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_TMR_CTRL(led->base),
+ FLASH_TMR_MASK,
+ FLASH_TMR_SAFETY);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Timer control reg write failed(%d)\n",
+ rc);
+ goto error_reg_write;
+ }
+
+ /* Set flash safety timer */
+ rc = qpnp_led_masked_write(led,
+ FLASH_SAFETY_TIMER(led->base),
+ FLASH_SAFETY_TIMER_MASK,
+ led->flash_cfg->duration);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Safety timer reg write failed(%d)\n",
+ rc);
+ goto error_flash_set;
+ }
+
+ /* Set max current */
+ rc = qpnp_led_masked_write(led,
+ FLASH_MAX_CURR(led->base), FLASH_CURRENT_MASK,
+ FLASH_MAX_LEVEL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Max current reg write failed(%d)\n",
+ rc);
+ goto error_flash_set;
+ }
+
+ /* Set clamp current */
+ rc = qpnp_led_masked_write(led,
+ FLASH_CLAMP_CURR(led->base),
+ FLASH_CURRENT_MASK,
+ led->flash_cfg->clamp_curr);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Clamp current reg write failed(%d)\n",
+ rc);
+ goto error_flash_set;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ led->flash_cfg->current_addr,
+ FLASH_CURRENT_MASK,
+ led->flash_cfg->current_prgm);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Current reg write failed(%d)\n", rc);
+ goto error_flash_set;
+ }
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_ENABLE_CONTROL(led->base),
+ led->flash_cfg->enable_module,
+ led->flash_cfg->enable_module);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Enable reg write failed(%d)\n", rc);
+ goto error_flash_set;
+ }
+
+ /*
+ * Add 1ms delay for the charger to enter a stable state
+ */
+ usleep_range(FLASH_RAMP_UP_DELAY_US, FLASH_RAMP_UP_DELAY_US);
+
+ if (!led->flash_cfg->strobe_type)
+ led->flash_cfg->trigger_flash &=
+ ~FLASH_HW_SW_STROBE_SEL_MASK;
+ else
+ led->flash_cfg->trigger_flash |=
+ FLASH_HW_SW_STROBE_SEL_MASK;
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ led->flash_cfg->trigger_flash,
+ led->flash_cfg->trigger_flash);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "LED %d strobe reg write failed(%d)\n",
+ led->id, rc);
+ goto error_flash_set;
+ }
+ }
+ } else {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ led->flash_cfg->trigger_flash,
+ FLASH_DISABLE_ALL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "LED %d flash write failed(%d)\n", led->id, rc);
+ if (led->flash_cfg->torch_enable)
+ goto error_torch_set;
+ else
+ goto error_flash_set;
+ }
+
+ if (led->flash_cfg->torch_enable) {
+ if (led->flash_cfg->peripheral_subtype ==
+ FLASH_SUBTYPE_DUAL) {
+ if (!led->flash_cfg->no_smbb_support)
+ rc = qpnp_torch_regulator_operate(led,
+ false);
+ else
+ rc = qpnp_flash_regulator_operate(led,
+ false);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Torch regulator operate failed(%d)\n",
+ rc);
+ return rc;
+ }
+ } else if (led->flash_cfg->peripheral_subtype ==
+ FLASH_SUBTYPE_SINGLE) {
+ rc = qpnp_flash_regulator_operate(led, false);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash regulator operate failed(%d)\n",
+ rc);
+ return rc;
+ }
+ }
+ } else {
+ /*
+ * Disable module after ramp down complete for stable
+ * behavior
+ */
+ usleep_range(FLASH_RAMP_DN_DELAY_US, FLASH_RAMP_DN_DELAY_US);
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_ENABLE_CONTROL(led->base),
+ led->flash_cfg->enable_module &
+ ~FLASH_ENABLE_MODULE_MASK,
+ FLASH_DISABLE_ALL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Enable reg write failed(%d)\n", rc);
+ if (led->flash_cfg->torch_enable)
+ goto error_torch_set;
+ else
+ goto error_flash_set;
+ }
+
+ rc = qpnp_flash_regulator_operate(led, false);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash regulator operate failed(%d)\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ qpnp_dump_regs(led, flash_debug_regs, ARRAY_SIZE(flash_debug_regs));
+
+ return 0;
+
+error_reg_write:
+ if (led->flash_cfg->peripheral_subtype == FLASH_SUBTYPE_SINGLE)
+ goto error_flash_set;
+
+error_torch_set:
+ if (!led->flash_cfg->no_smbb_support)
+ error = qpnp_torch_regulator_operate(led, false);
+ else
+ error = qpnp_flash_regulator_operate(led, false);
+ if (error) {
+ dev_err(&led->spmi_dev->dev,
+ "Torch regulator operate failed(%d)\n", rc);
+ return error;
+ }
+ return rc;
+
+error_flash_set:
+ error = qpnp_flash_regulator_operate(led, false);
+ if (error) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash regulator operate failed(%d)\n", rc);
+ return error;
+ }
+ return rc;
+}
+
+static int qpnp_kpdbl_set(struct qpnp_led_data *led)
+{
+ int rc;
+ int duty_us, duty_ns, period_us;
+
+ if (led->cdev.brightness) {
+ if (!led->kpdbl_cfg->pwm_cfg->blinking)
+ led->kpdbl_cfg->pwm_cfg->mode =
+ led->kpdbl_cfg->pwm_cfg->default_mode;
+
+ if (bitmap_empty(kpdbl_leds_in_use, NUM_KPDBL_LEDS)) {
+ rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base),
+ KPDBL_MODULE_EN_MASK, KPDBL_MODULE_EN);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Enable reg write failed(%d)\n", rc);
+ return rc;
+ }
+ }
+
+ /* On some platforms, GPLED1 channel should always be enabled
+ * for the other GPLEDs 2/3/4 to glow. Before enabling GPLED
+ * 2/3/4, first check if GPLED1 is already enabled. If GPLED1
+ * channel is not enabled, then enable the GPLED1 channel but
+ * with a 0 brightness
+ */
+ if (!led->kpdbl_cfg->always_on &&
+ !test_bit(KPDBL_MASTER_BIT_INDEX, kpdbl_leds_in_use) &&
+ kpdbl_master) {
+ rc = pwm_config_us(kpdbl_master, 0,
+ kpdbl_master_period_us);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "pwm config failed\n");
+ return rc;
+ }
+
+ rc = pwm_enable(kpdbl_master);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "pwm enable failed\n");
+ return rc;
+ }
+ set_bit(KPDBL_MASTER_BIT_INDEX,
+ kpdbl_leds_in_use);
+ }
+
+ if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) {
+ period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us;
+ if (period_us > INT_MAX / NSEC_PER_USEC) {
+ duty_us = (period_us * led->cdev.brightness) /
+ KPDBL_MAX_LEVEL;
+ rc = pwm_config_us(
+ led->kpdbl_cfg->pwm_cfg->pwm_dev,
+ duty_us,
+ period_us);
+ } else {
+ duty_ns = ((period_us * NSEC_PER_USEC) /
+ KPDBL_MAX_LEVEL) * led->cdev.brightness;
+ rc = pwm_config(
+ led->kpdbl_cfg->pwm_cfg->pwm_dev,
+ duty_ns,
+ period_us * NSEC_PER_USEC);
+ }
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev, "pwm config failed\n");
+ return rc;
+ }
+ }
+
+ rc = pwm_enable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev, "pwm enable failed\n");
+ return rc;
+ }
+
+ set_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use);
+
+ /* is_kpdbl_master_turn_on will be set to true when GPLED1
+ * channel is enabled and has a valid brightness value
+ */
+ if (led->kpdbl_cfg->always_on)
+ is_kpdbl_master_turn_on = true;
+
+ } else {
+ led->kpdbl_cfg->pwm_cfg->mode =
+ led->kpdbl_cfg->pwm_cfg->default_mode;
+
+ /* Before disabling GPLED1, check if any other GPLED 2/3/4 is
+ * on. If any of the other GPLED 2/3/4 is on, then have the
+ * GPLED1 channel enabled with 0 brightness.
+ */
+ if (led->kpdbl_cfg->always_on) {
+ if (bitmap_weight(kpdbl_leds_in_use,
+ NUM_KPDBL_LEDS) > 1) {
+ rc = pwm_config_us(
+ led->kpdbl_cfg->pwm_cfg->pwm_dev, 0,
+ led->kpdbl_cfg->pwm_cfg->pwm_period_us);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "pwm config failed\n");
+ return rc;
+ }
+
+ rc = pwm_enable(led->kpdbl_cfg->pwm_cfg->
+ pwm_dev);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "pwm enable failed\n");
+ return rc;
+ }
+ } else {
+ if (kpdbl_master) {
+ pwm_disable(kpdbl_master);
+ clear_bit(KPDBL_MASTER_BIT_INDEX,
+ kpdbl_leds_in_use);
+ rc = qpnp_led_masked_write(
+ led, KPDBL_ENABLE(led->base),
+ KPDBL_MODULE_EN_MASK,
+ KPDBL_MODULE_DIS);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led"
+ " enable reg\n");
+ return rc;
+ }
+ }
+ }
+ is_kpdbl_master_turn_on = false;
+ } else {
+ pwm_disable(led->kpdbl_cfg->pwm_cfg->pwm_dev);
+ clear_bit(led->kpdbl_cfg->row_id, kpdbl_leds_in_use);
+ if (bitmap_weight(kpdbl_leds_in_use,
+ NUM_KPDBL_LEDS) == 1 && kpdbl_master &&
+ !is_kpdbl_master_turn_on) {
+ pwm_disable(kpdbl_master);
+ clear_bit(KPDBL_MASTER_BIT_INDEX,
+ kpdbl_leds_in_use);
+ rc = qpnp_led_masked_write(
+ led, KPDBL_ENABLE(led->base),
+ KPDBL_MODULE_EN_MASK, KPDBL_MODULE_DIS);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable reg\n");
+ return rc;
+ }
+ is_kpdbl_master_turn_on = false;
+ }
+ }
+ }
+
+ led->kpdbl_cfg->pwm_cfg->blinking = false;
+
+ qpnp_dump_regs(led, kpdbl_debug_regs, ARRAY_SIZE(kpdbl_debug_regs));
+
+ return 0;
+}
+
+static int qpnp_rgb_set(struct qpnp_led_data *led)
+{
+ int rc;
+ int duty_us, duty_ns, period_us;
+
+ if (led->cdev.brightness) {
+ if (!led->rgb_cfg->pwm_cfg->blinking)
+ led->rgb_cfg->pwm_cfg->mode =
+ led->rgb_cfg->pwm_cfg->default_mode;
+ if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) {
+ period_us = led->rgb_cfg->pwm_cfg->pwm_period_us;
+ if (period_us > INT_MAX / NSEC_PER_USEC) {
+ duty_us = (period_us * led->cdev.brightness) /
+ LED_FULL;
+ rc = pwm_config_us(
+ led->rgb_cfg->pwm_cfg->pwm_dev,
+ duty_us,
+ period_us);
+ } else {
+ duty_ns = ((period_us * NSEC_PER_USEC) /
+ LED_FULL) * led->cdev.brightness;
+ rc = pwm_config(
+ led->rgb_cfg->pwm_cfg->pwm_dev,
+ duty_ns,
+ period_us * NSEC_PER_USEC);
+ }
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "pwm config failed\n");
+ return rc;
+ }
+ }
+ rc = qpnp_led_masked_write(led,
+ RGB_LED_EN_CTL(led->base),
+ led->rgb_cfg->enable, led->rgb_cfg->enable);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable reg\n");
+ return rc;
+ }
+
+ if (led->rgb_cfg->pwm_cfg->pwm_enabled) {
+ pwm_disable(led->rgb_cfg->pwm_cfg->pwm_dev);
+ led->rgb_cfg->pwm_cfg->pwm_enabled = 0;
+ }
+
+ rc = pwm_enable(led->rgb_cfg->pwm_cfg->pwm_dev);
+ if (!rc)
+ led->rgb_cfg->pwm_cfg->pwm_enabled = 1;
+ } else {
+ led->rgb_cfg->pwm_cfg->mode =
+ led->rgb_cfg->pwm_cfg->default_mode;
+ pwm_disable(led->rgb_cfg->pwm_cfg->pwm_dev);
+ led->rgb_cfg->pwm_cfg->pwm_enabled = 0;
+ rc = qpnp_led_masked_write(led,
+ RGB_LED_EN_CTL(led->base),
+ led->rgb_cfg->enable, RGB_LED_DISABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led enable reg\n");
+ return rc;
+ }
+ }
+
+ led->rgb_cfg->pwm_cfg->blinking = false;
+ qpnp_dump_regs(led, rgb_pwm_debug_regs, ARRAY_SIZE(rgb_pwm_debug_regs));
+
+ return 0;
+}
+
+static void qpnp_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct qpnp_led_data *led;
+
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+ if (value < LED_OFF) {
+ dev_err(&led->spmi_dev->dev, "Invalid brightness value\n");
+ return;
+ }
+
+ if (value > led->cdev.max_brightness)
+ value = led->cdev.max_brightness;
+
+ led->cdev.brightness = value;
+ if (led->in_order_command_processing)
+ queue_work(led->workqueue, &led->work);
+ else
+ schedule_work(&led->work);
+}
+
+static void __qpnp_led_work(struct qpnp_led_data *led,
+ enum led_brightness value)
+{
+ int rc;
+
+ if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1)
+ mutex_lock(&flash_lock);
+ else
+ mutex_lock(&led->lock);
+
+ switch (led->id) {
+ case QPNP_ID_WLED:
+ rc = qpnp_wled_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "WLED set brightness failed (%d)\n", rc);
+ break;
+ case QPNP_ID_FLASH1_LED0:
+ case QPNP_ID_FLASH1_LED1:
+ rc = qpnp_flash_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "FLASH set brightness failed (%d)\n", rc);
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ rc = qpnp_rgb_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "RGB set brightness failed (%d)\n", rc);
+ break;
+ case QPNP_ID_LED_MPP:
+ rc = qpnp_mpp_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "MPP set brightness failed (%d)\n", rc);
+ break;
+ case QPNP_ID_LED_GPIO:
+ rc = qpnp_gpio_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "GPIO set brightness failed (%d)\n",
+ rc);
+ break;
+ case QPNP_ID_KPDBL:
+ rc = qpnp_kpdbl_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "KPDBL set brightness failed (%d)\n", rc);
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id);
+ break;
+ }
+ if (led->id == QPNP_ID_FLASH1_LED0 || led->id == QPNP_ID_FLASH1_LED1)
+ mutex_unlock(&flash_lock);
+ else
+ mutex_unlock(&led->lock);
+
+}
+
+static void qpnp_led_work(struct work_struct *work)
+{
+ struct qpnp_led_data *led = container_of(work,
+ struct qpnp_led_data, work);
+
+ __qpnp_led_work(led, led->cdev.brightness);
+
+ return;
+}
+
+static int qpnp_led_set_max_brightness(struct qpnp_led_data *led)
+{
+ switch (led->id) {
+ case QPNP_ID_WLED:
+ led->cdev.max_brightness = WLED_MAX_LEVEL;
+ break;
+ case QPNP_ID_FLASH1_LED0:
+ case QPNP_ID_FLASH1_LED1:
+ led->cdev.max_brightness = led->max_current;
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ led->cdev.max_brightness = RGB_MAX_LEVEL;
+ break;
+ case QPNP_ID_LED_MPP:
+ if (led->mpp_cfg->pwm_mode == MANUAL_MODE)
+ led->cdev.max_brightness = led->max_current;
+ else
+ led->cdev.max_brightness = MPP_MAX_LEVEL;
+ break;
+ case QPNP_ID_LED_GPIO:
+ led->cdev.max_brightness = led->max_current;
+ break;
+ case QPNP_ID_KPDBL:
+ led->cdev.max_brightness = KPDBL_MAX_LEVEL;
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static enum led_brightness qpnp_led_get(struct led_classdev *led_cdev)
+{
+ struct qpnp_led_data *led;
+
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ return led->cdev.brightness;
+}
+
+static void qpnp_led_turn_off_delayed(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct qpnp_led_data *led
+ = container_of(dwork, struct qpnp_led_data, dwork);
+
+ led->cdev.brightness = LED_OFF;
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+}
+
+static void qpnp_led_turn_off(struct qpnp_led_data *led)
+{
+ INIT_DELAYED_WORK(&led->dwork, qpnp_led_turn_off_delayed);
+ schedule_delayed_work(&led->dwork,
+ msecs_to_jiffies(led->turn_off_delay_ms));
+}
+
+static int qpnp_wled_init(struct qpnp_led_data *led)
+{
+ int rc, i;
+ u8 num_wled_strings, val = 0;
+
+ num_wled_strings = led->wled_cfg->num_strings;
+
+ /* verify ranges */
+ if (led->wled_cfg->ovp_val > WLED_OVP_27V) {
+ dev_err(&led->spmi_dev->dev, "Invalid ovp value\n");
+ return -EINVAL;
+ }
+
+ if (led->wled_cfg->boost_curr_lim > WLED_CURR_LIMIT_1680mA) {
+ dev_err(&led->spmi_dev->dev, "Invalid boost current limit\n");
+ return -EINVAL;
+ }
+
+ if (led->wled_cfg->cp_select > WLED_CP_SELECT_MAX) {
+ dev_err(&led->spmi_dev->dev, "Invalid pole capacitance\n");
+ return -EINVAL;
+ }
+
+ if ((led->max_current > WLED_MAX_CURR)) {
+ dev_err(&led->spmi_dev->dev, "Invalid max current\n");
+ return -EINVAL;
+ }
+
+ if ((led->wled_cfg->ctrl_delay_us % WLED_CTL_DLY_STEP) ||
+ (led->wled_cfg->ctrl_delay_us > WLED_CTL_DLY_MAX)) {
+ dev_err(&led->spmi_dev->dev, "Invalid control delay\n");
+ return -EINVAL;
+ }
+
+ /* program over voltage protection threshold */
+ rc = qpnp_led_masked_write(led, WLED_OVP_CFG_REG(led->base),
+ WLED_OVP_VAL_MASK,
+ (led->wled_cfg->ovp_val << WLED_OVP_VAL_BIT_SHFT));
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED OVP reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* program current boost limit */
+ rc = qpnp_led_masked_write(led, WLED_BOOST_LIMIT_REG(led->base),
+ WLED_BOOST_LIMIT_MASK, led->wled_cfg->boost_curr_lim);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED boost limit reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* program output feedback */
+ rc = qpnp_led_masked_write(led, WLED_FDBCK_CTRL_REG(led->base),
+ WLED_OP_FDBCK_MASK,
+ (led->wled_cfg->op_fdbck << WLED_OP_FDBCK_BIT_SHFT));
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED fdbck ctrl reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* program switch frequency */
+ rc = qpnp_led_masked_write(led,
+ WLED_SWITCHING_FREQ_REG(led->base),
+ WLED_SWITCH_FREQ_MASK, led->wled_cfg->switch_freq);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED switch freq reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* program current sink */
+ if (led->wled_cfg->cs_out_en) {
+ for (i = 0; i < led->wled_cfg->num_strings; i++)
+ val |= 1 << i;
+ rc = qpnp_led_masked_write(led, WLED_CURR_SINK_REG(led->base),
+ WLED_CURR_SINK_MASK, (val << WLED_CURR_SINK_SHFT));
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED curr sink reg write failed(%d)\n", rc);
+ return rc;
+ }
+ }
+
+ /* program high pole capacitance */
+ rc = qpnp_led_masked_write(led, WLED_HIGH_POLE_CAP_REG(led->base),
+ WLED_CP_SELECT_MASK, led->wled_cfg->cp_select);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED pole cap reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* program modulator, current mod src and cabc */
+ for (i = 0; i < num_wled_strings; i++) {
+ rc = qpnp_led_masked_write(led, WLED_MOD_EN_REG(led->base, i),
+ WLED_NO_MASK, WLED_EN_MASK);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED mod enable reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ if (led->wled_cfg->dig_mod_gen_en) {
+ rc = qpnp_led_masked_write(led,
+ WLED_MOD_SRC_SEL_REG(led->base, i),
+ WLED_NO_MASK, WLED_USE_EXT_GEN_MOD_SRC);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED dig mod en reg write failed(%d)\n", rc);
+ }
+ }
+
+ rc = qpnp_led_masked_write(led,
+ WLED_FULL_SCALE_REG(led->base, i), WLED_MAX_CURR_MASK,
+ (u8)led->max_current);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED max current reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ }
+
+ /* Reset WLED enable register */
+ rc = qpnp_led_masked_write(led, WLED_MOD_CTRL_REG(led->base),
+ WLED_8_BIT_MASK, WLED_BOOST_OFF);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "WLED write ctrl reg failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* dump wled registers */
+ qpnp_dump_regs(led, wled_debug_regs, ARRAY_SIZE(wled_debug_regs));
+
+ return 0;
+}
+
+static ssize_t led_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ unsigned long state;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ /* '1' to enable torch mode; '0' to switch to flash mode */
+ if (state == 1)
+ led->flash_cfg->torch_enable = true;
+ else
+ led->flash_cfg->torch_enable = false;
+
+ return count;
+}
+
+static ssize_t led_strobe_type_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ unsigned long state;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ /* '0' for sw strobe; '1' for hw strobe */
+ if (state == 1)
+ led->flash_cfg->strobe_type = 1;
+ else
+ led->flash_cfg->strobe_type = 0;
+
+ return count;
+}
+
+static int qpnp_pwm_init(struct pwm_config_data *pwm_cfg,
+ struct spmi_device *spmi_dev,
+ const char *name)
+{
+ int rc, start_idx, idx_len, lut_max_size;
+
+ if (pwm_cfg->pwm_dev) {
+ if (pwm_cfg->mode == LPG_MODE) {
+ start_idx =
+ pwm_cfg->duty_cycles->start_idx;
+ idx_len =
+ pwm_cfg->duty_cycles->num_duty_pcts;
+
+ if (strnstr(name, "kpdbl", sizeof("kpdbl")))
+ lut_max_size = PWM_GPLED_LUT_MAX_SIZE;
+ else
+ lut_max_size = PWM_LUT_MAX_SIZE;
+
+ if (idx_len >= lut_max_size && start_idx) {
+ dev_err(&spmi_dev->dev,
+ "Wrong LUT size or index\n");
+ return -EINVAL;
+ }
+
+ if ((start_idx + idx_len) > lut_max_size) {
+ dev_err(&spmi_dev->dev,
+ "Exceed LUT limit\n");
+ return -EINVAL;
+ }
+ rc = pwm_lut_config(pwm_cfg->pwm_dev,
+ pwm_cfg->pwm_period_us,
+ pwm_cfg->duty_cycles->duty_pcts,
+ pwm_cfg->lut_params);
+ if (rc < 0) {
+ dev_err(&spmi_dev->dev, "Failed to " \
+ "configure pwm LUT\n");
+ return rc;
+ }
+ }
+ } else {
+ dev_err(&spmi_dev->dev,
+ "Invalid PWM device\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t pwm_us_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ u32 pwm_us;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 previous_pwm_us;
+ struct pwm_config_data *pwm_cfg;
+
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ ret = kstrtou32(buf, 10, &pwm_us);
+ if (ret)
+ return ret;
+
+ switch (led->id) {
+ case QPNP_ID_LED_MPP:
+ pwm_cfg = led->mpp_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ pwm_cfg = led->rgb_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_KPDBL:
+ pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev,
+ "Invalid LED id type for pwm_us\n");
+ return -EINVAL;
+ }
+
+ if (pwm_cfg->mode == LPG_MODE)
+ pwm_cfg->blinking = true;
+
+ previous_pwm_us = pwm_cfg->pwm_period_us;
+
+ pwm_cfg->pwm_period_us = pwm_us;
+ pwm_free(pwm_cfg->pwm_dev);
+ ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ if (ret) {
+ pwm_cfg->pwm_period_us = previous_pwm_us;
+ pwm_free(pwm_cfg->pwm_dev);
+ qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm with new pwm_us value\n");
+ return ret;
+ }
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ return count;
+}
+
+static ssize_t pause_lo_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ u32 pause_lo;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 previous_pause_lo;
+ struct pwm_config_data *pwm_cfg;
+
+ ret = kstrtou32(buf, 10, &pause_lo);
+ if (ret)
+ return ret;
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ switch (led->id) {
+ case QPNP_ID_LED_MPP:
+ pwm_cfg = led->mpp_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ pwm_cfg = led->rgb_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_KPDBL:
+ pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev,
+ "Invalid LED id type for pause lo\n");
+ return -EINVAL;
+ }
+
+ if (pwm_cfg->mode == LPG_MODE)
+ pwm_cfg->blinking = true;
+
+ previous_pause_lo = pwm_cfg->lut_params.lut_pause_lo;
+
+ pwm_free(pwm_cfg->pwm_dev);
+ pwm_cfg->lut_params.lut_pause_lo = pause_lo;
+ ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ if (ret) {
+ pwm_cfg->lut_params.lut_pause_lo = previous_pause_lo;
+ pwm_free(pwm_cfg->pwm_dev);
+ qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm with new pause lo value\n");
+ return ret;
+ }
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ return count;
+}
+
+static ssize_t pause_hi_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ u32 pause_hi;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 previous_pause_hi;
+ struct pwm_config_data *pwm_cfg;
+
+ ret = kstrtou32(buf, 10, &pause_hi);
+ if (ret)
+ return ret;
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ switch (led->id) {
+ case QPNP_ID_LED_MPP:
+ pwm_cfg = led->mpp_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ pwm_cfg = led->rgb_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_KPDBL:
+ pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev,
+ "Invalid LED id type for pause hi\n");
+ return -EINVAL;
+ }
+
+ if (pwm_cfg->mode == LPG_MODE)
+ pwm_cfg->blinking = true;
+
+ previous_pause_hi = pwm_cfg->lut_params.lut_pause_hi;
+
+ pwm_free(pwm_cfg->pwm_dev);
+ pwm_cfg->lut_params.lut_pause_hi = pause_hi;
+ ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ if (ret) {
+ pwm_cfg->lut_params.lut_pause_hi = previous_pause_hi;
+ pwm_free(pwm_cfg->pwm_dev);
+ qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm with new pause hi value\n");
+ return ret;
+ }
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ return count;
+}
+
+static ssize_t start_idx_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ u32 start_idx;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 previous_start_idx;
+ struct pwm_config_data *pwm_cfg;
+
+ ret = kstrtou32(buf, 10, &start_idx);
+ if (ret)
+ return ret;
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ switch (led->id) {
+ case QPNP_ID_LED_MPP:
+ pwm_cfg = led->mpp_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ pwm_cfg = led->rgb_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_KPDBL:
+ pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev,
+ "Invalid LED id type for start idx\n");
+ return -EINVAL;
+ }
+
+ if (pwm_cfg->mode == LPG_MODE)
+ pwm_cfg->blinking = true;
+
+ previous_start_idx = pwm_cfg->duty_cycles->start_idx;
+ pwm_cfg->duty_cycles->start_idx = start_idx;
+ pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx;
+ pwm_free(pwm_cfg->pwm_dev);
+ ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ if (ret) {
+ pwm_cfg->duty_cycles->start_idx = previous_start_idx;
+ pwm_cfg->lut_params.start_idx = pwm_cfg->duty_cycles->start_idx;
+ pwm_free(pwm_cfg->pwm_dev);
+ qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm with new start idx value\n");
+ return ret;
+ }
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ return count;
+}
+
+static ssize_t ramp_step_ms_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ u32 ramp_step_ms;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 previous_ramp_step_ms;
+ struct pwm_config_data *pwm_cfg;
+
+ ret = kstrtou32(buf, 10, &ramp_step_ms);
+ if (ret)
+ return ret;
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ switch (led->id) {
+ case QPNP_ID_LED_MPP:
+ pwm_cfg = led->mpp_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ pwm_cfg = led->rgb_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_KPDBL:
+ pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev,
+ "Invalid LED id type for ramp step\n");
+ return -EINVAL;
+ }
+
+ if (pwm_cfg->mode == LPG_MODE)
+ pwm_cfg->blinking = true;
+
+ previous_ramp_step_ms = pwm_cfg->lut_params.ramp_step_ms;
+
+ pwm_free(pwm_cfg->pwm_dev);
+ pwm_cfg->lut_params.ramp_step_ms = ramp_step_ms;
+ ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ if (ret) {
+ pwm_cfg->lut_params.ramp_step_ms = previous_ramp_step_ms;
+ pwm_free(pwm_cfg->pwm_dev);
+ qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm with new ramp step value\n");
+ return ret;
+ }
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ return count;
+}
+
+static ssize_t lut_flags_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ u32 lut_flags;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret;
+ u32 previous_lut_flags;
+ struct pwm_config_data *pwm_cfg;
+
+ ret = kstrtou32(buf, 10, &lut_flags);
+ if (ret)
+ return ret;
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ switch (led->id) {
+ case QPNP_ID_LED_MPP:
+ pwm_cfg = led->mpp_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ pwm_cfg = led->rgb_cfg->pwm_cfg;
+ break;
+ case QPNP_ID_KPDBL:
+ pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev,
+ "Invalid LED id type for lut flags\n");
+ return -EINVAL;
+ }
+
+ if (pwm_cfg->mode == LPG_MODE)
+ pwm_cfg->blinking = true;
+
+ previous_lut_flags = pwm_cfg->lut_params.flags;
+
+ pwm_free(pwm_cfg->pwm_dev);
+ pwm_cfg->lut_params.flags = lut_flags;
+ ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ if (ret) {
+ pwm_cfg->lut_params.flags = previous_lut_flags;
+ pwm_free(pwm_cfg->pwm_dev);
+ qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm with new lut flags value\n");
+ return ret;
+ }
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ return count;
+}
+
+static ssize_t duty_pcts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ int num_duty_pcts = 0;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ char *buffer;
+ ssize_t ret;
+ int i = 0;
+ int max_duty_pcts;
+ struct pwm_config_data *pwm_cfg;
+ u32 previous_num_duty_pcts;
+ int value;
+ int *previous_duty_pcts;
+
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+
+ switch (led->id) {
+ case QPNP_ID_LED_MPP:
+ pwm_cfg = led->mpp_cfg->pwm_cfg;
+ max_duty_pcts = PWM_LUT_MAX_SIZE;
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ pwm_cfg = led->rgb_cfg->pwm_cfg;
+ max_duty_pcts = PWM_LUT_MAX_SIZE;
+ break;
+ case QPNP_ID_KPDBL:
+ pwm_cfg = led->kpdbl_cfg->pwm_cfg;
+ max_duty_pcts = PWM_GPLED_LUT_MAX_SIZE;
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev,
+ "Invalid LED id type for duty pcts\n");
+ return -EINVAL;
+ }
+
+ if (pwm_cfg->mode == LPG_MODE)
+ pwm_cfg->blinking = true;
+
+ buffer = (char *)buf;
+
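+	/*
+	 * The sysfs input is a comma-separated list of duty-cycle
+	 * percentages, e.g. "0,25,50,75,100" (values here are only
+	 * illustrative); each entry is parsed into old_duty_pcts before
+	 * the table is swapped in below.
+	 */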
+ for (i = 0; i < max_duty_pcts; i++) {
+ if (buffer == NULL)
+ break;
+ ret = sscanf((const char *)buffer, "%u,%s", &value, buffer);
+ pwm_cfg->old_duty_pcts[i] = value;
+ num_duty_pcts++;
+ if (ret <= 1)
+ break;
+ }
+
+ if (num_duty_pcts >= max_duty_pcts) {
+ dev_err(&led->spmi_dev->dev,
+ "Number of duty pcts given exceeds max (%d)\n",
+ max_duty_pcts);
+ return -EINVAL;
+ }
+
+ previous_num_duty_pcts = pwm_cfg->duty_cycles->num_duty_pcts;
+ previous_duty_pcts = pwm_cfg->duty_cycles->duty_pcts;
+
+ pwm_cfg->duty_cycles->num_duty_pcts = num_duty_pcts;
+ pwm_cfg->duty_cycles->duty_pcts = pwm_cfg->old_duty_pcts;
+ pwm_cfg->old_duty_pcts = previous_duty_pcts;
+ pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts;
+
+ pwm_free(pwm_cfg->pwm_dev);
+ ret = qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ if (ret)
+ goto restore;
+
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ return count;
+
+restore:
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm with new duty pcts value\n");
+ pwm_cfg->duty_cycles->num_duty_pcts = previous_num_duty_pcts;
+ pwm_cfg->old_duty_pcts = pwm_cfg->duty_cycles->duty_pcts;
+ pwm_cfg->duty_cycles->duty_pcts = previous_duty_pcts;
+ pwm_cfg->lut_params.idx_len = pwm_cfg->duty_cycles->num_duty_pcts;
+ pwm_free(pwm_cfg->pwm_dev);
+ qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ qpnp_led_set(&led->cdev, led->cdev.brightness);
+ return ret;
+}
+
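+/*
+ * Turning blink on forces the channel into LPG mode so the duty-cycle LUT
+ * drives the output; turning it off restores the configured default mode.
+ * The PWM channel is released and reprogrammed before the brightness is
+ * reapplied through the per-LED set routine.
+ */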
+static void led_blink(struct qpnp_led_data *led,
+ struct pwm_config_data *pwm_cfg)
+{
+ int rc;
+
+ flush_work(&led->work);
+ mutex_lock(&led->lock);
+ if (pwm_cfg->use_blink) {
+ if (led->cdev.brightness) {
+ pwm_cfg->blinking = true;
+ if (led->id == QPNP_ID_LED_MPP)
+ led->mpp_cfg->pwm_mode = LPG_MODE;
+ else if (led->id == QPNP_ID_KPDBL)
+ led->kpdbl_cfg->pwm_mode = LPG_MODE;
+ pwm_cfg->mode = LPG_MODE;
+ } else {
+ pwm_cfg->blinking = false;
+ pwm_cfg->mode = pwm_cfg->default_mode;
+ if (led->id == QPNP_ID_LED_MPP)
+ led->mpp_cfg->pwm_mode = pwm_cfg->default_mode;
+ else if (led->id == QPNP_ID_KPDBL)
+ led->kpdbl_cfg->pwm_mode =
+ pwm_cfg->default_mode;
+ }
+ pwm_free(pwm_cfg->pwm_dev);
+ qpnp_pwm_init(pwm_cfg, led->spmi_dev, led->cdev.name);
+ if (led->id == QPNP_ID_RGB_RED || led->id == QPNP_ID_RGB_GREEN
+ || led->id == QPNP_ID_RGB_BLUE) {
+ rc = qpnp_rgb_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "RGB set brightness failed (%d)\n", rc);
+ } else if (led->id == QPNP_ID_LED_MPP) {
+ rc = qpnp_mpp_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "MPP set brightness failed (%d)\n", rc);
+ } else if (led->id == QPNP_ID_KPDBL) {
+ rc = qpnp_kpdbl_set(led);
+ if (rc < 0)
+ dev_err(&led->spmi_dev->dev,
+ "KPDBL set brightness failed (%d)\n", rc);
+ }
+ }
+ mutex_unlock(&led->lock);
+}
+
+static ssize_t blink_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct qpnp_led_data *led;
+ unsigned long blinking;
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ ssize_t ret = -EINVAL;
+
+ ret = kstrtoul(buf, 10, &blinking);
+ if (ret)
+ return ret;
+ led = container_of(led_cdev, struct qpnp_led_data, cdev);
+ led->cdev.brightness = blinking ? led->cdev.max_brightness : 0;
+
+ switch (led->id) {
+ case QPNP_ID_LED_MPP:
+ led_blink(led, led->mpp_cfg->pwm_cfg);
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ led_blink(led, led->rgb_cfg->pwm_cfg);
+ break;
+ case QPNP_ID_KPDBL:
+ led_blink(led, led->kpdbl_cfg->pwm_cfg);
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev, "Invalid LED id type for blink\n");
+ return -EINVAL;
+ }
+ return count;
+}
+
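+/*
+ * Writable sysfs attributes exposed under the LED class device. For example
+ * (the LED name "red" is only illustrative), an LED configured for LPG or
+ * blink use could be given a pattern with:
+ *   echo 0 > /sys/class/leds/red/start_idx
+ *   echo "0,25,50,75,100" > /sys/class/leds/red/duty_pcts
+ *   echo 1 > /sys/class/leds/red/blink
+ */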
+static DEVICE_ATTR(led_mode, 0664, NULL, led_mode_store);
+static DEVICE_ATTR(strobe, 0664, NULL, led_strobe_type_store);
+static DEVICE_ATTR(pwm_us, 0664, NULL, pwm_us_store);
+static DEVICE_ATTR(pause_lo, 0664, NULL, pause_lo_store);
+static DEVICE_ATTR(pause_hi, 0664, NULL, pause_hi_store);
+static DEVICE_ATTR(start_idx, 0664, NULL, start_idx_store);
+static DEVICE_ATTR(ramp_step_ms, 0664, NULL, ramp_step_ms_store);
+static DEVICE_ATTR(lut_flags, 0664, NULL, lut_flags_store);
+static DEVICE_ATTR(duty_pcts, 0664, NULL, duty_pcts_store);
+static DEVICE_ATTR(blink, 0664, NULL, blink_store);
+
+static struct attribute *led_attrs[] = {
+ &dev_attr_led_mode.attr,
+ &dev_attr_strobe.attr,
+ NULL
+};
+
+static const struct attribute_group led_attr_group = {
+ .attrs = led_attrs,
+};
+
+static struct attribute *pwm_attrs[] = {
+ &dev_attr_pwm_us.attr,
+ NULL
+};
+
+static struct attribute *lpg_attrs[] = {
+ &dev_attr_pause_lo.attr,
+ &dev_attr_pause_hi.attr,
+ &dev_attr_start_idx.attr,
+ &dev_attr_ramp_step_ms.attr,
+ &dev_attr_lut_flags.attr,
+ &dev_attr_duty_pcts.attr,
+ NULL
+};
+
+static struct attribute *blink_attrs[] = {
+ &dev_attr_blink.attr,
+ NULL
+};
+
+static const struct attribute_group pwm_attr_group = {
+ .attrs = pwm_attrs,
+};
+
+static const struct attribute_group lpg_attr_group = {
+ .attrs = lpg_attrs,
+};
+
+static const struct attribute_group blink_attr_group = {
+ .attrs = blink_attrs,
+};
+
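+/*
+ * Bring the flash peripheral into a known disabled state and, for flash
+ * (non-torch) operation, program headroom, startup delay, the safety timer
+ * (when requested), the VREG_OK source, fault detection, the strobe mask
+ * and the current ramp from the parsed configuration. Torch mode needs no
+ * further setup here.
+ */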
+static int qpnp_flash_init(struct qpnp_led_data *led)
+{
+ int rc;
+
+ led->flash_cfg->flash_on = false;
+
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_STROBE_CTRL(led->base),
+ FLASH_STROBE_MASK, FLASH_DISABLE_ALL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "LED %d flash write failed(%d)\n", led->id, rc);
+ return rc;
+ }
+
+ /* Disable flash LED module */
+ rc = qpnp_led_masked_write(led, FLASH_ENABLE_CONTROL(led->base),
+ FLASH_ENABLE_MASK, FLASH_DISABLE_ALL);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Enable reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ if (led->flash_cfg->torch_enable)
+ return 0;
+
+ /* Set headroom */
+ rc = qpnp_led_masked_write(led, FLASH_HEADROOM(led->base),
+ FLASH_HEADROOM_MASK, led->flash_cfg->headroom);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Headroom reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* Set startup delay */
+ rc = qpnp_led_masked_write(led,
+ FLASH_STARTUP_DELAY(led->base), FLASH_STARTUP_DLY_MASK,
+ led->flash_cfg->startup_dly);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Startup delay reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* Set timer control - safety or watchdog */
+ if (led->flash_cfg->safety_timer) {
+ rc = qpnp_led_masked_write(led,
+ FLASH_LED_TMR_CTRL(led->base),
+ FLASH_TMR_MASK, FLASH_TMR_SAFETY);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "LED timer ctrl reg write failed(%d)\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Set Vreg force */
+ if (led->flash_cfg->vreg_ok)
+ rc = qpnp_led_masked_write(led, FLASH_VREG_OK_FORCE(led->base),
+ FLASH_VREG_MASK, FLASH_SW_VREG_OK);
+ else
+ rc = qpnp_led_masked_write(led, FLASH_VREG_OK_FORCE(led->base),
+ FLASH_VREG_MASK, FLASH_HW_VREG_OK);
+
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Vreg OK reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* Set self fault check */
+ rc = qpnp_led_masked_write(led, FLASH_FAULT_DETECT(led->base),
+ FLASH_FAULT_DETECT_MASK, FLASH_SELFCHECK_ENABLE);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Fault detect reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* Set mask enable */
+ rc = qpnp_led_masked_write(led, FLASH_MASK_ENABLE(led->base),
+ FLASH_MASK_REG_MASK, FLASH_MASK_1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Mask enable reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ /* Set current ramp */
+ rc = qpnp_led_masked_write(led, FLASH_CURRENT_RAMP(led->base),
+ FLASH_CURRENT_RAMP_MASK, FLASH_RAMP_STEP_27US);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Current ramp reg write failed(%d)\n", rc);
+ return rc;
+ }
+
+ led->flash_cfg->strobe_type = 0;
+
+ /* dump flash registers */
+ qpnp_dump_regs(led, flash_debug_regs, ARRAY_SIZE(flash_debug_regs));
+
+ return 0;
+}
+
+static int qpnp_kpdbl_init(struct qpnp_led_data *led)
+{
+ int rc;
+ u8 val;
+
+ /* select row source - vbst or vph */
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ KPDBL_ROW_SRC_SEL(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ KPDBL_ROW_SRC_SEL(led->base), rc);
+ return rc;
+ }
+
+ if (led->kpdbl_cfg->row_src_vbst)
+ val |= 1 << led->kpdbl_cfg->row_id;
+ else
+ val &= ~(1 << led->kpdbl_cfg->row_id);
+
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ KPDBL_ROW_SRC_SEL(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ KPDBL_ROW_SRC_SEL(led->base), rc);
+ return rc;
+ }
+
+ /* row source enable */
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ KPDBL_ROW_SRC(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ KPDBL_ROW_SRC(led->base), rc);
+ return rc;
+ }
+
+ if (led->kpdbl_cfg->row_src_en)
+ val |= KPDBL_ROW_SCAN_EN_MASK | (1 << led->kpdbl_cfg->row_id);
+ else
+ val &= ~(1 << led->kpdbl_cfg->row_id);
+
+ rc = spmi_ext_register_writel(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ KPDBL_ROW_SRC(led->base), &val, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to write to addr=%x, rc(%d)\n",
+ KPDBL_ROW_SRC(led->base), rc);
+ return rc;
+ }
+
+ /* enable module */
+ rc = qpnp_led_masked_write(led, KPDBL_ENABLE(led->base),
+ KPDBL_MODULE_EN_MASK, KPDBL_MODULE_EN);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Enable module write failed(%d)\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_pwm_init(led->kpdbl_cfg->pwm_cfg, led->spmi_dev,
+ led->cdev.name);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm\n");
+ return rc;
+ }
+
+ if (led->kpdbl_cfg->always_on) {
+ kpdbl_master = led->kpdbl_cfg->pwm_cfg->pwm_dev;
+ kpdbl_master_period_us = led->kpdbl_cfg->pwm_cfg->pwm_period_us;
+ }
+
+ /* dump kpdbl registers */
+ qpnp_dump_regs(led, kpdbl_debug_regs, ARRAY_SIZE(kpdbl_debug_regs));
+
+ return 0;
+}
+
+static int qpnp_rgb_init(struct qpnp_led_data *led)
+{
+ int rc;
+
+ rc = qpnp_led_masked_write(led, RGB_LED_SRC_SEL(led->base),
+ RGB_LED_SRC_MASK, RGB_LED_SOURCE_VPH_PWR);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led source select register\n");
+ return rc;
+ }
+
+ rc = qpnp_pwm_init(led->rgb_cfg->pwm_cfg, led->spmi_dev,
+ led->cdev.name);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm\n");
+ return rc;
+ }
+ /* Initialize led for use in auto trickle charging mode */
+ rc = qpnp_led_masked_write(led, RGB_LED_ATC_CTL(led->base),
+ led->rgb_cfg->enable, led->rgb_cfg->enable);
+
+ return 0;
+}
+
+static int qpnp_mpp_init(struct qpnp_led_data *led)
+{
+ int rc;
+ u8 val;
+
+ if (led->max_current < LED_MPP_CURRENT_MIN ||
+ led->max_current > LED_MPP_CURRENT_MAX) {
+ dev_err(&led->spmi_dev->dev,
+ "max current for mpp is not valid\n");
+ return -EINVAL;
+ }
+
+ val = (led->mpp_cfg->current_setting / LED_MPP_CURRENT_PER_SETTING) - 1;
+
+ if (val < 0)
+ val = 0;
+
+ rc = qpnp_led_masked_write(led, LED_MPP_VIN_CTRL(led->base),
+ LED_MPP_VIN_MASK, led->mpp_cfg->vin_ctrl);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led vin control reg\n");
+ return rc;
+ }
+
+ rc = qpnp_led_masked_write(led, LED_MPP_SINK_CTRL(led->base),
+ LED_MPP_SINK_MASK, val);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write sink control reg\n");
+ return rc;
+ }
+
+ if (led->mpp_cfg->pwm_mode != MANUAL_MODE) {
+ rc = qpnp_pwm_init(led->mpp_cfg->pwm_cfg, led->spmi_dev,
+ led->cdev.name);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to initialize pwm\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qpnp_gpio_init(struct qpnp_led_data *led)
+{
+ int rc;
+
+ rc = qpnp_led_masked_write(led, LED_GPIO_VIN_CTRL(led->base),
+ LED_GPIO_VIN_MASK, led->gpio_cfg->vin_ctrl);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failed to write led vin control reg\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qpnp_led_initialize(struct qpnp_led_data *led)
+{
+ int rc = 0;
+
+ switch (led->id) {
+ case QPNP_ID_WLED:
+ rc = qpnp_wled_init(led);
+ if (rc)
+ dev_err(&led->spmi_dev->dev,
+ "WLED initialize failed(%d)\n", rc);
+ break;
+ case QPNP_ID_FLASH1_LED0:
+ case QPNP_ID_FLASH1_LED1:
+ rc = qpnp_flash_init(led);
+ if (rc)
+ dev_err(&led->spmi_dev->dev,
+ "FLASH initialize failed(%d)\n", rc);
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ rc = qpnp_rgb_init(led);
+ if (rc)
+ dev_err(&led->spmi_dev->dev,
+ "RGB initialize failed(%d)\n", rc);
+ break;
+ case QPNP_ID_LED_MPP:
+ rc = qpnp_mpp_init(led);
+ if (rc)
+ dev_err(&led->spmi_dev->dev,
+ "MPP initialize failed(%d)\n", rc);
+ break;
+ case QPNP_ID_LED_GPIO:
+ rc = qpnp_gpio_init(led);
+ if (rc)
+ dev_err(&led->spmi_dev->dev,
+ "GPIO initialize failed(%d)\n", rc);
+ break;
+ case QPNP_ID_KPDBL:
+ rc = qpnp_kpdbl_init(led);
+ if (rc)
+ dev_err(&led->spmi_dev->dev,
+ "KPDBL initialize failed(%d)\n", rc);
+ break;
+ default:
+ dev_err(&led->spmi_dev->dev, "Invalid LED(%d)\n", led->id);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int qpnp_get_common_configs(struct qpnp_led_data *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val;
+ const char *temp_string;
+
+ led->cdev.default_trigger = LED_TRIGGER_DEFAULT;
+ rc = of_property_read_string(node, "linux,default-trigger",
+ &temp_string);
+ if (!rc)
+ led->cdev.default_trigger = temp_string;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->default_on = false;
+ rc = of_property_read_string(node, "qcom,default-state",
+ &temp_string);
+ if (!rc) {
+ if (strncmp(temp_string, "on", sizeof("on")) == 0)
+ led->default_on = true;
+ } else if (rc != -EINVAL)
+ return rc;
+
+ led->turn_off_delay_ms = 0;
+ rc = of_property_read_u32(node, "qcom,turn-off-delay-ms", &val);
+ if (!rc)
+ led->turn_off_delay_ms = val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * Handlers for alternative sources of platform_data
+ */
+static int qpnp_get_config_wled(struct qpnp_led_data *led,
+ struct device_node *node)
+{
+ u32 val;
+ int rc;
+
+ led->wled_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct wled_config_data), GFP_KERNEL);
+ if (!led->wled_cfg) {
+ dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ PMIC_VERSION_REG, &led->wled_cfg->pmic_version, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read pmic ver, rc(%d)\n", rc);
+ }
+
+ led->wled_cfg->num_strings = WLED_DEFAULT_STRINGS;
+ rc = of_property_read_u32(node, "qcom,num-strings", &val);
+ if (!rc)
+ led->wled_cfg->num_strings = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->wled_cfg->num_physical_strings = led->wled_cfg->num_strings;
+ rc = of_property_read_u32(node, "qcom,num-physical-strings", &val);
+ if (!rc)
+ led->wled_cfg->num_physical_strings = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->wled_cfg->ovp_val = WLED_DEFAULT_OVP_VAL;
+ rc = of_property_read_u32(node, "qcom,ovp-val", &val);
+ if (!rc)
+ led->wled_cfg->ovp_val = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->wled_cfg->boost_curr_lim = WLED_BOOST_LIM_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,boost-curr-lim", &val);
+ if (!rc)
+ led->wled_cfg->boost_curr_lim = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->wled_cfg->cp_select = WLED_CP_SEL_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,cp-sel", &val);
+ if (!rc)
+ led->wled_cfg->cp_select = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->wled_cfg->ctrl_delay_us = WLED_CTRL_DLY_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,ctrl-delay-us", &val);
+ if (!rc)
+ led->wled_cfg->ctrl_delay_us = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->wled_cfg->op_fdbck = WLED_OP_FDBCK_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,op-fdbck", &val);
+ if (!rc)
+ led->wled_cfg->op_fdbck = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->wled_cfg->switch_freq = WLED_SWITCH_FREQ_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,switch-freq", &val);
+ if (!rc)
+ led->wled_cfg->switch_freq = (u8) val;
+ else if (rc != -EINVAL)
+ return rc;
+
+ led->wled_cfg->dig_mod_gen_en =
+ of_property_read_bool(node, "qcom,dig-mod-gen-en");
+
+ led->wled_cfg->cs_out_en =
+ of_property_read_bool(node, "qcom,cs-out-en");
+
+ return 0;
+}
+
+static int qpnp_get_config_flash(struct qpnp_led_data *led,
+ struct device_node *node, bool *reg_set)
+{
+ int rc;
+ u32 val;
+
+ led->flash_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct flash_config_data), GFP_KERNEL);
+ if (!led->flash_cfg) {
+ dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ rc = spmi_ext_register_readl(led->spmi_dev->ctrl, led->spmi_dev->sid,
+ FLASH_PERIPHERAL_SUBTYPE(led->base),
+ &led->flash_cfg->peripheral_subtype, 1);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read from addr=%x, rc(%d)\n",
+ FLASH_PERIPHERAL_SUBTYPE(led->base), rc);
+ }
+
+ led->flash_cfg->torch_enable =
+ of_property_read_bool(node, "qcom,torch-enable");
+
+ led->flash_cfg->no_smbb_support =
+ of_property_read_bool(node, "qcom,no-smbb-support");
+
+ if (of_find_property(of_get_parent(node), "flash-wa-supply",
+ NULL) && (!*reg_set)) {
+ led->flash_cfg->flash_wa_reg =
+ devm_regulator_get(&led->spmi_dev->dev, "flash-wa");
+ if (IS_ERR_OR_NULL(led->flash_cfg->flash_wa_reg)) {
+ rc = PTR_ERR(led->flash_cfg->flash_wa_reg);
+			if (rc != -EPROBE_DEFER) {
+ dev_err(&led->spmi_dev->dev,
+ "Flash wa regulator get failed(%d)\n",
+ rc);
+ }
+ } else {
+ led->flash_cfg->flash_wa_reg_get = true;
+ }
+ }
+
+ if (led->id == QPNP_ID_FLASH1_LED0) {
+ led->flash_cfg->enable_module = FLASH_ENABLE_LED_0;
+ led->flash_cfg->current_addr = FLASH_LED_0_CURR(led->base);
+ led->flash_cfg->trigger_flash = FLASH_LED_0_OUTPUT;
+ if (!*reg_set) {
+ led->flash_cfg->flash_boost_reg =
+ regulator_get(&led->spmi_dev->dev,
+ "flash-boost");
+ if (IS_ERR(led->flash_cfg->flash_boost_reg)) {
+ rc = PTR_ERR(led->flash_cfg->flash_boost_reg);
+ dev_err(&led->spmi_dev->dev,
+ "Regulator get failed(%d)\n", rc);
+ goto error_get_flash_reg;
+ }
+ led->flash_cfg->flash_reg_get = true;
+ *reg_set = true;
+ } else
+ led->flash_cfg->flash_reg_get = false;
+
+ if (led->flash_cfg->torch_enable) {
+ led->flash_cfg->second_addr =
+ FLASH_LED_1_CURR(led->base);
+ }
+ } else if (led->id == QPNP_ID_FLASH1_LED1) {
+ led->flash_cfg->enable_module = FLASH_ENABLE_LED_1;
+ led->flash_cfg->current_addr = FLASH_LED_1_CURR(led->base);
+ led->flash_cfg->trigger_flash = FLASH_LED_1_OUTPUT;
+ if (!*reg_set) {
+ led->flash_cfg->flash_boost_reg =
+ regulator_get(&led->spmi_dev->dev,
+ "flash-boost");
+ if (IS_ERR(led->flash_cfg->flash_boost_reg)) {
+ rc = PTR_ERR(led->flash_cfg->flash_boost_reg);
+ dev_err(&led->spmi_dev->dev,
+ "Regulator get failed(%d)\n", rc);
+ goto error_get_flash_reg;
+ }
+ led->flash_cfg->flash_reg_get = true;
+ *reg_set = true;
+ } else
+ led->flash_cfg->flash_reg_get = false;
+
+ if (led->flash_cfg->torch_enable) {
+ led->flash_cfg->second_addr =
+ FLASH_LED_0_CURR(led->base);
+ }
+ } else {
+ dev_err(&led->spmi_dev->dev, "Unknown flash LED name given\n");
+ return -EINVAL;
+ }
+
+ if (led->flash_cfg->torch_enable) {
+ if (of_find_property(of_get_parent(node), "torch-boost-supply",
+ NULL)) {
+ if (!led->flash_cfg->no_smbb_support) {
+ led->flash_cfg->torch_boost_reg =
+ regulator_get(&led->spmi_dev->dev,
+ "torch-boost");
+ if (IS_ERR(led->flash_cfg->torch_boost_reg)) {
+ rc = PTR_ERR(led->flash_cfg->
+ torch_boost_reg);
+ dev_err(&led->spmi_dev->dev,
+ "Torch regulator get failed(%d)\n", rc);
+ goto error_get_torch_reg;
+ }
+ }
+ led->flash_cfg->enable_module = FLASH_ENABLE_MODULE;
+ } else
+ led->flash_cfg->enable_module = FLASH_ENABLE_ALL;
+ led->flash_cfg->trigger_flash = FLASH_TORCH_OUTPUT;
+
+ rc = of_property_read_u32(node, "qcom,duration", &val);
+ if (!rc)
+ led->flash_cfg->duration = ((u8) val) - 2;
+ else if (rc == -EINVAL)
+ led->flash_cfg->duration = TORCH_DURATION_12s;
+ else {
+ if (led->flash_cfg->peripheral_subtype ==
+ FLASH_SUBTYPE_SINGLE)
+ goto error_get_flash_reg;
+ else if (led->flash_cfg->peripheral_subtype ==
+ FLASH_SUBTYPE_DUAL)
+ goto error_get_torch_reg;
+ }
+
+ rc = of_property_read_u32(node, "qcom,current", &val);
+ if (!rc)
+ led->flash_cfg->current_prgm = (val *
+ TORCH_MAX_LEVEL / led->max_current);
+ else {
+ if (led->flash_cfg->peripheral_subtype ==
+ FLASH_SUBTYPE_SINGLE)
+ goto error_get_flash_reg;
+ else if (led->flash_cfg->peripheral_subtype ==
+ FLASH_SUBTYPE_DUAL)
+ goto error_get_torch_reg;
+ goto error_get_torch_reg;
+ }
+
+ return 0;
+ } else {
+ rc = of_property_read_u32(node, "qcom,duration", &val);
+ if (!rc)
+ led->flash_cfg->duration = (u8)((val - 10) / 10);
+ else if (rc == -EINVAL)
+ led->flash_cfg->duration = FLASH_DURATION_200ms;
+ else
+ goto error_get_flash_reg;
+
+ rc = of_property_read_u32(node, "qcom,current", &val);
+ if (!rc)
+ led->flash_cfg->current_prgm = (val *
+ FLASH_MAX_LEVEL / led->max_current);
+ else
+ goto error_get_flash_reg;
+ }
+
+ rc = of_property_read_u32(node, "qcom,headroom", &val);
+ if (!rc)
+ led->flash_cfg->headroom = (u8) val;
+ else if (rc == -EINVAL)
+ led->flash_cfg->headroom = HEADROOM_500mV;
+ else
+ goto error_get_flash_reg;
+
+ rc = of_property_read_u32(node, "qcom,clamp-curr", &val);
+ if (!rc)
+ led->flash_cfg->clamp_curr = (val *
+ FLASH_MAX_LEVEL / led->max_current);
+ else if (rc == -EINVAL)
+ led->flash_cfg->clamp_curr = FLASH_CLAMP_200mA;
+ else
+ goto error_get_flash_reg;
+
+ rc = of_property_read_u32(node, "qcom,startup-dly", &val);
+ if (!rc)
+ led->flash_cfg->startup_dly = (u8) val;
+ else if (rc == -EINVAL)
+ led->flash_cfg->startup_dly = DELAY_128us;
+ else
+ goto error_get_flash_reg;
+
+ led->flash_cfg->safety_timer =
+ of_property_read_bool(node, "qcom,safety-timer");
+
+ led->flash_cfg->vreg_ok =
+ of_property_read_bool(node, "qcom,sw_vreg_ok");
+
+ return 0;
+
+error_get_torch_reg:
+ if (led->flash_cfg->no_smbb_support)
+ regulator_put(led->flash_cfg->flash_boost_reg);
+ else
+ regulator_put(led->flash_cfg->torch_boost_reg);
+
+error_get_flash_reg:
+ regulator_put(led->flash_cfg->flash_boost_reg);
+ return rc;
+
+}
+
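+/*
+ * Parse the PWM/LPG portion of an LED child node: the PWM handle itself,
+ * "qcom,pwm-us", and, for LPG or blink use, the "qcom,duty-pcts" table plus
+ * the optional start index, pause, ramp-step and LUT flag properties. On bad
+ * LPG parameters a PWM-mode LED falls back to plain PWM instead of failing.
+ */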
+static int qpnp_get_config_pwm(struct pwm_config_data *pwm_cfg,
+ struct spmi_device *spmi_dev,
+ struct device_node *node)
+{
+ struct property *prop;
+ int rc, i, lut_max_size;
+ u32 val;
+ u8 *temp_cfg;
+ const char *led_label;
+
+ pwm_cfg->pwm_dev = of_pwm_get(node, NULL);
+
+ if (IS_ERR(pwm_cfg->pwm_dev)) {
+ rc = PTR_ERR(pwm_cfg->pwm_dev);
+ dev_err(&spmi_dev->dev, "Cannot get PWM device rc:(%d)\n", rc);
+ pwm_cfg->pwm_dev = NULL;
+ return rc;
+ }
+
+ if (pwm_cfg->mode != MANUAL_MODE) {
+ rc = of_property_read_u32(node, "qcom,pwm-us", &val);
+ if (!rc)
+ pwm_cfg->pwm_period_us = val;
+ else
+ return rc;
+ }
+
+ pwm_cfg->use_blink =
+ of_property_read_bool(node, "qcom,use-blink");
+
+ if (pwm_cfg->mode == LPG_MODE || pwm_cfg->use_blink) {
+ pwm_cfg->duty_cycles =
+ devm_kzalloc(&spmi_dev->dev,
+ sizeof(struct pwm_duty_cycles), GFP_KERNEL);
+ if (!pwm_cfg->duty_cycles) {
+ dev_err(&spmi_dev->dev,
+ "Unable to allocate memory\n");
+ rc = -ENOMEM;
+ goto bad_lpg_params;
+ }
+
+ prop = of_find_property(node, "qcom,duty-pcts",
+ &pwm_cfg->duty_cycles->num_duty_pcts);
+ if (!prop) {
+ dev_err(&spmi_dev->dev, "Looking up property " \
+ "node qcom,duty-pcts failed\n");
+ rc = -ENODEV;
+ goto bad_lpg_params;
+ } else if (!pwm_cfg->duty_cycles->num_duty_pcts) {
+ dev_err(&spmi_dev->dev, "Invalid length of " \
+ "duty pcts\n");
+ rc = -EINVAL;
+ goto bad_lpg_params;
+ }
+
+ rc = of_property_read_string(node, "label", &led_label);
+
+ if (rc < 0) {
+ dev_err(&spmi_dev->dev,
+ "Failure reading label, rc = %d\n", rc);
+ return rc;
+ }
+
+ if (strcmp(led_label, "kpdbl") == 0)
+ lut_max_size = PWM_GPLED_LUT_MAX_SIZE;
+ else
+ lut_max_size = PWM_LUT_MAX_SIZE;
+
+ pwm_cfg->duty_cycles->duty_pcts =
+ devm_kzalloc(&spmi_dev->dev,
+ sizeof(int) * lut_max_size,
+ GFP_KERNEL);
+ if (!pwm_cfg->duty_cycles->duty_pcts) {
+ dev_err(&spmi_dev->dev,
+ "Unable to allocate memory\n");
+ rc = -ENOMEM;
+ goto bad_lpg_params;
+ }
+
+ pwm_cfg->old_duty_pcts =
+ devm_kzalloc(&spmi_dev->dev,
+ sizeof(int) * lut_max_size,
+ GFP_KERNEL);
+ if (!pwm_cfg->old_duty_pcts) {
+ dev_err(&spmi_dev->dev,
+ "Unable to allocate memory\n");
+ rc = -ENOMEM;
+ goto bad_lpg_params;
+ }
+
+ temp_cfg = devm_kzalloc(&spmi_dev->dev,
+ pwm_cfg->duty_cycles->num_duty_pcts *
+ sizeof(u8), GFP_KERNEL);
+ if (!temp_cfg) {
+ dev_err(&spmi_dev->dev, "Failed to allocate " \
+ "memory for duty pcts\n");
+ rc = -ENOMEM;
+ goto bad_lpg_params;
+ }
+
+ memcpy(temp_cfg, prop->value,
+ pwm_cfg->duty_cycles->num_duty_pcts);
+
+ for (i = 0; i < pwm_cfg->duty_cycles->num_duty_pcts; i++)
+ pwm_cfg->duty_cycles->duty_pcts[i] =
+ (int) temp_cfg[i];
+
+ rc = of_property_read_u32(node, "qcom,start-idx", &val);
+ if (!rc) {
+ pwm_cfg->lut_params.start_idx = val;
+ pwm_cfg->duty_cycles->start_idx = val;
+ } else
+ goto bad_lpg_params;
+
+ pwm_cfg->lut_params.lut_pause_hi = 0;
+ rc = of_property_read_u32(node, "qcom,pause-hi", &val);
+ if (!rc)
+ pwm_cfg->lut_params.lut_pause_hi = val;
+ else if (rc != -EINVAL)
+ goto bad_lpg_params;
+
+ pwm_cfg->lut_params.lut_pause_lo = 0;
+ rc = of_property_read_u32(node, "qcom,pause-lo", &val);
+ if (!rc)
+ pwm_cfg->lut_params.lut_pause_lo = val;
+ else if (rc != -EINVAL)
+ goto bad_lpg_params;
+
+ pwm_cfg->lut_params.ramp_step_ms =
+ QPNP_LUT_RAMP_STEP_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,ramp-step-ms", &val);
+ if (!rc)
+ pwm_cfg->lut_params.ramp_step_ms = val;
+ else if (rc != -EINVAL)
+ goto bad_lpg_params;
+
+ pwm_cfg->lut_params.flags = QPNP_LED_PWM_FLAGS;
+ rc = of_property_read_u32(node, "qcom,lut-flags", &val);
+ if (!rc)
+ pwm_cfg->lut_params.flags = (u8) val;
+ else if (rc != -EINVAL)
+ goto bad_lpg_params;
+
+ pwm_cfg->lut_params.idx_len =
+ pwm_cfg->duty_cycles->num_duty_pcts;
+ }
+ return 0;
+
+bad_lpg_params:
+ pwm_cfg->use_blink = false;
+ if (pwm_cfg->mode == PWM_MODE) {
+ dev_err(&spmi_dev->dev, "LPG parameters not set for" \
+ " blink mode, defaulting to PWM mode\n");
+ return 0;
+ }
+ return rc;
+};
+
+static int qpnp_led_get_mode(const char *mode)
+{
+ if (strncmp(mode, "manual", strlen(mode)) == 0)
+ return MANUAL_MODE;
+ else if (strncmp(mode, "pwm", strlen(mode)) == 0)
+ return PWM_MODE;
+ else if (strncmp(mode, "lpg", strlen(mode)) == 0)
+ return LPG_MODE;
+ else
+ return -EINVAL;
+};
+
+static int qpnp_get_config_kpdbl(struct qpnp_led_data *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val;
+ u8 led_mode;
+ const char *mode;
+
+ led->kpdbl_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct kpdbl_config_data), GFP_KERNEL);
+ if (!led->kpdbl_cfg) {
+ dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_string(node, "qcom,mode", &mode);
+ if (!rc) {
+ led_mode = qpnp_led_get_mode(mode);
+ if ((led_mode == MANUAL_MODE) || (led_mode == -EINVAL)) {
+ dev_err(&led->spmi_dev->dev, "Selected mode not " \
+ "supported for kpdbl.\n");
+ return -EINVAL;
+ }
+ led->kpdbl_cfg->pwm_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct pwm_config_data),
+ GFP_KERNEL);
+ if (!led->kpdbl_cfg->pwm_cfg) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ led->kpdbl_cfg->pwm_cfg->mode = led_mode;
+ led->kpdbl_cfg->pwm_cfg->default_mode = led_mode;
+ } else
+ return rc;
+
+ rc = qpnp_get_config_pwm(led->kpdbl_cfg->pwm_cfg, led->spmi_dev, node);
+ if (rc < 0)
+ return rc;
+
+ rc = of_property_read_u32(node, "qcom,row-id", &val);
+ if (!rc)
+ led->kpdbl_cfg->row_id = val;
+ else
+ return rc;
+
+ led->kpdbl_cfg->row_src_vbst =
+ of_property_read_bool(node, "qcom,row-src-vbst");
+
+ led->kpdbl_cfg->row_src_en =
+ of_property_read_bool(node, "qcom,row-src-en");
+
+ led->kpdbl_cfg->always_on =
+ of_property_read_bool(node, "qcom,always-on");
+
+ return 0;
+}
+
+static int qpnp_get_config_rgb(struct qpnp_led_data *led,
+ struct device_node *node)
+{
+ int rc;
+ u8 led_mode;
+ const char *mode;
+
+ led->rgb_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct rgb_config_data), GFP_KERNEL);
+ if (!led->rgb_cfg) {
+ dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ if (led->id == QPNP_ID_RGB_RED)
+ led->rgb_cfg->enable = RGB_LED_ENABLE_RED;
+ else if (led->id == QPNP_ID_RGB_GREEN)
+ led->rgb_cfg->enable = RGB_LED_ENABLE_GREEN;
+ else if (led->id == QPNP_ID_RGB_BLUE)
+ led->rgb_cfg->enable = RGB_LED_ENABLE_BLUE;
+ else
+ return -EINVAL;
+
+ rc = of_property_read_string(node, "qcom,mode", &mode);
+ if (!rc) {
+ led_mode = qpnp_led_get_mode(mode);
+ if ((led_mode == MANUAL_MODE) || (led_mode == -EINVAL)) {
+ dev_err(&led->spmi_dev->dev, "Selected mode not " \
+ "supported for rgb.\n");
+ return -EINVAL;
+ }
+ led->rgb_cfg->pwm_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct pwm_config_data),
+ GFP_KERNEL);
+ if (!led->rgb_cfg->pwm_cfg) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ led->rgb_cfg->pwm_cfg->mode = led_mode;
+ led->rgb_cfg->pwm_cfg->default_mode = led_mode;
+ } else
+ return rc;
+
+ rc = qpnp_get_config_pwm(led->rgb_cfg->pwm_cfg, led->spmi_dev, node);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int qpnp_get_config_mpp(struct qpnp_led_data *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val;
+ u8 led_mode;
+ const char *mode;
+
+ led->mpp_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct mpp_config_data), GFP_KERNEL);
+ if (!led->mpp_cfg) {
+ dev_err(&led->spmi_dev->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ if (of_find_property(of_get_parent(node), "mpp-power-supply", NULL)) {
+ led->mpp_cfg->mpp_reg =
+ regulator_get(&led->spmi_dev->dev,
+ "mpp-power");
+ if (IS_ERR(led->mpp_cfg->mpp_reg)) {
+ rc = PTR_ERR(led->mpp_cfg->mpp_reg);
+ dev_err(&led->spmi_dev->dev,
+ "MPP regulator get failed(%d)\n", rc);
+ return rc;
+ }
+ }
+
+ if (led->mpp_cfg->mpp_reg) {
+ rc = of_property_read_u32(of_get_parent(node),
+ "qcom,mpp-power-max-voltage", &val);
+ if (!rc)
+ led->mpp_cfg->max_uV = val;
+ else
+ goto err_config_mpp;
+
+ rc = of_property_read_u32(of_get_parent(node),
+ "qcom,mpp-power-min-voltage", &val);
+ if (!rc)
+ led->mpp_cfg->min_uV = val;
+ else
+ goto err_config_mpp;
+
+ } else {
+ rc = of_property_read_u32(of_get_parent(node),
+ "qcom,mpp-power-max-voltage", &val);
+ if (!rc)
+ dev_warn(&led->spmi_dev->dev,
+ "No regulator specified\n");
+
+ rc = of_property_read_u32(of_get_parent(node),
+ "qcom,mpp-power-min-voltage", &val);
+ if (!rc)
+ dev_warn(&led->spmi_dev->dev,
+ "No regulator specified\n");
+ }
+
+ led->mpp_cfg->current_setting = LED_MPP_CURRENT_MIN;
+ rc = of_property_read_u32(node, "qcom,current-setting", &val);
+ if (!rc) {
+		if (val < LED_MPP_CURRENT_MIN)
+			led->mpp_cfg->current_setting = LED_MPP_CURRENT_MIN;
+		else if (val > LED_MPP_CURRENT_MAX)
+			led->mpp_cfg->current_setting = LED_MPP_CURRENT_MAX;
+ else
+ led->mpp_cfg->current_setting = (u8) val;
+ } else if (rc != -EINVAL)
+ goto err_config_mpp;
+
+ led->mpp_cfg->source_sel = LED_MPP_SOURCE_SEL_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,source-sel", &val);
+ if (!rc)
+ led->mpp_cfg->source_sel = (u8) val;
+ else if (rc != -EINVAL)
+ goto err_config_mpp;
+
+ led->mpp_cfg->mode_ctrl = LED_MPP_MODE_SINK;
+ rc = of_property_read_u32(node, "qcom,mode-ctrl", &val);
+ if (!rc)
+ led->mpp_cfg->mode_ctrl = (u8) val;
+ else if (rc != -EINVAL)
+ goto err_config_mpp;
+
+ led->mpp_cfg->vin_ctrl = LED_MPP_VIN_CTRL_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,vin-ctrl", &val);
+ if (!rc)
+ led->mpp_cfg->vin_ctrl = (u8) val;
+ else if (rc != -EINVAL)
+ goto err_config_mpp;
+
+ led->mpp_cfg->min_brightness = 0;
+ rc = of_property_read_u32(node, "qcom,min-brightness", &val);
+ if (!rc)
+ led->mpp_cfg->min_brightness = (u8) val;
+ else if (rc != -EINVAL)
+ goto err_config_mpp;
+
+ rc = of_property_read_string(node, "qcom,mode", &mode);
+ if (!rc) {
+ led_mode = qpnp_led_get_mode(mode);
+ led->mpp_cfg->pwm_mode = led_mode;
+ if (led_mode == MANUAL_MODE)
+ return MANUAL_MODE;
+ else if (led_mode == -EINVAL) {
+ dev_err(&led->spmi_dev->dev, "Selected mode not " \
+ "supported for mpp.\n");
+ rc = -EINVAL;
+ goto err_config_mpp;
+ }
+ led->mpp_cfg->pwm_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct pwm_config_data),
+ GFP_KERNEL);
+ if (!led->mpp_cfg->pwm_cfg) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to allocate memory\n");
+ rc = -ENOMEM;
+ goto err_config_mpp;
+ }
+ led->mpp_cfg->pwm_cfg->mode = led_mode;
+ led->mpp_cfg->pwm_cfg->default_mode = led_mode;
+ } else
+ return rc;
+
+ rc = qpnp_get_config_pwm(led->mpp_cfg->pwm_cfg, led->spmi_dev, node);
+ if (rc < 0)
+ goto err_config_mpp;
+
+ return 0;
+
+err_config_mpp:
+ if (led->mpp_cfg->mpp_reg)
+ regulator_put(led->mpp_cfg->mpp_reg);
+ return rc;
+}
+
+static int qpnp_get_config_gpio(struct qpnp_led_data *led,
+ struct device_node *node)
+{
+ int rc;
+ u32 val;
+
+ led->gpio_cfg = devm_kzalloc(&led->spmi_dev->dev,
+ sizeof(struct gpio_config_data), GFP_KERNEL);
+ if (!led->gpio_cfg) {
+ dev_err(&led->spmi_dev->dev, "Unable to allocate memory gpio struct\n");
+ return -ENOMEM;
+ }
+
+ led->gpio_cfg->source_sel = LED_GPIO_SOURCE_SEL_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,source-sel", &val);
+ if (!rc)
+ led->gpio_cfg->source_sel = (u8) val;
+ else if (rc != -EINVAL)
+ goto err_config_gpio;
+
+ led->gpio_cfg->mode_ctrl = LED_GPIO_MODE_OUTPUT;
+ rc = of_property_read_u32(node, "qcom,mode-ctrl", &val);
+ if (!rc)
+ led->gpio_cfg->mode_ctrl = (u8) val;
+ else if (rc != -EINVAL)
+ goto err_config_gpio;
+
+ led->gpio_cfg->vin_ctrl = LED_GPIO_VIN_CTRL_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,vin-ctrl", &val);
+ if (!rc)
+ led->gpio_cfg->vin_ctrl = (u8) val;
+ else if (rc != -EINVAL)
+ goto err_config_gpio;
+
+ return 0;
+
+err_config_gpio:
+ return rc;
+}
+
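+/*
+ * Probe walks every child node of the SPMI device, matches its "label"
+ * against a supported LED family (wled, flash, rgb, mpp, gpio, kpdbl),
+ * parses the per-family configuration, initializes the hardware and then
+ * registers a led_classdev along with the matching sysfs attribute groups.
+ */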
+static int qpnp_leds_probe(struct spmi_device *spmi)
+{
+ struct qpnp_led_data *led, *led_array;
+ struct resource *led_resource;
+ struct device_node *node, *temp;
+ int rc, i, num_leds = 0, parsed_leds = 0;
+ const char *led_label;
+ bool regulator_probe = false;
+
+ node = spmi->dev.of_node;
+ if (node == NULL)
+ return -ENODEV;
+
+ temp = NULL;
+ while ((temp = of_get_next_child(node, temp)))
+ num_leds++;
+
+ if (!num_leds)
+ return -ECHILD;
+
+ led_array = devm_kzalloc(&spmi->dev,
+ (sizeof(struct qpnp_led_data) * num_leds), GFP_KERNEL);
+ if (!led_array) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ for_each_child_of_node(node, temp) {
+ led = &led_array[parsed_leds];
+ led->num_leds = num_leds;
+ led->spmi_dev = spmi;
+
+ led_resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!led_resource) {
+ dev_err(&spmi->dev, "Unable to get LED base address\n");
+ rc = -ENXIO;
+ goto fail_id_check;
+ }
+ led->base = led_resource->start;
+
+ rc = of_property_read_string(temp, "label", &led_label);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Failure reading label, rc = %d\n", rc);
+ goto fail_id_check;
+ }
+
+ rc = of_property_read_string(temp, "linux,name",
+ &led->cdev.name);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Failure reading led name, rc = %d\n", rc);
+ goto fail_id_check;
+ }
+
+ rc = of_property_read_u32(temp, "qcom,max-current",
+ &led->max_current);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Failure reading max_current, rc = %d\n", rc);
+ goto fail_id_check;
+ }
+
+ rc = of_property_read_u32(temp, "qcom,id", &led->id);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Failure reading led id, rc = %d\n", rc);
+ goto fail_id_check;
+ }
+
+ rc = qpnp_get_common_configs(led, temp);
+ if (rc) {
+ dev_err(&led->spmi_dev->dev,
+ "Failure reading common led configuration," \
+ " rc = %d\n", rc);
+ goto fail_id_check;
+ }
+
+ led->cdev.brightness_set = qpnp_led_set;
+ led->cdev.brightness_get = qpnp_led_get;
+
+ if (strncmp(led_label, "wled", sizeof("wled")) == 0) {
+ rc = qpnp_get_config_wled(led, temp);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read wled config data\n");
+ goto fail_id_check;
+ }
+ } else if (strncmp(led_label, "flash", sizeof("flash"))
+ == 0) {
+ if (!of_find_property(node, "flash-boost-supply", NULL))
+ regulator_probe = true;
+ rc = qpnp_get_config_flash(led, temp, &regulator_probe);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read flash config data\n");
+ goto fail_id_check;
+ }
+ } else if (strncmp(led_label, "rgb", sizeof("rgb")) == 0) {
+ rc = qpnp_get_config_rgb(led, temp);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read rgb config data\n");
+ goto fail_id_check;
+ }
+ } else if (strncmp(led_label, "mpp", sizeof("mpp")) == 0) {
+ rc = qpnp_get_config_mpp(led, temp);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read mpp config data\n");
+ goto fail_id_check;
+ }
+ } else if (strcmp(led_label, "gpio") == 0) {
+ rc = qpnp_get_config_gpio(led, temp);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read gpio config data\n");
+ goto fail_id_check;
+ }
+ } else if (strncmp(led_label, "kpdbl", sizeof("kpdbl")) == 0) {
+ bitmap_zero(kpdbl_leds_in_use, NUM_KPDBL_LEDS);
+ is_kpdbl_master_turn_on = false;
+ rc = qpnp_get_config_kpdbl(led, temp);
+ if (rc < 0) {
+ dev_err(&led->spmi_dev->dev,
+ "Unable to read kpdbl config data\n");
+ goto fail_id_check;
+ }
+ } else {
+ dev_err(&led->spmi_dev->dev, "No LED matching label\n");
+ rc = -EINVAL;
+ goto fail_id_check;
+ }
+
+ if (led->id != QPNP_ID_FLASH1_LED0 &&
+ led->id != QPNP_ID_FLASH1_LED1)
+ mutex_init(&led->lock);
+
+ led->in_order_command_processing = of_property_read_bool
+ (temp, "qcom,in-order-command-processing");
+
+ if (led->in_order_command_processing) {
+ /*
+			 * The command order from user space needs to be
+			 * maintained, so use an ordered workqueue to prevent
+			 * concurrent processing of the requests.
+ */
+ led->workqueue = alloc_ordered_workqueue
+ ("led_workqueue", 0);
+ if (!led->workqueue) {
+ rc = -ENOMEM;
+ goto fail_id_check;
+ }
+ }
+
+ INIT_WORK(&led->work, qpnp_led_work);
+
+ rc = qpnp_led_initialize(led);
+ if (rc < 0)
+ goto fail_id_check;
+
+ rc = qpnp_led_set_max_brightness(led);
+ if (rc < 0)
+ goto fail_id_check;
+
+ rc = led_classdev_register(&spmi->dev, &led->cdev);
+ if (rc) {
+ dev_err(&spmi->dev, "unable to register led %d,rc=%d\n",
+ led->id, rc);
+ goto fail_id_check;
+ }
+
+ if (led->id == QPNP_ID_FLASH1_LED0 ||
+ led->id == QPNP_ID_FLASH1_LED1) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &led_attr_group);
+ if (rc)
+ goto fail_id_check;
+
+ }
+
+ if (led->id == QPNP_ID_LED_MPP) {
+ if (!led->mpp_cfg->pwm_cfg)
+ break;
+ if (led->mpp_cfg->pwm_cfg->mode == PWM_MODE) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &pwm_attr_group);
+ if (rc)
+ goto fail_id_check;
+ }
+ if (led->mpp_cfg->pwm_cfg->use_blink) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &blink_attr_group);
+ if (rc)
+ goto fail_id_check;
+
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &lpg_attr_group);
+ if (rc)
+ goto fail_id_check;
+ } else if (led->mpp_cfg->pwm_cfg->mode == LPG_MODE) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &lpg_attr_group);
+ if (rc)
+ goto fail_id_check;
+ }
+ } else if ((led->id == QPNP_ID_RGB_RED) ||
+ (led->id == QPNP_ID_RGB_GREEN) ||
+ (led->id == QPNP_ID_RGB_BLUE)) {
+ if (led->rgb_cfg->pwm_cfg->mode == PWM_MODE) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &pwm_attr_group);
+ if (rc)
+ goto fail_id_check;
+ }
+ if (led->rgb_cfg->pwm_cfg->use_blink) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &blink_attr_group);
+ if (rc)
+ goto fail_id_check;
+
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &lpg_attr_group);
+ if (rc)
+ goto fail_id_check;
+ } else if (led->rgb_cfg->pwm_cfg->mode == LPG_MODE) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &lpg_attr_group);
+ if (rc)
+ goto fail_id_check;
+ }
+ } else if (led->id == QPNP_ID_KPDBL) {
+ if (led->kpdbl_cfg->pwm_cfg->mode == PWM_MODE) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &pwm_attr_group);
+ if (rc)
+ goto fail_id_check;
+ }
+ if (led->kpdbl_cfg->pwm_cfg->use_blink) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &blink_attr_group);
+ if (rc)
+ goto fail_id_check;
+
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &lpg_attr_group);
+ if (rc)
+ goto fail_id_check;
+ } else if (led->kpdbl_cfg->pwm_cfg->mode == LPG_MODE) {
+ rc = sysfs_create_group(&led->cdev.dev->kobj,
+ &lpg_attr_group);
+ if (rc)
+ goto fail_id_check;
+ }
+ }
+
+ /* configure default state */
+ if (led->default_on) {
+ led->cdev.brightness = led->cdev.max_brightness;
+ __qpnp_led_work(led, led->cdev.brightness);
+ if (led->turn_off_delay_ms > 0)
+ qpnp_led_turn_off(led);
+ } else
+ led->cdev.brightness = LED_OFF;
+
+ parsed_leds++;
+ }
+ dev_set_drvdata(&spmi->dev, led_array);
+ return 0;
+
+fail_id_check:
+ for (i = 0; i < parsed_leds; i++) {
+ if (led_array[i].id != QPNP_ID_FLASH1_LED0 &&
+ led_array[i].id != QPNP_ID_FLASH1_LED1)
+ mutex_destroy(&led_array[i].lock);
+ if (led_array[i].in_order_command_processing)
+ destroy_workqueue(led_array[i].workqueue);
+ led_classdev_unregister(&led_array[i].cdev);
+ }
+
+ return rc;
+}
+
+static int qpnp_leds_remove(struct spmi_device *spmi)
+{
+ struct qpnp_led_data *led_array = dev_get_drvdata(&spmi->dev);
+ int i, parsed_leds = led_array->num_leds;
+
+ for (i = 0; i < parsed_leds; i++) {
+ cancel_work_sync(&led_array[i].work);
+ if (led_array[i].id != QPNP_ID_FLASH1_LED0 &&
+ led_array[i].id != QPNP_ID_FLASH1_LED1)
+ mutex_destroy(&led_array[i].lock);
+
+ if (led_array[i].in_order_command_processing)
+ destroy_workqueue(led_array[i].workqueue);
+ led_classdev_unregister(&led_array[i].cdev);
+ switch (led_array[i].id) {
+ case QPNP_ID_WLED:
+ break;
+ case QPNP_ID_FLASH1_LED0:
+ case QPNP_ID_FLASH1_LED1:
+ if (led_array[i].flash_cfg->flash_reg_get)
+ regulator_put(led_array[i].flash_cfg-> \
+ flash_boost_reg);
+ if (led_array[i].flash_cfg->torch_enable)
+ if (!led_array[i].flash_cfg->no_smbb_support)
+ regulator_put(led_array[i].
+ flash_cfg->torch_boost_reg);
+ sysfs_remove_group(&led_array[i].cdev.dev->kobj,
+ &led_attr_group);
+ break;
+ case QPNP_ID_RGB_RED:
+ case QPNP_ID_RGB_GREEN:
+ case QPNP_ID_RGB_BLUE:
+ if (led_array[i].rgb_cfg->pwm_cfg->mode == PWM_MODE)
+ sysfs_remove_group(&led_array[i].cdev.dev->\
+ kobj, &pwm_attr_group);
+ if (led_array[i].rgb_cfg->pwm_cfg->use_blink) {
+ sysfs_remove_group(&led_array[i].cdev.dev->\
+ kobj, &blink_attr_group);
+ sysfs_remove_group(&led_array[i].cdev.dev->\
+ kobj, &lpg_attr_group);
+ } else if (led_array[i].rgb_cfg->pwm_cfg->mode\
+ == LPG_MODE)
+ sysfs_remove_group(&led_array[i].cdev.dev->\
+ kobj, &lpg_attr_group);
+ break;
+ case QPNP_ID_LED_MPP:
+ if (!led_array[i].mpp_cfg->pwm_cfg)
+ break;
+ if (led_array[i].mpp_cfg->pwm_cfg->mode == PWM_MODE)
+ sysfs_remove_group(&led_array[i].cdev.dev->\
+ kobj, &pwm_attr_group);
+ if (led_array[i].mpp_cfg->pwm_cfg->use_blink) {
+ sysfs_remove_group(&led_array[i].cdev.dev->\
+ kobj, &blink_attr_group);
+ sysfs_remove_group(&led_array[i].cdev.dev->\
+ kobj, &lpg_attr_group);
+ } else if (led_array[i].mpp_cfg->pwm_cfg->mode\
+ == LPG_MODE)
+ sysfs_remove_group(&led_array[i].cdev.dev->\
+ kobj, &lpg_attr_group);
+ if (led_array[i].mpp_cfg->mpp_reg)
+ regulator_put(led_array[i].mpp_cfg->mpp_reg);
+ break;
+ case QPNP_ID_KPDBL:
+ if (led_array[i].kpdbl_cfg->pwm_cfg->mode == PWM_MODE)
+ sysfs_remove_group(&led_array[i].cdev.dev->
+ kobj, &pwm_attr_group);
+ if (led_array[i].kpdbl_cfg->pwm_cfg->use_blink) {
+ sysfs_remove_group(&led_array[i].cdev.dev->
+ kobj, &blink_attr_group);
+ sysfs_remove_group(&led_array[i].cdev.dev->
+ kobj, &lpg_attr_group);
+ } else if (led_array[i].kpdbl_cfg->pwm_cfg->mode
+ == LPG_MODE)
+ sysfs_remove_group(&led_array[i].cdev.dev->
+ kobj, &lpg_attr_group);
+ break;
+ default:
+ dev_err(&led_array[i].spmi_dev->dev,
+ "Invalid LED(%d)\n",
+ led_array[i].id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,leds-qpnp",},
+ { },
+};
+#else
+#define spmi_match_table NULL
+#endif
+
+static struct spmi_driver qpnp_leds_driver = {
+ .driver = {
+ .name = "qcom,leds-qpnp",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_leds_probe,
+ .remove = qpnp_leds_remove,
+};
+
+static int __init qpnp_led_init(void)
+{
+ return spmi_driver_register(&qpnp_leds_driver);
+}
+module_init(qpnp_led_init);
+
+static void __exit qpnp_led_exit(void)
+{
+ spmi_driver_unregister(&qpnp_leds_driver);
+}
+module_exit(qpnp_led_exit);
+
+MODULE_DESCRIPTION("QPNP LEDs driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("leds:leds-qpnp");
+
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index e2a48415d969..e506c6b0e142 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -112,4 +112,9 @@ config OF_OVERLAY
While this option is selected automatically when needed, you can
enable it manually to improve device tree unit test coverage.
+config OF_BATTERYDATA
+ def_bool y
+ help
+ OpenFirmware BatteryData accessors
+
endif # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 156c072b3117..7bcd7adb81bf 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -14,5 +14,6 @@ obj-$(CONFIG_OF_MTD) += of_mtd.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
obj-$(CONFIG_OF_OVERLAY) += overlay.o
+obj-$(CONFIG_OF_BATTERYDATA) += of_batterydata.o
obj-$(CONFIG_OF_UNITTEST) += unittest-data/
diff --git a/drivers/of/of_batterydata.c b/drivers/of/of_batterydata.c
new file mode 100644
index 000000000000..4fddbdefce5b
--- /dev/null
+++ b/drivers/of/of_batterydata.c
@@ -0,0 +1,469 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/batterydata-lib.h>
+#include <linux/power_supply.h>
+
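+/*
+ * A LUT node read by this helper is expected to carry these properties
+ * (sizes bounded by the caller's max_cols/max_rows):
+ *	qcom,lut-col-legend	- column legend, one u32 per column
+ *	qcom,lut-row-legend	- row legend; optional, absent means one row
+ *	qcom,lut-data		- rows * cols u32 values, stored row by row
+ */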
+static int of_batterydata_read_lut(const struct device_node *np,
+ int max_cols, int max_rows, int *ncols, int *nrows,
+ int *col_legend_data, int *row_legend_data,
+ int *lut_data)
+{
+ struct property *prop;
+ const __be32 *data;
+ int cols, rows, size, i, j, *out_values;
+
+ prop = of_find_property(np, "qcom,lut-col-legend", NULL);
+ if (!prop) {
+ pr_err("%s: No col legend found\n", np->name);
+ return -EINVAL;
+ } else if (!prop->value) {
+		pr_err("%s: No col legend value found\n", np->name);
+ return -ENODATA;
+ } else if (prop->length > max_cols * sizeof(int)) {
+ pr_err("%s: Too many columns\n", np->name);
+ return -EINVAL;
+ }
+
+ cols = prop->length/sizeof(int);
+ *ncols = cols;
+ data = prop->value;
+ for (i = 0; i < cols; i++)
+ *col_legend_data++ = be32_to_cpup(data++);
+
+ prop = of_find_property(np, "qcom,lut-row-legend", NULL);
+ if (!prop || row_legend_data == NULL) {
+ /* single row lut */
+ rows = 1;
+ } else if (!prop->value) {
+ pr_err("%s: No row legend value found\n", np->name);
+ return -ENODATA;
+ } else if (prop->length > max_rows * sizeof(int)) {
+ pr_err("%s: Too many rows\n", np->name);
+ return -EINVAL;
+ } else {
+ rows = prop->length/sizeof(int);
+ *nrows = rows;
+ data = prop->value;
+ for (i = 0; i < rows; i++)
+ *row_legend_data++ = be32_to_cpup(data++);
+ }
+
+ prop = of_find_property(np, "qcom,lut-data", NULL);
+ if (!prop) {
+ pr_err("prop 'qcom,lut-data' not found\n");
+ return -EINVAL;
+ }
+ data = prop->value;
+ size = prop->length/sizeof(int);
+ if (size != cols * rows) {
+ pr_err("%s: data size mismatch, %dx%d != %d\n",
+ np->name, cols, rows, size);
+ return -EINVAL;
+ }
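+	/* destination rows are max_cols wide, so advance by max_cols per row */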
+ for (i = 0; i < rows; i++) {
+ out_values = lut_data + (max_cols * i);
+ for (j = 0; j < cols; j++) {
+ *out_values++ = be32_to_cpup(data++);
+ pr_debug("Value = %d\n", *(out_values-1));
+ }
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_sf_lut(struct device_node *data_node,
+ const char *name, struct sf_lut *lut)
+{
+ struct device_node *node = of_find_node_by_name(data_node, name);
+ int rc;
+
+ if (!lut) {
+ pr_debug("No lut provided, skipping\n");
+ return 0;
+ } else if (!node) {
+ pr_err("Couldn't find %s node.\n", name);
+ return -EINVAL;
+ }
+
+ rc = of_batterydata_read_lut(node, PC_CC_COLS, PC_CC_ROWS,
+ &lut->cols, &lut->rows, lut->row_entries,
+ lut->percent, *lut->sf);
+ if (rc) {
+ pr_err("Failed to read %s node.\n", name);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_pc_temp_ocv_lut(struct device_node *data_node,
+ const char *name, struct pc_temp_ocv_lut *lut)
+{
+ struct device_node *node = of_find_node_by_name(data_node, name);
+ int rc;
+
+ if (!lut) {
+ pr_debug("No lut provided, skipping\n");
+ return 0;
+ } else if (!node) {
+ pr_err("Couldn't find %s node.\n", name);
+ return -EINVAL;
+ }
+ rc = of_batterydata_read_lut(node, PC_TEMP_COLS, PC_TEMP_ROWS,
+ &lut->cols, &lut->rows, lut->temp, lut->percent,
+ *lut->ocv);
+ if (rc) {
+ pr_err("Failed to read %s node.\n", name);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_ibat_temp_acc_lut(struct device_node *data_node,
+ const char *name, struct ibat_temp_acc_lut *lut)
+{
+ struct device_node *node = of_find_node_by_name(data_node, name);
+ int rc;
+
+ if (!lut) {
+ pr_debug("No lut provided, skipping\n");
+ return 0;
+ } else if (!node) {
+ pr_debug("Couldn't find %s node.\n", name);
+ return 0;
+ }
+ rc = of_batterydata_read_lut(node, ACC_TEMP_COLS, ACC_IBAT_ROWS,
+ &lut->cols, &lut->rows, lut->temp, lut->ibat,
+ *lut->acc);
+ if (rc) {
+ pr_err("Failed to read %s node.\n", name);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_single_row_lut(struct device_node *data_node,
+ const char *name, struct single_row_lut *lut)
+{
+ struct device_node *node = of_find_node_by_name(data_node, name);
+ int rc;
+
+ if (!lut) {
+ pr_debug("No lut provided, skipping\n");
+ return 0;
+ } else if (!node) {
+ pr_err("Couldn't find %s node.\n", name);
+ return -EINVAL;
+ }
+
+ rc = of_batterydata_read_lut(node, MAX_SINGLE_LUT_COLS, 1,
+ &lut->cols, NULL, lut->x, NULL, lut->y);
+ if (rc) {
+ pr_err("Failed to read %s node.\n", name);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int of_batterydata_read_batt_id_kohm(const struct device_node *np,
+ const char *propname, struct batt_ids *batt_ids)
+{
+ struct property *prop;
+ const __be32 *data;
+ int num, i, *id_kohm = batt_ids->kohm;
+
+ prop = of_find_property(np, "qcom,batt-id-kohm", NULL);
+ if (!prop) {
+ pr_err("%s: No battery id resistor found\n", np->name);
+ return -EINVAL;
+ } else if (!prop->value) {
+		pr_err("%s: No battery id resistor value found\n", np->name);
+ return -ENODATA;
+ } else if (prop->length > MAX_BATT_ID_NUM * sizeof(__be32)) {
+ pr_err("%s: Too many battery id resistors\n", np->name);
+ return -EINVAL;
+ }
+
+ num = prop->length/sizeof(__be32);
+ batt_ids->num = num;
+ data = prop->value;
+ for (i = 0; i < num; i++)
+ *id_kohm++ = be32_to_cpup(data++);
+
+ return 0;
+}
+
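+/*
+ * Read the u32 property "qcom,<qpnp_dt_property>" into 'property'. Reads are
+ * chained through 'rc': once a mandatory read fails, later reads are skipped.
+ * A missing optional property leaves -EINVAL in 'property' and keeps rc == 0.
+ */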
+#define OF_PROP_READ(property, qpnp_dt_property, node, rc, optional) \
+do { \
+ if (rc) \
+ break; \
+ rc = of_property_read_u32(node, "qcom," qpnp_dt_property, \
+ &property); \
+ \
+ if ((rc == -EINVAL) && optional) { \
+ property = -EINVAL; \
+ rc = 0; \
+ } else if (rc) { \
+ pr_err("Error reading " #qpnp_dt_property \
+ " property rc = %d\n", rc); \
+ } \
+} while (0)
+
+static int of_batterydata_load_battery_data(struct device_node *node,
+ int best_id_kohm,
+ struct bms_battery_data *batt_data)
+{
+ int rc;
+
+ rc = of_batterydata_read_single_row_lut(node, "qcom,fcc-temp-lut",
+ batt_data->fcc_temp_lut);
+ if (rc)
+ return rc;
+
+ rc = of_batterydata_read_pc_temp_ocv_lut(node,
+ "qcom,pc-temp-ocv-lut",
+ batt_data->pc_temp_ocv_lut);
+ if (rc)
+ return rc;
+
+ rc = of_batterydata_read_sf_lut(node, "qcom,rbatt-sf-lut",
+ batt_data->rbatt_sf_lut);
+ if (rc)
+ return rc;
+
+ rc = of_batterydata_read_ibat_temp_acc_lut(node, "qcom,ibat-acc-lut",
+ batt_data->ibat_acc_lut);
+ if (rc)
+ return rc;
+
+ rc = of_property_read_string(node, "qcom,battery-type",
+ &batt_data->battery_type);
+ if (rc) {
+ pr_err("Error reading qcom,battery-type property rc=%d\n", rc);
+ batt_data->battery_type = NULL;
+ return rc;
+ }
+
+ OF_PROP_READ(batt_data->fcc, "fcc-mah", node, rc, false);
+ OF_PROP_READ(batt_data->default_rbatt_mohm,
+ "default-rbatt-mohm", node, rc, false);
+ OF_PROP_READ(batt_data->rbatt_capacitive_mohm,
+ "rbatt-capacitive-mohm", node, rc, false);
+ OF_PROP_READ(batt_data->flat_ocv_threshold_uv,
+ "flat-ocv-threshold-uv", node, rc, true);
+ OF_PROP_READ(batt_data->max_voltage_uv,
+ "max-voltage-uv", node, rc, true);
+ OF_PROP_READ(batt_data->cutoff_uv, "v-cutoff-uv", node, rc, true);
+ OF_PROP_READ(batt_data->iterm_ua, "chg-term-ua", node, rc, true);
+ OF_PROP_READ(batt_data->fastchg_current_ma,
+ "fastchg-current-ma", node, rc, true);
+ OF_PROP_READ(batt_data->fg_cc_cv_threshold_mv,
+ "fg-cc-cv-threshold-mv", node, rc, true);
+
+ batt_data->batt_id_kohm = best_id_kohm;
+
+ return rc;
+}
+
+static int64_t of_batterydata_convert_battery_id_kohm(int batt_id_uv,
+ int rpull_up, int vadc_vdd)
+{
+ int64_t resistor_value_kohm, denom;
+
+ if (batt_id_uv == 0) {
+ /* vadc not correct or batt id line grounded, report 0 kohms */
+ return 0;
+ }
+ /* calculate the battery id resistance reported via ADC */
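+	/*
+	 * The id pin sits on a divider against rpull_up:
+	 *   batt_id_uv = vadc_vdd * R_id / (R_id + rpull_up)
+	 * => R_id = rpull_up / (vadc_vdd / batt_id_uv - 1)
+	 * The 1000000LL scaling keeps precision and denom/2 rounds the result.
+	 */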
+ denom = div64_s64(vadc_vdd * 1000000LL, batt_id_uv) - 1000000LL;
+
+ if (denom == 0) {
+ /* batt id connector might be open, return 0 kohms */
+ return 0;
+ }
+ resistor_value_kohm = div64_s64(rpull_up * 1000000LL + denom/2, denom);
+
+ pr_debug("batt id voltage = %d, resistor value = %lld\n",
+ batt_id_uv, resistor_value_kohm);
+
+ return resistor_value_kohm;
+}
+
+struct device_node *of_batterydata_get_best_profile(
+ const struct device_node *batterydata_container_node,
+ const char *psy_name, const char *batt_type)
+{
+ struct batt_ids batt_ids;
+ struct device_node *node, *best_node = NULL;
+ struct power_supply *psy;
+ const char *battery_type = NULL;
+ union power_supply_propval ret = {0, };
+ int delta = 0, best_delta = 0, best_id_kohm = 0, id_range_pct,
+ batt_id_kohm = 0, i = 0, rc = 0, limit = 0;
+ bool in_range = false;
+
+ psy = power_supply_get_by_name(psy_name);
+ if (!psy) {
+ pr_err("%s supply not found. defer\n", psy_name);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ rc = psy->get_property(psy, POWER_SUPPLY_PROP_RESISTANCE_ID, &ret);
+ if (rc) {
+ pr_err("failed to retrieve resistance value rc=%d\n", rc);
+ return ERR_PTR(-ENOSYS);
+ }
+
+ batt_id_kohm = ret.intval / 1000;
+
+ /* read battery id range percentage for best profile */
+ rc = of_property_read_u32(batterydata_container_node,
+ "qcom,batt-id-range-pct", &id_range_pct);
+
+ if (rc) {
+ if (rc == -EINVAL) {
+ id_range_pct = 0;
+ } else {
+ pr_err("failed to read battery id range\n");
+ return ERR_PTR(-ENXIO);
+ }
+ }
+
+ /*
+ * Find the battery data with a battery id resistor closest to this one
+ */
+ for_each_child_of_node(batterydata_container_node, node) {
+ if (batt_type != NULL) {
+ rc = of_property_read_string(node, "qcom,battery-type",
+ &battery_type);
+ if (!rc && strcmp(battery_type, batt_type) == 0) {
+ best_node = node;
+ best_id_kohm = batt_id_kohm;
+ break;
+ }
+ } else {
+ rc = of_batterydata_read_batt_id_kohm(node,
+ "qcom,batt-id-kohm",
+ &batt_ids);
+ if (rc)
+ continue;
+ for (i = 0; i < batt_ids.num; i++) {
+ delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+ limit = (batt_ids.kohm[i] * id_range_pct) / 100;
+ in_range = (delta <= limit);
+ /*
+ * Check if the delta is the lowest one
+ * and also if the limits are in range
+ * before selecting the best node.
+ */
+ if ((delta < best_delta || !best_node)
+ && in_range) {
+ best_node = node;
+ best_delta = delta;
+ best_id_kohm = batt_ids.kohm[i];
+ }
+ }
+ }
+ }
+
+ if (best_node == NULL) {
+ pr_err("No battery data found\n");
+ return best_node;
+ }
+
+ /* check that profile id is in range of the measured batt_id */
+ if (abs(best_id_kohm - batt_id_kohm) >
+ ((best_id_kohm * id_range_pct) / 100)) {
+		pr_err("out of range: profile id %d batt id %d pct %d\n",
+ best_id_kohm, batt_id_kohm, id_range_pct);
+ return NULL;
+ }
+
+ rc = of_property_read_string(best_node, "qcom,battery-type",
+ &battery_type);
+ if (!rc)
+ pr_info("%s found\n", battery_type);
+ else
+ pr_info("%s found\n", best_node->name);
+
+ return best_node;
+}
+
+int of_batterydata_read_data(struct device_node *batterydata_container_node,
+ struct bms_battery_data *batt_data,
+ int batt_id_uv)
+{
+ struct device_node *node, *best_node;
+ struct batt_ids batt_ids;
+ const char *battery_type = NULL;
+ int delta, best_delta, batt_id_kohm, rpull_up_kohm,
+ vadc_vdd_uv, best_id_kohm, i, rc = 0;
+
+ node = batterydata_container_node;
+ OF_PROP_READ(rpull_up_kohm, "rpull-up-kohm", node, rc, false);
+ OF_PROP_READ(vadc_vdd_uv, "vref-batt-therm", node, rc, false);
+ if (rc)
+ return rc;
+
+ batt_id_kohm = of_batterydata_convert_battery_id_kohm(batt_id_uv,
+ rpull_up_kohm, vadc_vdd_uv);
+ best_node = NULL;
+ best_delta = 0;
+ best_id_kohm = 0;
+
+ /*
+ * Find the battery data with a battery id resistor closest to this one
+ */
+ for_each_child_of_node(batterydata_container_node, node) {
+ rc = of_batterydata_read_batt_id_kohm(node,
+ "qcom,batt-id-kohm",
+ &batt_ids);
+ if (rc)
+ continue;
+ for (i = 0; i < batt_ids.num; i++) {
+ delta = abs(batt_ids.kohm[i] - batt_id_kohm);
+ if (delta < best_delta || !best_node) {
+ best_node = node;
+ best_delta = delta;
+ best_id_kohm = batt_ids.kohm[i];
+ }
+ }
+ }
+
+ if (best_node == NULL) {
+ pr_err("No battery data found\n");
+ return -ENODATA;
+ }
+ rc = of_property_read_string(best_node, "qcom,battery-type",
+ &battery_type);
+ if (!rc)
+ pr_info("%s loaded\n", battery_type);
+ else
+ pr_info("%s loaded\n", best_node->name);
+
+ return of_batterydata_load_battery_data(best_node,
+ best_id_kohm, batt_data);
+}
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 5bd93a25d2ce..d208ab5546c8 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -7,3 +7,5 @@ endif
source "drivers/platform/goldfish/Kconfig"
source "drivers/platform/chrome/Kconfig"
+
+source "drivers/platform/msm/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index ca2692510733..74f615feaf66 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_MIPS) += mips/
obj-$(CONFIG_OLPC) += olpc/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_CHROME_PLATFORMS) += chrome/
+obj-$(CONFIG_ARCH_QCOM) += msm/
+
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
new file mode 100644
index 000000000000..2f394f3fdcee
--- /dev/null
+++ b/drivers/platform/msm/Kconfig
@@ -0,0 +1,48 @@
+menu "Qualcomm MSM specific device drivers"
+ depends on ARCH_QCOM
+
+config QPNP_POWER_ON
+ tristate "QPNP PMIC POWER-ON Driver"
+ depends on OF_SPMI && (SPMI || MSM_SPMI) && MSM_QPNP_INT && INPUT
+ help
+	  This driver supports the power-on functionality on Qualcomm
+	  QPNP PMICs. It currently supports reporting the change in status
+	  of the KPDPWR_N line (connected to the power key).
+
+config QPNP_REVID
+ tristate "QPNP Revision ID Peripheral"
+ depends on SPMI || MSM_SPMI
+ help
+ Say 'y' here to include support for the Qualcomm QPNP REVID
+ peripheral. REVID prints out the PMIC type and revision numbers
+ in the kernel log along with the PMIC option status. The PMIC
+ type is mapped to a Qualcomm chip part number and logged as well.
+
+config QPNP_COINCELL
+ tristate "Qualcomm QPNP coincell charger support"
+ depends on (SPMI || MSM_SPMI) && OF_SPMI
+ help
+ This driver supports the QPNP coincell peripheral found inside of
+ Qualcomm QPNP PMIC devices. The coincell charger provides a means to
+ charge a coincell battery or backup capacitor which is used to
+ maintain PMIC register state when the main battery is removed from the
+ mobile device.
+
+config QPNP_HAPTIC
+ tristate "Haptic support for QPNP PMIC"
+ depends on OF_SPMI
+ help
+	  This option enables device driver support for the haptics
+	  peripheral on Qualcomm Technologies' QPNP PMICs. It uses the
+	  Android timed-output framework.
+
+config GPIO_USB_DETECT
+ tristate "GPIO-based USB VBUS Detection"
+ depends on POWER_SUPPLY
+ help
+ This driver supports external USB VBUS detection circuitry whose
+ output is connected to a GPIO. The driver in turn notifies the
+ USB driver of VBUS presence/disconnection using the power_supply
+ framework.
+
+endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
new file mode 100644
index 000000000000..a4ad7e9e558a
--- /dev/null
+++ b/drivers/platform/msm/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the MSM specific device drivers.
+#
+obj-$(CONFIG_QPNP_POWER_ON) += qpnp-power-on.o
+obj-$(CONFIG_QPNP_REVID) += qpnp-revid.o
+obj-$(CONFIG_QPNP_COINCELL) += qpnp-coincell.o
+obj-$(CONFIG_QPNP_HAPTIC) += qpnp-haptic.o
+obj-$(CONFIG_GPIO_USB_DETECT) += gpio-usbdetect.o
diff --git a/drivers/platform/msm/gpio-usbdetect.c b/drivers/platform/msm/gpio-usbdetect.c
new file mode 100644
index 000000000000..3d117e59a823
--- /dev/null
+++ b/drivers/platform/msm/gpio-usbdetect.c
@@ -0,0 +1,146 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+
+struct gpio_usbdetect {
+ struct platform_device *pdev;
+ struct regulator *vin;
+ struct power_supply *usb_psy;
+ int vbus_det_irq;
+};
+
+static irqreturn_t gpio_usbdetect_vbus_irq(int irq, void *data)
+{
+ struct gpio_usbdetect *usb = data;
+ int vbus;
+
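+	/*
+	 * irq_read_line() reports the current line level, so either edge of
+	 * the interrupt maps directly to a VBUS present/absent state.
+	 */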
+ vbus = !!irq_read_line(irq);
+ if (vbus)
+ power_supply_set_supply_type(usb->usb_psy,
+ POWER_SUPPLY_TYPE_USB);
+ else
+ power_supply_set_supply_type(usb->usb_psy,
+ POWER_SUPPLY_TYPE_UNKNOWN);
+
+ power_supply_set_present(usb->usb_psy, vbus);
+ return IRQ_HANDLED;
+}
+
+static int gpio_usbdetect_probe(struct platform_device *pdev)
+{
+ struct gpio_usbdetect *usb;
+ struct power_supply *usb_psy;
+ int rc;
+ unsigned long flags;
+
+ usb_psy = power_supply_get_by_name("usb");
+ if (!usb_psy) {
+ dev_dbg(&pdev->dev, "USB power_supply not found, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ usb = devm_kzalloc(&pdev->dev, sizeof(*usb), GFP_KERNEL);
+ if (!usb)
+ return -ENOMEM;
+
+ usb->pdev = pdev;
+ usb->usb_psy = usb_psy;
+
+ if (of_get_property(pdev->dev.of_node, "vin-supply", NULL)) {
+ usb->vin = devm_regulator_get(&pdev->dev, "vin");
+ if (IS_ERR(usb->vin)) {
+ dev_err(&pdev->dev, "Failed to get VIN regulator: %ld\n",
+ PTR_ERR(usb->vin));
+ return PTR_ERR(usb->vin);
+ }
+ }
+
+ if (usb->vin) {
+ rc = regulator_enable(usb->vin);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to enable VIN regulator: %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ usb->vbus_det_irq = platform_get_irq_byname(pdev, "vbus_det_irq");
+ if (usb->vbus_det_irq < 0) {
+ if (usb->vin)
+ regulator_disable(usb->vin);
+ return usb->vbus_det_irq;
+ }
+
+ rc = devm_request_irq(&pdev->dev, usb->vbus_det_irq,
+ gpio_usbdetect_vbus_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "vbus_det_irq", usb);
+ if (rc) {
+ dev_err(&pdev->dev, "request for vbus_det_irq failed: %d\n",
+ rc);
+ if (usb->vin)
+ regulator_disable(usb->vin);
+ return rc;
+ }
+
+ enable_irq_wake(usb->vbus_det_irq);
+ dev_set_drvdata(&pdev->dev, usb);
+
+ /* Read and report initial VBUS state */
+ local_irq_save(flags);
+ gpio_usbdetect_vbus_irq(usb->vbus_det_irq, usb);
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int gpio_usbdetect_remove(struct platform_device *pdev)
+{
+ struct gpio_usbdetect *usb = dev_get_drvdata(&pdev->dev);
+
+ disable_irq_wake(usb->vbus_det_irq);
+ disable_irq(usb->vbus_det_irq);
+ if (usb->vin)
+ regulator_disable(usb->vin);
+
+ return 0;
+}
+
+static struct of_device_id of_match_table[] = {
+ { .compatible = "qcom,gpio-usbdetect", },
+ {}
+};
+
+static struct platform_driver gpio_usbdetect_driver = {
+ .driver = {
+ .name = "qcom,gpio-usbdetect",
+ .of_match_table = of_match_table,
+ },
+ .probe = gpio_usbdetect_probe,
+ .remove = gpio_usbdetect_remove,
+};
+
+module_driver(gpio_usbdetect_driver, platform_driver_register,
+ platform_driver_unregister);
+
+MODULE_DESCRIPTION("GPIO USB VBUS Detection driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/qpnp-coincell.c b/drivers/platform/msm/qpnp-coincell.c
new file mode 100644
index 000000000000..ec060bbc4171
--- /dev/null
+++ b/drivers/platform/msm/qpnp-coincell.c
@@ -0,0 +1,266 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define QPNP_COINCELL_DRIVER_NAME "qcom,qpnp-coincell"
+
+struct qpnp_coincell {
+ struct spmi_device *spmi_dev;
+ u16 base_addr;
+};
+
+#define QPNP_COINCELL_REG_TYPE 0x04
+#define QPNP_COINCELL_REG_SUBTYPE 0x05
+#define QPNP_COINCELL_REG_RSET 0x44
+#define QPNP_COINCELL_REG_VSET 0x45
+#define QPNP_COINCELL_REG_ENABLE 0x46
+
+#define QPNP_COINCELL_TYPE 0x02
+#define QPNP_COINCELL_SUBTYPE 0x20
+#define QPNP_COINCELL_ENABLE 0x80
+#define QPNP_COINCELL_DISABLE 0x00
+
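+/* charging resistance (ohm) and voltage (mV) options, indexed by field value */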
+static const int qpnp_rset_map[] = {2100, 1700, 1200, 800};
+static const int qpnp_vset_map[] = {2500, 3200, 3100, 3000};
+
+static int qpnp_coincell_set_resistance(struct qpnp_coincell *chip, int rset)
+{
+ int i, rc;
+ u8 reg;
+
+ for (i = 0; i < ARRAY_SIZE(qpnp_rset_map); i++)
+ if (rset == qpnp_rset_map[i])
+ break;
+
+ if (i >= ARRAY_SIZE(qpnp_rset_map)) {
+ pr_err("invalid rset=%d value\n", rset);
+ return -EINVAL;
+ }
+
+ reg = i;
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+ chip->base_addr + QPNP_COINCELL_REG_RSET, &reg, 1);
+ if (rc)
+ dev_err(&chip->spmi_dev->dev, "%s: could not write to RSET register, rc=%d\n",
+ __func__, rc);
+
+ return rc;
+}
+
+static int qpnp_coincell_set_voltage(struct qpnp_coincell *chip, int vset)
+{
+ int i, rc;
+ u8 reg;
+
+ for (i = 0; i < ARRAY_SIZE(qpnp_vset_map); i++)
+ if (vset == qpnp_vset_map[i])
+ break;
+
+ if (i >= ARRAY_SIZE(qpnp_vset_map)) {
+ pr_err("invalid vset=%d value\n", vset);
+ return -EINVAL;
+ }
+
+ reg = i;
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+ chip->base_addr + QPNP_COINCELL_REG_VSET, &reg, 1);
+ if (rc)
+ dev_err(&chip->spmi_dev->dev, "%s: could not write to VSET register, rc=%d\n",
+ __func__, rc);
+
+ return rc;
+}
+
+static int qpnp_coincell_set_charge(struct qpnp_coincell *chip, bool enabled)
+{
+ int rc;
+ u8 reg;
+
+ reg = enabled ? QPNP_COINCELL_ENABLE : QPNP_COINCELL_DISABLE;
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+ chip->base_addr + QPNP_COINCELL_REG_ENABLE, &reg, 1);
+ if (rc)
+ dev_err(&chip->spmi_dev->dev, "%s: could not write to ENABLE register, rc=%d\n",
+ __func__, rc);
+
+ return rc;
+}
+
+static void qpnp_coincell_charger_show_state(struct qpnp_coincell *chip)
+{
+ int rc, rset, vset, temp;
+ bool enabled;
+ u8 reg[QPNP_COINCELL_REG_ENABLE - QPNP_COINCELL_REG_RSET + 1];
+
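+	/* burst-read RSET through ENABLE; index each value relative to RSET */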
+ rc = spmi_ext_register_readl(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+ chip->base_addr + QPNP_COINCELL_REG_RSET, reg, ARRAY_SIZE(reg));
+ if (rc) {
+ dev_err(&chip->spmi_dev->dev, "%s: could not read RSET register, rc=%d\n",
+ __func__, rc);
+ return;
+ }
+
+ temp = reg[QPNP_COINCELL_REG_RSET - QPNP_COINCELL_REG_RSET];
+ if (temp >= ARRAY_SIZE(qpnp_rset_map)) {
+ dev_err(&chip->spmi_dev->dev, "unknown RSET=0x%02X register value\n",
+ temp);
+ return;
+ }
+ rset = qpnp_rset_map[temp];
+
+ temp = reg[QPNP_COINCELL_REG_VSET - QPNP_COINCELL_REG_RSET];
+ if (temp >= ARRAY_SIZE(qpnp_vset_map)) {
+ dev_err(&chip->spmi_dev->dev, "unknown VSET=0x%02X register value\n",
+ temp);
+ return;
+ }
+ vset = qpnp_vset_map[temp];
+
+ temp = reg[QPNP_COINCELL_REG_ENABLE - QPNP_COINCELL_REG_RSET];
+ enabled = temp & QPNP_COINCELL_ENABLE;
+
+ pr_info("enabled=%c, voltage=%d mV, resistance=%d ohm\n",
+ (enabled ? 'Y' : 'N'), vset, rset);
+}
+
+static int qpnp_coincell_check_type(struct qpnp_coincell *chip)
+{
+ int rc;
+ u8 type[2];
+
+ rc = spmi_ext_register_readl(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+ chip->base_addr + QPNP_COINCELL_REG_TYPE, type, 2);
+ if (rc) {
+ dev_err(&chip->spmi_dev->dev, "%s: could not read type register, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ if (type[0] != QPNP_COINCELL_TYPE || type[1] != QPNP_COINCELL_SUBTYPE) {
+ dev_err(&chip->spmi_dev->dev, "%s: invalid type=0x%02X or subtype=0x%02X register value\n",
+ __func__, type[0], type[1]);
+ return -ENODEV;
+ }
+
+ return rc;
+}
+
+static int qpnp_coincell_probe(struct spmi_device *spmi)
+{
+ struct device_node *node = spmi->dev.of_node;
+ struct qpnp_coincell *chip;
+ struct resource *res;
+ u32 temp;
+ int rc = 0;
+
+ if (!node) {
+ dev_err(&spmi->dev, "%s: device node missing\n", __func__);
+ return -ENODEV;
+ }
+
+ chip = devm_kzalloc(&spmi->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&spmi->dev, "%s: cannot allocate qpnp_coincell\n",
+ __func__);
+ return -ENOMEM;
+ }
+ chip->spmi_dev = spmi;
+
+ res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&spmi->dev, "%s: node is missing base address\n",
+ __func__);
+ return -EINVAL;
+ }
+ chip->base_addr = res->start;
+
+ rc = qpnp_coincell_check_type(chip);
+ if (rc)
+ return rc;
+
+ rc = of_property_read_u32(node, "qcom,rset-ohms", &temp);
+ if (!rc) {
+ rc = qpnp_coincell_set_resistance(chip, temp);
+ if (rc)
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,vset-millivolts", &temp);
+ if (!rc) {
+ rc = qpnp_coincell_set_voltage(chip, temp);
+ if (rc)
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,charge-enable", &temp);
+ if (!rc) {
+ rc = qpnp_coincell_set_charge(chip, temp);
+ if (rc)
+ return rc;
+ }
+
+ qpnp_coincell_charger_show_state(chip);
+
+ return 0;
+}
+
+static int qpnp_coincell_remove(struct spmi_device *spmi)
+{
+ return 0;
+}
+
+static struct of_device_id qpnp_coincell_match_table[] = {
+ { .compatible = QPNP_COINCELL_DRIVER_NAME, },
+ {}
+};
+
+static const struct spmi_device_id qpnp_coincell_id[] = {
+ { QPNP_COINCELL_DRIVER_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_coincell_id);
+
+static struct spmi_driver qpnp_coincell_driver = {
+ .driver = {
+ .name = QPNP_COINCELL_DRIVER_NAME,
+ .of_match_table = qpnp_coincell_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = qpnp_coincell_probe,
+ .remove = qpnp_coincell_remove,
+ .id_table = qpnp_coincell_id,
+};
+
+static int __init qpnp_coincell_init(void)
+{
+ return spmi_driver_register(&qpnp_coincell_driver);
+}
+
+static void __exit qpnp_coincell_exit(void)
+{
+ spmi_driver_unregister(&qpnp_coincell_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC coincell charger driver");
+MODULE_LICENSE("GPL v2");
+
+module_init(qpnp_coincell_init);
+module_exit(qpnp_coincell_exit);
diff --git a/drivers/platform/msm/qpnp-haptic.c b/drivers/platform/msm/qpnp-haptic.c
new file mode 100644
index 000000000000..45be1b11a2c2
--- /dev/null
+++ b/drivers/platform/msm/qpnp-haptic.c
@@ -0,0 +1,2342 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/qpnp/pwm.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/qpnp/qpnp-haptic.h>
+#include "../../staging/android/timed_output.h"
+
+#define QPNP_IRQ_FLAGS (IRQF_TRIGGER_RISING | \
+ IRQF_TRIGGER_FALLING | \
+ IRQF_ONESHOT)
+
+#define QPNP_HAP_STATUS(b) (b + 0x0A)
+#define QPNP_HAP_LRA_AUTO_RES_LO(b) (b + 0x0B)
+#define QPNP_HAP_LRA_AUTO_RES_HI(b) (b + 0x0C)
+#define QPNP_HAP_EN_CTL_REG(b) (b + 0x46)
+#define QPNP_HAP_EN_CTL2_REG(b) (b + 0x48)
+#define QPNP_HAP_ACT_TYPE_REG(b) (b + 0x4C)
+#define QPNP_HAP_WAV_SHAPE_REG(b) (b + 0x4D)
+#define QPNP_HAP_PLAY_MODE_REG(b) (b + 0x4E)
+#define QPNP_HAP_LRA_AUTO_RES_REG(b) (b + 0x4F)
+#define QPNP_HAP_VMAX_REG(b) (b + 0x51)
+#define QPNP_HAP_ILIM_REG(b) (b + 0x52)
+#define QPNP_HAP_SC_DEB_REG(b) (b + 0x53)
+#define QPNP_HAP_RATE_CFG1_REG(b) (b + 0x54)
+#define QPNP_HAP_RATE_CFG2_REG(b) (b + 0x55)
+#define QPNP_HAP_INT_PWM_REG(b) (b + 0x56)
+#define QPNP_HAP_EXT_PWM_REG(b) (b + 0x57)
+#define QPNP_HAP_PWM_CAP_REG(b) (b + 0x58)
+#define QPNP_HAP_SC_CLR_REG(b) (b + 0x59)
+#define QPNP_HAP_SC_IRQ_STATUS_DELAY msecs_to_jiffies(1000)
+#define QPNP_HAP_BRAKE_REG(b) (b + 0x5C)
+#define QPNP_HAP_WAV_REP_REG(b) (b + 0x5E)
+#define QPNP_HAP_WAV_S_REG_BASE(b) (b + 0x60)
+#define QPNP_HAP_PLAY_REG(b) (b + 0x70)
+#define QPNP_HAP_SEC_ACCESS_REG(b) (b + 0xD0)
+#define QPNP_HAP_TEST2_REG(b) (b + 0xE3)
+
+#define QPNP_HAP_STATUS_BUSY 0x02
+#define QPNP_HAP_ACT_TYPE_MASK 0xFE
+#define QPNP_HAP_LRA 0x0
+#define QPNP_HAP_ERM 0x1
+#define QPNP_HAP_AUTO_RES_MODE_MASK 0x8F
+#define QPNP_HAP_AUTO_RES_MODE_SHIFT 4
+#define QPNP_HAP_LRA_HIGH_Z_MASK 0xF3
+#define QPNP_HAP_LRA_HIGH_Z_SHIFT 2
+#define QPNP_HAP_LRA_RES_CAL_PER_MASK 0xFC
+#define QPNP_HAP_RES_CAL_PERIOD_MIN 4
+#define QPNP_HAP_RES_CAL_PERIOD_MAX 32
+#define QPNP_HAP_PLAY_MODE_MASK 0xCF
+#define QPNP_HAP_PLAY_MODE_SHFT 4
+#define QPNP_HAP_VMAX_MASK 0xC1
+#define QPNP_HAP_VMAX_SHIFT 1
+#define QPNP_HAP_VMAX_MIN_MV 116
+#define QPNP_HAP_VMAX_MAX_MV 3596
+#define QPNP_HAP_ILIM_MASK 0xFE
+#define QPNP_HAP_ILIM_MIN_MV 400
+#define QPNP_HAP_ILIM_MAX_MV 800
+#define QPNP_HAP_SC_DEB_MASK 0xF8
+#define QPNP_HAP_SC_DEB_SUB 2
+#define QPNP_HAP_SC_DEB_CYCLES_MIN 0
+#define QPNP_HAP_DEF_SC_DEB_CYCLES 8
+#define QPNP_HAP_SC_DEB_CYCLES_MAX 32
+#define QPNP_HAP_SC_CLR 1
+#define QPNP_HAP_INT_PWM_MASK 0xFC
+#define QPNP_HAP_INT_PWM_FREQ_253_KHZ 253
+#define QPNP_HAP_INT_PWM_FREQ_505_KHZ 505
+#define QPNP_HAP_INT_PWM_FREQ_739_KHZ 739
+#define QPNP_HAP_INT_PWM_FREQ_1076_KHZ 1076
+#define QPNP_HAP_WAV_SHAPE_MASK 0xFE
+#define QPNP_HAP_RATE_CFG1_MASK 0xFF
+#define QPNP_HAP_RATE_CFG2_MASK 0xF0
+#define QPNP_HAP_RATE_CFG2_SHFT 8
+#define QPNP_HAP_RATE_CFG_STEP_US 5
+#define QPNP_HAP_WAV_PLAY_RATE_US_MIN 0
+#define QPNP_HAP_DEF_WAVE_PLAY_RATE_US 5715
+#define QPNP_HAP_WAV_PLAY_RATE_US_MAX 20475
+#define QPNP_HAP_WAV_REP_MASK 0x8F
+#define QPNP_HAP_WAV_S_REP_MASK 0xFC
+#define QPNP_HAP_WAV_REP_SHFT 4
+#define QPNP_HAP_WAV_REP_MIN 1
+#define QPNP_HAP_WAV_REP_MAX 128
+#define QPNP_HAP_WAV_S_REP_MIN 1
+#define QPNP_HAP_WAV_S_REP_MAX 8
+#define QPNP_HAP_BRAKE_PAT_MASK 0x3
+#define QPNP_HAP_ILIM_MIN_MA 400
+#define QPNP_HAP_ILIM_MAX_MA 800
+#define QPNP_HAP_EXT_PWM_MASK 0xFC
+#define QPNP_HAP_EXT_PWM_FREQ_25_KHZ 25
+#define QPNP_HAP_EXT_PWM_FREQ_50_KHZ 50
+#define QPNP_HAP_EXT_PWM_FREQ_75_KHZ 75
+#define QPNP_HAP_EXT_PWM_FREQ_100_KHZ 100
+#define PWM_MAX_DTEST_LINES 4
+#define QPNP_HAP_EXT_PWM_DTEST_MASK 0x0F
+#define QPNP_HAP_EXT_PWM_DTEST_SHFT 4
+#define QPNP_HAP_EXT_PWM_PEAK_DATA 0x7F
+#define QPNP_HAP_EXT_PWM_HALF_DUTY 50
+#define QPNP_HAP_EXT_PWM_FULL_DUTY 100
+#define QPNP_HAP_EXT_PWM_DATA_FACTOR 39
+#define QPNP_HAP_WAV_SINE 0
+#define QPNP_HAP_WAV_SQUARE 1
+#define QPNP_HAP_WAV_SAMP_LEN 8
+#define QPNP_HAP_WAV_SAMP_MAX 0x7E
+#define QPNP_HAP_BRAKE_PAT_LEN 4
+#define QPNP_HAP_PLAY_EN 0x80
+#define QPNP_HAP_EN 0x80
+#define QPNP_HAP_BRAKE_MASK 0xFE
+#define QPNP_HAP_TEST2_AUTO_RES_MASK 0x7F
+#define QPNP_HAP_SEC_UNLOCK 0xA5
+#define AUTO_RES_ENABLE 0x80
+#define AUTO_RES_DISABLE 0x00
+#define AUTO_RES_ERR_BIT 0x10
+#define SC_FOUND_BIT 0x08
+#define SC_MAX_DURATION 5
+
+#define QPNP_HAP_TIMEOUT_MS_MAX 15000
+#define QPNP_HAP_STR_SIZE 20
+#define QPNP_HAP_MAX_RETRIES 5
+#define QPNP_HAP_CYCLS 5
+#define QPNP_TEST_TIMER_MS 5
+
+#define AUTO_RES_ENABLE_TIMEOUT 20000
+#define AUTO_RES_ERR_CAPTURE_RES 5
+#define AUTO_RES_ERR_MAX 15
+
+#define MISC_TRIM_ERROR_RC19P2_CLK 0x09F5
+#define MISC_SEC_ACCESS 0x09D0
+#define MISC_SEC_UNLOCK 0xA5
+#define PMI8950_MISC_SID 2
+
+#define POLL_TIME_AUTO_RES_ERR_NS (5 * NSEC_PER_MSEC)
+
+#define LRA_POS_FREQ_COUNT 6
+int lra_play_rate_code[LRA_POS_FREQ_COUNT];
+
+/* haptic debug register set */
+static u8 qpnp_hap_dbg_regs[] = {
+ 0x0a, 0x0b, 0x0c, 0x46, 0x48, 0x4c, 0x4d, 0x4e, 0x4f, 0x51, 0x52, 0x53,
+ 0x54, 0x55, 0x56, 0x57, 0x58, 0x5c, 0x5e, 0x60, 0x61, 0x62, 0x63, 0x64,
+ 0x65, 0x66, 0x67, 0x70, 0xE3,
+};
+
+/* ramp up/down test sequence */
+static u8 qpnp_hap_ramp_test_data[] = {
+ 0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+ 0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+ 0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+ 0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+ 0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+ 0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+ 0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+ 0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+ 0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+ 0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+ 0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+ 0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+ 0x0, 0x19, 0x32, 0x4C, 0x65, 0x7F, 0x65, 0x4C, 0x32, 0x19,
+ 0x0, 0x99, 0xB2, 0xCC, 0xE5, 0xFF, 0xE5, 0xCC, 0xB2, 0x99,
+};
+
+/* alternate max and min sequence */
+static u8 qpnp_hap_min_max_test_data[] = {
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+ 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF, 0x0, 0x7F, 0x0, 0xFF,
+};
+
+/*
+ * auto resonance mode
+ * ZXD - Zero Cross Detect
+ * QWD - Quarter Wave Drive
+ * ZXD_EOP - ZXD with End Of Pattern
+ */
+enum qpnp_hap_auto_res_mode {
+ QPNP_HAP_AUTO_RES_NONE,
+ QPNP_HAP_AUTO_RES_ZXD,
+ QPNP_HAP_AUTO_RES_QWD,
+ QPNP_HAP_AUTO_RES_MAX_QWD,
+ QPNP_HAP_AUTO_RES_ZXD_EOP,
+};
+
+/* high Z option lines */
+enum qpnp_hap_high_z {
+ QPNP_HAP_LRA_HIGH_Z_NONE,
+ QPNP_HAP_LRA_HIGH_Z_OPT1,
+ QPNP_HAP_LRA_HIGH_Z_OPT2,
+ QPNP_HAP_LRA_HIGH_Z_OPT3,
+};
+
+/* play modes */
+enum qpnp_hap_mode {
+ QPNP_HAP_DIRECT,
+ QPNP_HAP_BUFFER,
+ QPNP_HAP_AUDIO,
+ QPNP_HAP_PWM,
+};
+
+/* pwm channel info */
+struct qpnp_pwm_info {
+ struct pwm_device *pwm_dev;
+ u32 pwm_channel;
+ u32 duty_us;
+ u32 period_us;
+};
+
+/*
+ * qpnp_hap - Haptic data structure
+ * @ spmi - spmi device
+ * @ hap_timer - hrtimer
+ * @ auto_res_err_poll_timer - hrtimer for auto-resonance error
+ * @ timed_dev - timed output device
+ * @ work - worker
+ * @ auto_res_err_work - correct auto resonance error
+ * @ sc_work - worker to handle short circuit condition
+ * @ pwm_info - pwm info
+ * @ lock - mutex lock
+ * @ wf_lock - mutex lock for waveform
+ * @ play_mode - play mode
+ * @ auto_res_mode - auto resonance mode
+ * @ lra_high_z - high z option line
+ * @ timeout_ms - max timeout in ms
+ * @ vmax_mv - max voltage in mv
+ * @ ilim_ma - limiting current in ma
+ * @ sc_deb_cycles - short circuit debounce cycles
+ * @ int_pwm_freq_khz - internal pwm frequency in khz
+ * @ wave_play_rate_us - play rate for waveform
+ * @ ext_pwm_freq_khz - external pwm frequency in khz
+ * @ wave_rep_cnt - waveform repeat count
+ * @ wave_s_rep_cnt - waveform sample repeat count
+ * @ play_irq - irq for play
+ * @ sc_irq - irq for short circuit
+ * @ base - base address
+ * @ act_type - actuator type
+ * @ wave_shape - waveform shape
+ * @ wave_samp - array of wave samples
+ * @ shadow_wave_samp - shadow array of wave samples
+ * @ brake_pat - pattern for active braking
+ * @ reg_en_ctl - enable control register
+ * @ reg_play - play register
+ * @ lra_res_cal_period - period for resonance calibration
+ * @ sc_duration - counter to determine the duration of short circuit condition
+ * @ state - current state of haptics
+ * @ use_play_irq - play irq usage state
+ * @ use_sc_irq - short circuit irq usage state
+ * @ wf_update - waveform update flag
+ * @ pwm_cfg_state - pwm mode configuration state
+ * @ buffer_cfg_state - buffer mode configuration state
+ * @ en_brake - brake state
+ * @ sup_brake_pat - support custom brake pattern
+ * @ correct_lra_drive_freq - correct LRA Drive Frequency
+ * @ misc_trim_error_rc19p2_clk_reg_present - if MISC Trim Error reg is present
+ */
+struct qpnp_hap {
+ struct spmi_device *spmi;
+ struct regulator *vcc_pon;
+ struct hrtimer hap_timer;
+ struct hrtimer auto_res_err_poll_timer;
+ struct timed_output_dev timed_dev;
+ struct work_struct work;
+ struct work_struct auto_res_err_work;
+ struct delayed_work sc_work;
+ struct hrtimer hap_test_timer;
+ struct work_struct test_work;
+ struct qpnp_pwm_info pwm_info;
+ struct mutex lock;
+ struct mutex wf_lock;
+ struct completion completion;
+ enum qpnp_hap_mode play_mode;
+ enum qpnp_hap_auto_res_mode auto_res_mode;
+ enum qpnp_hap_high_z lra_high_z;
+ u32 timeout_ms;
+ u32 vmax_mv;
+ u32 ilim_ma;
+ u32 sc_deb_cycles;
+ u32 int_pwm_freq_khz;
+ u32 wave_play_rate_us;
+ u32 ext_pwm_freq_khz;
+ u32 wave_rep_cnt;
+ u32 wave_s_rep_cnt;
+ u32 play_irq;
+ u32 sc_irq;
+ u16 base;
+ u8 act_type;
+ u8 wave_shape;
+ u8 wave_samp[QPNP_HAP_WAV_SAMP_LEN];
+ u8 shadow_wave_samp[QPNP_HAP_WAV_SAMP_LEN];
+ u8 brake_pat[QPNP_HAP_BRAKE_PAT_LEN];
+ u8 reg_en_ctl;
+ u8 reg_play;
+ u8 lra_res_cal_period;
+ u8 sc_duration;
+ u8 ext_pwm_dtest_line;
+ bool state;
+ bool use_play_irq;
+ bool use_sc_irq;
+ bool manage_pon_supply;
+ bool wf_update;
+ bool pwm_cfg_state;
+ bool buffer_cfg_state;
+ bool en_brake;
+ bool sup_brake_pat;
+ bool correct_lra_drive_freq;
+ bool misc_trim_error_rc19p2_clk_reg_present;
+};
+
+static struct qpnp_hap *ghap;
+
+/* helper to read a pmic register */
+static int qpnp_hap_read_reg(struct qpnp_hap *hap, u8 *data, u16 addr)
+{
+ int rc;
+
+ rc = spmi_ext_register_readl(hap->spmi->ctrl, hap->spmi->sid,
+ addr, data, 1);
+ if (rc < 0)
+ dev_err(&hap->spmi->dev,
+ "Error reading address: %X - ret %X\n", addr, rc);
+
+ return rc;
+}
+
+/* helper to write a pmic register */
+static int qpnp_hap_write_reg(struct qpnp_hap *hap, u8 *data, u16 addr)
+{
+ int rc;
+
+ rc = spmi_ext_register_writel(hap->spmi->ctrl, hap->spmi->sid,
+ addr, data, 1);
+ if (rc < 0)
+ dev_err(&hap->spmi->dev,
+ "Error writing address: %X - ret %X\n", addr, rc);
+
+ dev_dbg(&hap->spmi->dev, "write: HAP_0x%x = 0x%x\n", addr, *data);
+ return rc;
+}
+
+/* helper to access secure registers */
+static int qpnp_hap_sec_access(struct qpnp_hap *hap)
+{
+ int rc;
+ u8 reg = QPNP_HAP_SEC_UNLOCK;
+
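+	/*
+	 * Write the unlock code so that the following write to a secured
+	 * register (such as TEST2) is accepted.
+	 */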
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_SEC_ACCESS_REG(hap->base));
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static void qpnp_handle_sc_irq(struct work_struct *work)
+{
+ struct qpnp_hap *hap = container_of(work,
+ struct qpnp_hap, sc_work.work);
+ u8 val, reg;
+
+ qpnp_hap_read_reg(hap, &val, QPNP_HAP_STATUS(hap->base));
+
+ /* clear short circuit register */
+ if (val & SC_FOUND_BIT) {
+ hap->sc_duration++;
+ reg = QPNP_HAP_SC_CLR;
+ qpnp_hap_write_reg(hap, &reg, QPNP_HAP_SC_CLR_REG(hap->base));
+ }
+}
+
+static int qpnp_hap_mod_enable(struct qpnp_hap *hap, int on)
+{
+ u8 val;
+ int rc, i;
+
+ val = hap->reg_en_ctl;
+ if (on) {
+ val |= QPNP_HAP_EN;
+ } else {
+ for (i = 0; i < QPNP_HAP_MAX_RETRIES; i++) {
+ /* wait for 4 cycles of play rate */
+ unsigned long sleep_time =
+ QPNP_HAP_CYCLS * hap->wave_play_rate_us;
+
+ rc = qpnp_hap_read_reg(hap, &val,
+ QPNP_HAP_STATUS(hap->base));
+
+ dev_dbg(&hap->spmi->dev, "HAP_STATUS=0x%x\n", val);
+
+ /* wait for QPNP_HAP_CYCLS cycles of play rate */
+ if (val & QPNP_HAP_STATUS_BUSY) {
+ usleep_range(sleep_time, sleep_time + 1);
+ if (hap->play_mode == QPNP_HAP_DIRECT ||
+ hap->play_mode == QPNP_HAP_PWM)
+ break;
+ } else
+ break;
+ }
+
+ if (i >= QPNP_HAP_MAX_RETRIES)
+ dev_dbg(&hap->spmi->dev,
+ "Haptics Busy. Force disable\n");
+
+ val &= ~QPNP_HAP_EN;
+ }
+
+ rc = qpnp_hap_write_reg(hap, &val,
+ QPNP_HAP_EN_CTL_REG(hap->base));
+ if (rc < 0)
+ return rc;
+
+ hap->reg_en_ctl = val;
+
+ return 0;
+}
+
+static int qpnp_hap_play(struct qpnp_hap *hap, int on)
+{
+ u8 val;
+ int rc;
+
+ val = hap->reg_play;
+ if (on)
+ val |= QPNP_HAP_PLAY_EN;
+ else
+ val &= ~QPNP_HAP_PLAY_EN;
+
+ rc = qpnp_hap_write_reg(hap, &val,
+ QPNP_HAP_PLAY_REG(hap->base));
+ if (rc < 0)
+ return rc;
+
+ hap->reg_play = val;
+
+ return 0;
+}
+
+/* sysfs show debug registers */
+static ssize_t qpnp_hap_dump_regs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+ int count = 0, i;
+ u8 val;
+
+ for (i = 0; i < ARRAY_SIZE(qpnp_hap_dbg_regs); i++) {
+ qpnp_hap_read_reg(hap, &val, hap->base + qpnp_hap_dbg_regs[i]);
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "qpnp_haptics: REG_0x%x = 0x%x\n",
+ hap->base + qpnp_hap_dbg_regs[i],
+ val);
+
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
+ }
+
+ return count;
+}
+
+/* play irq handler */
+static irqreturn_t qpnp_hap_play_irq(int irq, void *_hap)
+{
+ struct qpnp_hap *hap = _hap;
+ int i, rc;
+ u8 reg;
+
+ mutex_lock(&hap->wf_lock);
+
+ /* Configure WAVE_SAMPLE1 to WAVE_SAMPLE8 register */
+ for (i = 0; i < QPNP_HAP_WAV_SAMP_LEN && hap->wf_update; i++) {
+ reg = hap->wave_samp[i] = hap->shadow_wave_samp[i];
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_WAV_S_REG_BASE(hap->base) + i);
+ if (rc)
+ goto unlock;
+ }
+ hap->wf_update = false;
+
+unlock:
+ mutex_unlock(&hap->wf_lock);
+
+ return IRQ_HANDLED;
+}
+
+/* short circuit irq handler */
+static irqreturn_t qpnp_hap_sc_irq(int irq, void *_hap)
+{
+ struct qpnp_hap *hap = _hap;
+ int rc;
+ u8 disable_haptics = 0x00;
+ u8 val;
+
+ dev_dbg(&hap->spmi->dev, "Short circuit detected\n");
+
+ if (hap->sc_duration < SC_MAX_DURATION) {
+ qpnp_hap_read_reg(hap, &val, QPNP_HAP_STATUS(hap->base));
+ if (val & SC_FOUND_BIT)
+ schedule_delayed_work(&hap->sc_work,
+ QPNP_HAP_SC_IRQ_STATUS_DELAY);
+ else
+ hap->sc_duration = 0;
+ } else {
+ /* Disable haptics module if the duration of short circuit
+ * exceeds the maximum limit (5 secs).
+ */
+ rc = qpnp_hap_write_reg(hap, &disable_haptics,
+ QPNP_HAP_EN_CTL_REG(hap->base));
+ dev_err(&hap->spmi->dev,
+ "Haptics disabled permanently due to short circuit\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* configuration api for buffer mode */
+static int qpnp_hap_buffer_config(struct qpnp_hap *hap)
+{
+ u8 reg = 0;
+ int rc, i, temp;
+
+ /* Configure the WAVE_REPEAT register */
+ if (hap->wave_rep_cnt < QPNP_HAP_WAV_REP_MIN)
+ hap->wave_rep_cnt = QPNP_HAP_WAV_REP_MIN;
+ else if (hap->wave_rep_cnt > QPNP_HAP_WAV_REP_MAX)
+ hap->wave_rep_cnt = QPNP_HAP_WAV_REP_MAX;
+
+ if (hap->wave_s_rep_cnt < QPNP_HAP_WAV_S_REP_MIN)
+ hap->wave_s_rep_cnt = QPNP_HAP_WAV_S_REP_MIN;
+ else if (hap->wave_s_rep_cnt > QPNP_HAP_WAV_S_REP_MAX)
+ hap->wave_s_rep_cnt = QPNP_HAP_WAV_S_REP_MAX;
+
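+	/*
+	 * Both repeat counts are powers of two; the register fields hold
+	 * fls(count) - 1, i.e. log2 of the clamped count.
+	 */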
+ rc = qpnp_hap_read_reg(hap, &reg,
+ QPNP_HAP_WAV_REP_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_WAV_REP_MASK;
+ temp = fls(hap->wave_rep_cnt) - 1;
+ reg |= (temp << QPNP_HAP_WAV_REP_SHFT);
+ reg &= QPNP_HAP_WAV_S_REP_MASK;
+ temp = fls(hap->wave_s_rep_cnt) - 1;
+ reg |= temp;
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_WAV_REP_REG(hap->base));
+ if (rc)
+ return rc;
+
+ /* Configure WAVE_SAMPLE1 to WAVE_SAMPLE8 register */
+ for (i = 0, reg = 0; i < QPNP_HAP_WAV_SAMP_LEN; i++) {
+ reg = hap->wave_samp[i];
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_WAV_S_REG_BASE(hap->base) + i);
+ if (rc)
+ return rc;
+ }
+
+ /* setup play irq */
+ if (hap->use_play_irq) {
+ rc = devm_request_threaded_irq(&hap->spmi->dev, hap->play_irq,
+ NULL, qpnp_hap_play_irq,
+ QPNP_IRQ_FLAGS,
+ "qpnp_play_irq", hap);
+ if (rc < 0) {
+ dev_err(&hap->spmi->dev,
+ "Unable to request play(%d) IRQ(err:%d)\n",
+ hap->play_irq, rc);
+ return rc;
+ }
+ }
+
+ hap->buffer_cfg_state = true;
+ return 0;
+}
+
+/* configuration api for pwm */
+static int qpnp_hap_pwm_config(struct qpnp_hap *hap)
+{
+ u8 reg = 0;
+ int rc, temp;
+
+ /* Configure the EXTERNAL_PWM register */
+ if (hap->ext_pwm_freq_khz <= QPNP_HAP_EXT_PWM_FREQ_25_KHZ) {
+ hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_25_KHZ;
+ temp = 0;
+ } else if (hap->ext_pwm_freq_khz <=
+ QPNP_HAP_EXT_PWM_FREQ_50_KHZ) {
+ hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_50_KHZ;
+ temp = 1;
+ } else if (hap->ext_pwm_freq_khz <=
+ QPNP_HAP_EXT_PWM_FREQ_75_KHZ) {
+ hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_75_KHZ;
+ temp = 2;
+ } else {
+ hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_100_KHZ;
+ temp = 3;
+ }
+
+ rc = qpnp_hap_read_reg(hap, &reg,
+ QPNP_HAP_EXT_PWM_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_EXT_PWM_MASK;
+ reg |= temp;
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_EXT_PWM_REG(hap->base));
+ if (rc)
+ return rc;
+
+ rc = qpnp_hap_read_reg(hap, &reg,
+ QPNP_HAP_TEST2_REG(hap->base));
+ if (rc)
+ return rc;
+ if (!hap->ext_pwm_dtest_line ||
+ hap->ext_pwm_dtest_line > PWM_MAX_DTEST_LINES) {
+ dev_err(&hap->spmi->dev, "invalid dtest line\n");
+ return -EINVAL;
+ }
+
+ /* disable auto res for PWM mode */
+ reg &= QPNP_HAP_EXT_PWM_DTEST_MASK;
+ temp = hap->ext_pwm_dtest_line << QPNP_HAP_EXT_PWM_DTEST_SHFT;
+ reg |= temp;
+
+ /* TEST2 is a secure access register */
+ rc = qpnp_hap_sec_access(hap);
+ if (rc)
+ return rc;
+
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_TEST2_REG(hap->base));
+ if (rc)
+ return rc;
+
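+	/* pwm_config() expects nanoseconds; duty/period are kept in us */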
+ rc = pwm_config(hap->pwm_info.pwm_dev,
+ hap->pwm_info.duty_us * NSEC_PER_USEC,
+ hap->pwm_info.period_us * NSEC_PER_USEC);
+ if (rc < 0) {
+ dev_err(&hap->spmi->dev, "hap pwm config failed\n");
+ pwm_free(hap->pwm_info.pwm_dev);
+ return -ENODEV;
+ }
+
+ hap->pwm_cfg_state = true;
+
+ return 0;
+}
+
+/* configuration api for play mode */
+static int qpnp_hap_play_mode_config(struct qpnp_hap *hap)
+{
+ u8 reg = 0;
+ int rc, temp;
+
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_PLAY_MODE_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_PLAY_MODE_MASK;
+ temp = hap->play_mode << QPNP_HAP_PLAY_MODE_SHFT;
+ reg |= temp;
+ rc = qpnp_hap_write_reg(hap, &reg, QPNP_HAP_PLAY_MODE_REG(hap->base));
+ if (rc)
+ return rc;
+ return 0;
+}
+
+/* configuration api for max voltage */
+static int qpnp_hap_vmax_config(struct qpnp_hap *hap)
+{
+ u8 reg = 0;
+ int rc, temp;
+
+ if (hap->vmax_mv < QPNP_HAP_VMAX_MIN_MV)
+ hap->vmax_mv = QPNP_HAP_VMAX_MIN_MV;
+ else if (hap->vmax_mv > QPNP_HAP_VMAX_MAX_MV)
+ hap->vmax_mv = QPNP_HAP_VMAX_MAX_MV;
+
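+	/*
+	 * VMAX is programmed in QPNP_HAP_VMAX_MIN_MV (116 mV) steps stored
+	 * in bits [5:1]; divide by the step size and shift into place.
+	 */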
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_VMAX_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_VMAX_MASK;
+ temp = hap->vmax_mv / QPNP_HAP_VMAX_MIN_MV;
+ reg |= (temp << QPNP_HAP_VMAX_SHIFT);
+ rc = qpnp_hap_write_reg(hap, &reg, QPNP_HAP_VMAX_REG(hap->base));
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/* configuration api for short circuit debounce */
+static int qpnp_hap_sc_deb_config(struct qpnp_hap *hap)
+{
+ u8 reg = 0;
+ int rc, temp;
+
+ if (hap->sc_deb_cycles < QPNP_HAP_SC_DEB_CYCLES_MIN)
+ hap->sc_deb_cycles = QPNP_HAP_SC_DEB_CYCLES_MIN;
+ else if (hap->sc_deb_cycles > QPNP_HAP_SC_DEB_CYCLES_MAX)
+ hap->sc_deb_cycles = QPNP_HAP_SC_DEB_CYCLES_MAX;
+
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_SC_DEB_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_SC_DEB_MASK;
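+	/* the debounce field encodes fls(cycles) - 1 - QPNP_HAP_SC_DEB_SUB */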
+ if (hap->sc_deb_cycles) {
+ temp = fls(hap->sc_deb_cycles) - 1;
+ reg |= temp - QPNP_HAP_SC_DEB_SUB;
+ }
+ rc = qpnp_hap_write_reg(hap, &reg, QPNP_HAP_SC_DEB_REG(hap->base));
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/* DT parsing api for buffer mode */
+static int qpnp_hap_parse_buffer_dt(struct qpnp_hap *hap)
+{
+ struct spmi_device *spmi = hap->spmi;
+ struct property *prop;
+ u32 temp;
+ int rc, i;
+
+ hap->wave_rep_cnt = QPNP_HAP_WAV_REP_MIN;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,wave-rep-cnt", &temp);
+ if (!rc) {
+ hap->wave_rep_cnt = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read rep cnt\n");
+ return rc;
+ }
+
+ hap->wave_s_rep_cnt = QPNP_HAP_WAV_S_REP_MIN;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,wave-samp-rep-cnt", &temp);
+ if (!rc) {
+ hap->wave_s_rep_cnt = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read samp rep cnt\n");
+ return rc;
+ }
+
+ prop = of_find_property(spmi->dev.of_node,
+ "qcom,wave-samples", &temp);
+ if (!prop || temp != QPNP_HAP_WAV_SAMP_LEN) {
+		dev_err(&spmi->dev, "Invalid wave samples, using default\n");
+ for (i = 0; i < QPNP_HAP_WAV_SAMP_LEN; i++)
+ hap->wave_samp[i] = QPNP_HAP_WAV_SAMP_MAX;
+ } else {
+ memcpy(hap->wave_samp, prop->value, QPNP_HAP_WAV_SAMP_LEN);
+ }
+
+ hap->use_play_irq = of_property_read_bool(spmi->dev.of_node,
+ "qcom,use-play-irq");
+ if (hap->use_play_irq) {
+ hap->play_irq = spmi_get_irq_byname(hap->spmi,
+ NULL, "play-irq");
+ if (hap->play_irq < 0) {
+ dev_err(&spmi->dev, "Unable to get play irq\n");
+ return hap->play_irq;
+ }
+ }
+
+ return 0;
+}
+
+/* DT parsing api for PWM mode */
+static int qpnp_hap_parse_pwm_dt(struct qpnp_hap *hap)
+{
+ struct spmi_device *spmi = hap->spmi;
+ u32 temp;
+ int rc;
+
+ hap->ext_pwm_freq_khz = QPNP_HAP_EXT_PWM_FREQ_25_KHZ;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,ext-pwm-freq-khz", &temp);
+ if (!rc) {
+ hap->ext_pwm_freq_khz = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read ext pwm freq\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,pwm-channel", &temp);
+ if (!rc)
+ hap->pwm_info.pwm_channel = temp;
+ else
+ return rc;
+
+ hap->pwm_info.pwm_dev = of_pwm_get(spmi->dev.of_node, NULL);
+
+ if (IS_ERR(hap->pwm_info.pwm_dev)) {
+ rc = PTR_ERR(hap->pwm_info.pwm_dev);
+ dev_err(&spmi->dev, "Cannot get PWM device rc:(%d)\n", rc);
+ hap->pwm_info.pwm_dev = NULL;
+ return rc;
+ }
+
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,period-us", &temp);
+ if (!rc)
+ hap->pwm_info.period_us = temp;
+ else
+ return rc;
+
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,duty-us", &temp);
+ if (!rc)
+ hap->pwm_info.duty_us = temp;
+ else
+ return rc;
+
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,ext-pwm-dtest-line", &temp);
+ if (!rc)
+ hap->ext_pwm_dtest_line = temp;
+ else
+ return rc;
+
+ return 0;
+}
+
+/* sysfs show for wave samples */
+static ssize_t qpnp_hap_wf_samp_show(struct device *dev, char *buf, int index)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+
+ if (index < 0 || index >= QPNP_HAP_WAV_SAMP_LEN) {
+ dev_err(dev, "Invalid sample index(%d)\n", index);
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "0x%x\n",
+ hap->shadow_wave_samp[index]);
+}
+
+static ssize_t qpnp_hap_wf_s0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qpnp_hap_wf_samp_show(dev, buf, 0);
+}
+
+static ssize_t qpnp_hap_wf_s1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qpnp_hap_wf_samp_show(dev, buf, 1);
+}
+
+static ssize_t qpnp_hap_wf_s2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qpnp_hap_wf_samp_show(dev, buf, 2);
+}
+
+static ssize_t qpnp_hap_wf_s3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qpnp_hap_wf_samp_show(dev, buf, 3);
+}
+
+static ssize_t qpnp_hap_wf_s4_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qpnp_hap_wf_samp_show(dev, buf, 4);
+}
+
+static ssize_t qpnp_hap_wf_s5_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qpnp_hap_wf_samp_show(dev, buf, 5);
+}
+
+static ssize_t qpnp_hap_wf_s6_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qpnp_hap_wf_samp_show(dev, buf, 6);
+}
+
+static ssize_t qpnp_hap_wf_s7_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return qpnp_hap_wf_samp_show(dev, buf, 7);
+}
+
+/* sysfs store for wave samples */
+static ssize_t qpnp_hap_wf_samp_store(struct device *dev,
+ const char *buf, size_t count, int index)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+ int data;
+
+ if (index < 0 || index >= QPNP_HAP_WAV_SAMP_LEN) {
+ dev_err(dev, "Invalid sample index(%d)\n", index);
+ return -EINVAL;
+ }
+
+ if (sscanf(buf, "%x", &data) != 1)
+ return -EINVAL;
+
+ if (data < 0 || data > 0xff) {
+ dev_err(dev, "Invalid sample wf_%d (%d)\n", index, data);
+ return -EINVAL;
+ }
+
+ hap->shadow_wave_samp[index] = (u8) data;
+ return count;
+}
+
+static ssize_t qpnp_hap_wf_s0_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return qpnp_hap_wf_samp_store(dev, buf, count, 0);
+}
+
+static ssize_t qpnp_hap_wf_s1_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return qpnp_hap_wf_samp_store(dev, buf, count, 1);
+}
+
+static ssize_t qpnp_hap_wf_s2_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return qpnp_hap_wf_samp_store(dev, buf, count, 2);
+}
+
+static ssize_t qpnp_hap_wf_s3_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return qpnp_hap_wf_samp_store(dev, buf, count, 3);
+}
+
+static ssize_t qpnp_hap_wf_s4_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return qpnp_hap_wf_samp_store(dev, buf, count, 4);
+}
+
+static ssize_t qpnp_hap_wf_s5_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return qpnp_hap_wf_samp_store(dev, buf, count, 5);
+}
+
+static ssize_t qpnp_hap_wf_s6_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return qpnp_hap_wf_samp_store(dev, buf, count, 6);
+}
+
+static ssize_t qpnp_hap_wf_s7_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return qpnp_hap_wf_samp_store(dev, buf, count, 7);
+}
+
+/* sysfs show for wave form update */
+static ssize_t qpnp_hap_wf_update_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hap->wf_update);
+}
+
+/* sysfs store for updating wave samples */
+static ssize_t qpnp_hap_wf_update_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+
+ mutex_lock(&hap->wf_lock);
+ hap->wf_update = true;
+ mutex_unlock(&hap->wf_lock);
+
+ return count;
+}
+
+/* sysfs show for wave repeat */
+static ssize_t qpnp_hap_wf_rep_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hap->wave_rep_cnt);
+}
+
+/* sysfs store for wave repeat */
+static ssize_t qpnp_hap_wf_rep_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+ int data, rc, temp;
+ u8 reg;
+
+ if (sscanf(buf, "%d", &data) != 1)
+ return -EINVAL;
+
+ if (data < QPNP_HAP_WAV_REP_MIN)
+ data = QPNP_HAP_WAV_REP_MIN;
+ else if (data > QPNP_HAP_WAV_REP_MAX)
+ data = QPNP_HAP_WAV_REP_MAX;
+
+ rc = qpnp_hap_read_reg(hap, &reg,
+ QPNP_HAP_WAV_REP_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_WAV_REP_MASK;
+ temp = fls(data) - 1;
+ reg |= (temp << QPNP_HAP_WAV_REP_SHFT);
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_WAV_REP_REG(hap->base));
+ if (rc)
+ return rc;
+
+ hap->wave_rep_cnt = data;
+
+ return count;
+}
+
+/* sysfs show for wave samples repeat */
+static ssize_t qpnp_hap_wf_s_rep_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hap->wave_s_rep_cnt);
+}
+
+/* sysfs store for wave samples repeat */
+static ssize_t qpnp_hap_wf_s_rep_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+ int data, rc, temp;
+ u8 reg;
+
+ if (sscanf(buf, "%d", &data) != 1)
+ return -EINVAL;
+
+ if (data < QPNP_HAP_WAV_S_REP_MIN)
+ data = QPNP_HAP_WAV_S_REP_MIN;
+ else if (data > QPNP_HAP_WAV_S_REP_MAX)
+ data = QPNP_HAP_WAV_S_REP_MAX;
+
+ rc = qpnp_hap_read_reg(hap, &reg,
+ QPNP_HAP_WAV_REP_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_WAV_S_REP_MASK;
+ temp = fls(data) - 1;
+ reg |= temp;
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_WAV_REP_REG(hap->base));
+ if (rc)
+ return rc;
+
+ hap->wave_s_rep_cnt = data;
+
+ return count;
+}
+
+/* sysfs store function for play mode */
+static ssize_t qpnp_hap_play_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+ char str[QPNP_HAP_STR_SIZE + 1];
+ int rc = 0, temp, old_mode, i;
+
+ if (snprintf(str, QPNP_HAP_STR_SIZE, "%s", buf) > QPNP_HAP_STR_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < strlen(str); i++) {
+ if (str[i] == ' ' || str[i] == '\n' || str[i] == '\t') {
+ str[i] = '\0';
+ break;
+ }
+ }
+ if (strcmp(str, "buffer") == 0)
+ temp = QPNP_HAP_BUFFER;
+ else if (strcmp(str, "direct") == 0)
+ temp = QPNP_HAP_DIRECT;
+ else if (strcmp(str, "audio") == 0)
+ temp = QPNP_HAP_AUDIO;
+ else if (strcmp(str, "pwm") == 0)
+ temp = QPNP_HAP_PWM;
+ else
+ return -EINVAL;
+
+ if (temp == hap->play_mode)
+ return count;
+
+ if (temp == QPNP_HAP_BUFFER && !hap->buffer_cfg_state) {
+ rc = qpnp_hap_parse_buffer_dt(hap);
+ if (!rc)
+ rc = qpnp_hap_buffer_config(hap);
+ } else if (temp == QPNP_HAP_PWM && !hap->pwm_cfg_state) {
+ rc = qpnp_hap_parse_pwm_dt(hap);
+ if (!rc)
+ rc = qpnp_hap_pwm_config(hap);
+ }
+
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_hap_mod_enable(hap, false);
+ if (rc < 0)
+ return rc;
+
+ old_mode = hap->play_mode;
+ hap->play_mode = temp;
+ /* Configure the PLAY MODE register */
+ rc = qpnp_hap_play_mode_config(hap);
+ if (rc) {
+ hap->play_mode = old_mode;
+ return rc;
+ }
+
+ if (hap->play_mode == QPNP_HAP_AUDIO) {
+ rc = qpnp_hap_mod_enable(hap, true);
+ if (rc < 0) {
+ hap->play_mode = old_mode;
+ return rc;
+ }
+ }
+
+ return count;
+}
+
+/* sysfs show function for play mode */
+static ssize_t qpnp_hap_play_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+ char *str;
+
+ if (hap->play_mode == QPNP_HAP_BUFFER)
+ str = "buffer";
+ else if (hap->play_mode == QPNP_HAP_DIRECT)
+ str = "direct";
+ else if (hap->play_mode == QPNP_HAP_AUDIO)
+ str = "audio";
+ else if (hap->play_mode == QPNP_HAP_PWM)
+ str = "pwm";
+ else
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", str);
+}
+
+/* sysfs store function for min max test data */
+static ssize_t qpnp_hap_min_max_test_data_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+
+ int value = QPNP_TEST_TIMER_MS, i;
+
+ mutex_lock(&hap->lock);
+ qpnp_hap_mod_enable(hap, true);
+ for (i = 0; i < ARRAY_SIZE(qpnp_hap_min_max_test_data); i++) {
+ hrtimer_start(&hap->hap_test_timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ qpnp_hap_play_byte(qpnp_hap_min_max_test_data[i], true);
+ wait_for_completion(&hap->completion);
+ }
+
+ qpnp_hap_play_byte(0, false);
+ qpnp_hap_mod_enable(hap, false);
+ mutex_unlock(&hap->lock);
+
+ return count;
+}
+
+/* sysfs show function for min max test data */
+static ssize_t qpnp_hap_min_max_test_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int count = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(qpnp_hap_min_max_test_data); i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "qpnp_haptics: min_max_test_data[%d] = 0x%x\n",
+ i, qpnp_hap_min_max_test_data[i]);
+
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
+ }
+
+ return count;
+}
+
+/* sysfs store for ramp test data */
+static ssize_t qpnp_hap_ramp_test_data_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct timed_output_dev *timed_dev = dev_get_drvdata(dev);
+ struct qpnp_hap *hap = container_of(timed_dev, struct qpnp_hap,
+ timed_dev);
+
+ int value = QPNP_TEST_TIMER_MS, i;
+
+ mutex_lock(&hap->lock);
+ qpnp_hap_mod_enable(hap, true);
+ for (i = 0; i < ARRAY_SIZE(qpnp_hap_ramp_test_data); i++) {
+ hrtimer_start(&hap->hap_test_timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ qpnp_hap_play_byte(qpnp_hap_ramp_test_data[i], true);
+ wait_for_completion(&hap->completion);
+ }
+
+ qpnp_hap_play_byte(0, false);
+ qpnp_hap_mod_enable(hap, false);
+ mutex_unlock(&hap->lock);
+
+ return count;
+}
+
+/* sysfs show function for ramp test data */
+static ssize_t qpnp_hap_ramp_test_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int count = 0, i;
+
+ for (i = 0; i < ARRAY_SIZE(qpnp_hap_ramp_test_data); i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "qpnp_haptics: ramp_test_data[%d] = 0x%x\n",
+ i, qpnp_hap_ramp_test_data[i]);
+
+ if (count >= PAGE_SIZE)
+ return PAGE_SIZE - 1;
+ }
+
+ return count;
+}
+
+/* sysfs attributes */
+static struct device_attribute qpnp_hap_attrs[] = {
+ __ATTR(wf_s0, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_s0_show,
+ qpnp_hap_wf_s0_store),
+ __ATTR(wf_s1, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_s1_show,
+ qpnp_hap_wf_s1_store),
+ __ATTR(wf_s2, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_s2_show,
+ qpnp_hap_wf_s2_store),
+ __ATTR(wf_s3, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_s3_show,
+ qpnp_hap_wf_s3_store),
+ __ATTR(wf_s4, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_s4_show,
+ qpnp_hap_wf_s4_store),
+ __ATTR(wf_s5, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_s5_show,
+ qpnp_hap_wf_s5_store),
+ __ATTR(wf_s6, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_s6_show,
+ qpnp_hap_wf_s6_store),
+ __ATTR(wf_s7, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_s7_show,
+ qpnp_hap_wf_s7_store),
+ __ATTR(wf_update, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_update_show,
+ qpnp_hap_wf_update_store),
+ __ATTR(wf_rep, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_rep_show,
+ qpnp_hap_wf_rep_store),
+ __ATTR(wf_s_rep, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_wf_s_rep_show,
+ qpnp_hap_wf_s_rep_store),
+ __ATTR(play_mode, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_play_mode_show,
+ qpnp_hap_play_mode_store),
+ __ATTR(dump_regs, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_dump_regs_show,
+ NULL),
+ __ATTR(ramp_test, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_ramp_test_data_show,
+ qpnp_hap_ramp_test_data_store),
+ __ATTR(min_max_test, (S_IRUGO | S_IWUSR | S_IWGRP),
+ qpnp_hap_min_max_test_data_show,
+ qpnp_hap_min_max_test_data_store),
+};
+
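+/*
+ * Build a table of RATE_CFG play rate codes for frequencies around the
+ * currently programmed LRA resonance frequency, from -AUTO_RES_ERR_MAX% to
+ * +AUTO_RES_ERR_MAX% in AUTO_RES_ERR_CAPTURE_RES% steps. The auto resonance
+ * error correction path walks this table to re-center the drive frequency.
+ */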
+static void calculate_lra_code(struct qpnp_hap *hap)
+{
+ u8 play_rate_code_lo, play_rate_code_hi;
+ int play_rate_code, neg_idx = 0, pos_idx = LRA_POS_FREQ_COUNT-1;
+ int lra_init_freq, freq_variation, start_variation = AUTO_RES_ERR_MAX;
+
+ qpnp_hap_read_reg(hap, &play_rate_code_lo,
+ QPNP_HAP_RATE_CFG1_REG(hap->base));
+ qpnp_hap_read_reg(hap, &play_rate_code_hi,
+ QPNP_HAP_RATE_CFG2_REG(hap->base));
+
+ play_rate_code = (play_rate_code_hi << 8) | (play_rate_code_lo & 0xff);
+
+ lra_init_freq = 200000 / play_rate_code;
+
+ while (start_variation >= AUTO_RES_ERR_CAPTURE_RES) {
+ freq_variation = (lra_init_freq * start_variation) / 100;
+ lra_play_rate_code[neg_idx++] = 200000 / (lra_init_freq -
+ freq_variation);
+ lra_play_rate_code[pos_idx--] = 200000 / (lra_init_freq +
+ freq_variation);
+ start_variation -= AUTO_RES_ERR_CAPTURE_RES;
+ }
+}
+
+static int qpnp_hap_auto_res_enable(struct qpnp_hap *hap, int enable)
+{
+ int rc = 0;
+ u8 val;
+
+ rc = qpnp_hap_read_reg(hap, &val, QPNP_HAP_TEST2_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ val &= QPNP_HAP_TEST2_AUTO_RES_MASK;
+
+ if (enable)
+ val |= AUTO_RES_ENABLE;
+ else
+ val |= AUTO_RES_DISABLE;
+
+ /* TEST2 is a secure access register */
+ rc = qpnp_hap_sec_access(hap);
+ if (rc)
+ return rc;
+
+ rc = qpnp_hap_write_reg(hap, &val, QPNP_HAP_TEST2_REG(hap->base));
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
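+/*
+ * Copy the resonance period measured by the auto resonance engine from the
+ * LRA_AUTO_RES status registers into RATE_CFG1/RATE_CFG2, so that subsequent
+ * playback uses the detected LRA frequency.
+ */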
+static void update_lra_frequency(struct qpnp_hap *hap)
+{
+ u8 lra_auto_res_lo = 0, lra_auto_res_hi = 0;
+
+ qpnp_hap_read_reg(hap, &lra_auto_res_lo,
+ QPNP_HAP_LRA_AUTO_RES_LO(hap->base));
+ qpnp_hap_read_reg(hap, &lra_auto_res_hi,
+ QPNP_HAP_LRA_AUTO_RES_HI(hap->base));
+
+ if (lra_auto_res_lo && lra_auto_res_hi) {
+ qpnp_hap_write_reg(hap, &lra_auto_res_lo,
+ QPNP_HAP_RATE_CFG1_REG(hap->base));
+
+ lra_auto_res_hi = lra_auto_res_hi >> 4;
+ qpnp_hap_write_reg(hap, &lra_auto_res_hi,
+ QPNP_HAP_RATE_CFG2_REG(hap->base));
+ }
+}
+
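+/*
+ * Timer callback that polls the auto resonance error bit: on an error it
+ * schedules the correction work, otherwise it captures the currently
+ * detected LRA frequency and re-arms itself.
+ */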
+static enum hrtimer_restart detect_auto_res_error(struct hrtimer *timer)
+{
+ struct qpnp_hap *hap = container_of(timer, struct qpnp_hap,
+ auto_res_err_poll_timer);
+ u8 val;
+ ktime_t currtime;
+
+ qpnp_hap_read_reg(hap, &val, QPNP_HAP_STATUS(hap->base));
+
+ if (val & AUTO_RES_ERR_BIT) {
+ schedule_work(&hap->auto_res_err_work);
+ return HRTIMER_NORESTART;
+ } else {
+ update_lra_frequency(hap);
+ currtime = ktime_get();
+ hrtimer_forward(&hap->auto_res_err_poll_timer, currtime,
+ ktime_set(0, POLL_TIME_AUTO_RES_ERR_NS));
+ return HRTIMER_RESTART;
+ }
+}
+
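+/*
+ * Auto resonance error correction: stop playback, program the next candidate
+ * play rate code from the table built by calculate_lra_code(), and, if the
+ * original vibration still had time remaining, restart it for that duration.
+ */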
+static void correct_auto_res_error(struct work_struct *auto_res_err_work)
+{
+ struct qpnp_hap *hap = container_of(auto_res_err_work,
+ struct qpnp_hap, auto_res_err_work);
+
+ u8 lra_code_lo, lra_code_hi, disable_hap = 0x00;
+ static int lra_freq_index;
+ ktime_t currtime, remaining_time;
+ int temp, rem = 0, index = lra_freq_index % LRA_POS_FREQ_COUNT;
+
+ if (hrtimer_active(&hap->hap_timer)) {
+ remaining_time = hrtimer_get_remaining(&hap->hap_timer);
+ rem = (int)ktime_to_us(remaining_time);
+ }
+
+ qpnp_hap_play(hap, 0);
+ qpnp_hap_write_reg(hap, &disable_hap,
+ QPNP_HAP_EN_CTL_REG(hap->base));
+
+ lra_code_lo = lra_play_rate_code[index] & QPNP_HAP_RATE_CFG1_MASK;
+ qpnp_hap_write_reg(hap, &lra_code_lo,
+ QPNP_HAP_RATE_CFG1_REG(hap->base));
+
+ qpnp_hap_read_reg(hap, &lra_code_hi,
+ QPNP_HAP_RATE_CFG2_REG(hap->base));
+
+ lra_code_hi &= QPNP_HAP_RATE_CFG2_MASK;
+ temp = lra_play_rate_code[index] >> QPNP_HAP_RATE_CFG2_SHFT;
+ lra_code_hi |= temp;
+
+ qpnp_hap_write_reg(hap, &lra_code_hi,
+ QPNP_HAP_RATE_CFG2_REG(hap->base));
+
+ lra_freq_index++;
+
+ if (rem > 0) {
+ currtime = ktime_get();
+ hap->state = 1;
+ hrtimer_forward(&hap->hap_timer, currtime, remaining_time);
+ schedule_work(&hap->work);
+ }
+}
+
+/* set api for haptics */
+static int qpnp_hap_set(struct qpnp_hap *hap, int on)
+{
+ int rc = 0;
+ u8 val = 0;
+ unsigned long timeout_ns = POLL_TIME_AUTO_RES_ERR_NS;
+
+ if (hap->play_mode == QPNP_HAP_PWM) {
+ if (on)
+ rc = pwm_enable(hap->pwm_info.pwm_dev);
+ else
+ pwm_disable(hap->pwm_info.pwm_dev);
+ } else if (hap->play_mode == QPNP_HAP_BUFFER ||
+ hap->play_mode == QPNP_HAP_DIRECT) {
+ if (on) {
+ if (hap->correct_lra_drive_freq ||
+ hap->auto_res_mode == QPNP_HAP_AUTO_RES_QWD)
+ qpnp_hap_auto_res_enable(hap, 0);
+
+ rc = qpnp_hap_mod_enable(hap, on);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_hap_play(hap, on);
+
+ if ((hap->act_type == QPNP_HAP_LRA &&
+ hap->correct_lra_drive_freq) ||
+ hap->auto_res_mode == QPNP_HAP_AUTO_RES_QWD) {
+ usleep_range(AUTO_RES_ENABLE_TIMEOUT,
+ (AUTO_RES_ENABLE_TIMEOUT + 1));
+
+ rc = qpnp_hap_auto_res_enable(hap, 1);
+ if (rc < 0)
+ return rc;
+ }
+ if (hap->correct_lra_drive_freq) {
+ /*
+ * Start timer to poll Auto Resonance error bit
+ */
+ mutex_lock(&hap->lock);
+ hrtimer_cancel(&hap->auto_res_err_poll_timer);
+ hrtimer_start(&hap->auto_res_err_poll_timer,
+ ktime_set(0, timeout_ns),
+ HRTIMER_MODE_REL);
+ mutex_unlock(&hap->lock);
+ }
+ } else {
+ rc = qpnp_hap_play(hap, on);
+ if (rc < 0)
+ return rc;
+
+ if (hap->correct_lra_drive_freq) {
+ rc = qpnp_hap_read_reg(hap, &val,
+ QPNP_HAP_STATUS(hap->base));
+ if (!(val & AUTO_RES_ERR_BIT))
+ update_lra_frequency(hap);
+ }
+
+ rc = qpnp_hap_mod_enable(hap, on);
+ if (hap->act_type == QPNP_HAP_LRA &&
+ hap->correct_lra_drive_freq) {
+ hrtimer_cancel(&hap->auto_res_err_poll_timer);
+ calculate_lra_code(hap);
+ }
+ }
+ }
+
+ return rc;
+}
+
+/* enable interface from timed output class */
+static void qpnp_hap_td_enable(struct timed_output_dev *dev, int value)
+{
+ struct qpnp_hap *hap = container_of(dev, struct qpnp_hap,
+ timed_dev);
+
+ mutex_lock(&hap->lock);
+
+ if (hap->act_type == QPNP_HAP_LRA &&
+ hap->correct_lra_drive_freq)
+ hrtimer_cancel(&hap->auto_res_err_poll_timer);
+
+ hrtimer_cancel(&hap->hap_timer);
+
+ if (value == 0) {
+ if (hap->state == 0) {
+ mutex_unlock(&hap->lock);
+ return;
+ }
+ hap->state = 0;
+ } else {
+ value = (value > hap->timeout_ms ?
+ hap->timeout_ms : value);
+ hap->state = 1;
+ hrtimer_start(&hap->hap_timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ }
+ mutex_unlock(&hap->lock);
+ schedule_work(&hap->work);
+}
+
+/* play pwm bytes */
+int qpnp_hap_play_byte(u8 data, bool on)
+{
+ struct qpnp_hap *hap = ghap;
+ int duty_ns, period_ns, duty_percent, rc;
+
+ if (!hap) {
+ pr_err("Haptics is not initialized\n");
+ return -EINVAL;
+ }
+
+ if (hap->play_mode != QPNP_HAP_PWM) {
+ dev_err(&hap->spmi->dev, "only PWM mode is supported\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_hap_set(hap, false);
+ if (rc)
+ return rc;
+
+ if (!on) {
+ /* set the pwm back to original duty for normal operations */
+ /* this is only needed when the standard timed-output interface is used */
+ rc = pwm_config(hap->pwm_info.pwm_dev,
+ hap->pwm_info.duty_us * NSEC_PER_USEC,
+ hap->pwm_info.period_us * NSEC_PER_USEC);
+ return rc;
+ }
+
+ /*
+ * PWM values range from 0x00 to 0xff. The range from 0x00 to 0x7f
+ * provides a positive amplitude in the sine waveform for 0 to 100%.
+ * The range from 0x80 to 0xff provides a negative amplitude in the
+ * sine waveform for 0 to 100%. Here the duty percentage is
+ * calculated based on the incoming data to accommodate this.
+ */
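+ /*
+ * Worked example, assuming HALF_DUTY = 50, FULL_DUTY = 100 and
+ * DATA_FACTOR = 39 (illustrative values only): data 0x40 (64)
+ * maps to 50 + (64 * 39) / 100 = 74% duty, while data 0xC0 (192)
+ * maps to 100 - (192 * 39) / 100 = 26% duty.
+ */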
+ if (data <= QPNP_HAP_EXT_PWM_PEAK_DATA)
+ duty_percent = QPNP_HAP_EXT_PWM_HALF_DUTY +
+ ((data * QPNP_HAP_EXT_PWM_DATA_FACTOR) / 100);
+ else
+ duty_percent = QPNP_HAP_EXT_PWM_FULL_DUTY -
+ ((data * QPNP_HAP_EXT_PWM_DATA_FACTOR) / 100);
+
+ period_ns = hap->pwm_info.period_us * NSEC_PER_USEC;
+ duty_ns = (period_ns * duty_percent) / 100;
+ rc = pwm_config(hap->pwm_info.pwm_dev,
+ duty_ns,
+ hap->pwm_info.period_us * NSEC_PER_USEC);
+ if (rc)
+ return rc;
+
+ dev_dbg(&hap->spmi->dev, "data=0x%x duty_per=%d\n", data, duty_percent);
+
+ rc = qpnp_hap_set(hap, true);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_hap_play_byte);
+
+/* worker to operate haptics */
+static void qpnp_hap_worker(struct work_struct *work)
+{
+ struct qpnp_hap *hap = container_of(work, struct qpnp_hap,
+ work);
+ u8 val = 0x00;
+ int rc, reg_en;
+
+ if (hap->vcc_pon) {
+ reg_en = regulator_enable(hap->vcc_pon);
+ if (reg_en)
+ pr_err("%s: could not enable vcc_pon regulator\n",
+ __func__);
+ }
+
+ /* Disable haptics module if the duration of short circuit
+ * exceeds the maximum limit (5 secs).
+ */
+ if (hap->sc_duration == SC_MAX_DURATION) {
+ rc = qpnp_hap_write_reg(hap, &val,
+ QPNP_HAP_EN_CTL_REG(hap->base));
+ } else {
+ if (hap->play_mode == QPNP_HAP_PWM)
+ qpnp_hap_mod_enable(hap, hap->state);
+ qpnp_hap_set(hap, hap->state);
+ }
+
+ if (hap->vcc_pon && !reg_en) {
+ rc = regulator_disable(hap->vcc_pon);
+ if (rc)
+ pr_err("%s: could not disable vcc_pon regulator\n",
+ __func__);
+ }
+}
+
+/* get_time API to report the remaining vibration time */
+static int qpnp_hap_get_time(struct timed_output_dev *dev)
+{
+ struct qpnp_hap *hap = container_of(dev, struct qpnp_hap,
+ timed_dev);
+
+ if (hrtimer_active(&hap->hap_timer)) {
+ ktime_t r = hrtimer_get_remaining(&hap->hap_timer);
+ return (int)ktime_to_us(r);
+ } else {
+ return 0;
+ }
+}
+
+/* hrtimer function handler */
+static enum hrtimer_restart qpnp_hap_timer(struct hrtimer *timer)
+{
+ struct qpnp_hap *hap = container_of(timer, struct qpnp_hap,
+ hap_timer);
+
+ hap->state = 0;
+ schedule_work(&hap->work);
+
+ return HRTIMER_NORESTART;
+}
+
+/* hrtimer function handler */
+static enum hrtimer_restart qpnp_hap_test_timer(struct hrtimer *timer)
+{
+ struct qpnp_hap *hap = container_of(timer, struct qpnp_hap,
+ hap_test_timer);
+
+ complete(&hap->completion);
+
+ return HRTIMER_NORESTART;
+}
+
+/* suspend routines to turn off haptics */
+#ifdef CONFIG_PM
+static int qpnp_haptic_suspend(struct device *dev)
+{
+ struct qpnp_hap *hap = dev_get_drvdata(dev);
+ hrtimer_cancel(&hap->hap_timer);
+ cancel_work_sync(&hap->work);
+ /* turn-off haptic */
+ qpnp_hap_set(hap, 0);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(qpnp_haptic_pm_ops, qpnp_haptic_suspend, NULL);
+
+/* Configuration api for haptics registers */
+static int qpnp_hap_config(struct qpnp_hap *hap)
+{
+ u8 reg = 0, error_code = 0, unlock_val, error_value;
+ int rc, i, temp;
+
+ /* Configure the ACTUATOR TYPE register */
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_ACT_TYPE_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_ACT_TYPE_MASK;
+ reg |= hap->act_type;
+ rc = qpnp_hap_write_reg(hap, &reg, QPNP_HAP_ACT_TYPE_REG(hap->base));
+ if (rc)
+ return rc;
+
+ /* Configure auto resonance parameters */
+ if (hap->act_type == QPNP_HAP_LRA) {
+ if (hap->lra_res_cal_period < QPNP_HAP_RES_CAL_PERIOD_MIN)
+ hap->lra_res_cal_period = QPNP_HAP_RES_CAL_PERIOD_MIN;
+ else if (hap->lra_res_cal_period > QPNP_HAP_RES_CAL_PERIOD_MAX)
+ hap->lra_res_cal_period = QPNP_HAP_RES_CAL_PERIOD_MAX;
+
+ rc = qpnp_hap_read_reg(hap, &reg,
+ QPNP_HAP_LRA_AUTO_RES_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_AUTO_RES_MODE_MASK;
+ reg |= (hap->auto_res_mode << QPNP_HAP_AUTO_RES_MODE_SHIFT);
+ reg &= QPNP_HAP_LRA_HIGH_Z_MASK;
+ reg |= (hap->lra_high_z << QPNP_HAP_LRA_HIGH_Z_SHIFT);
+ reg &= QPNP_HAP_LRA_RES_CAL_PER_MASK;
+ temp = fls(hap->lra_res_cal_period) - 1;
+ reg |= (temp - 2);
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_LRA_AUTO_RES_REG(hap->base));
+ if (rc)
+ return rc;
+ } else {
+ /* disable auto resonance for ERM */
+ reg = 0x00;
+
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_LRA_AUTO_RES_REG(hap->base));
+ if (rc)
+ return rc;
+ }
+
+ /* Configure the PLAY MODE register */
+ rc = qpnp_hap_play_mode_config(hap);
+ if (rc)
+ return rc;
+
+ /* Configure the VMAX register */
+ rc = qpnp_hap_vmax_config(hap);
+ if (rc)
+ return rc;
+
+ /* Configure the ILIM register */
+ if (hap->ilim_ma < QPNP_HAP_ILIM_MIN_MA)
+ hap->ilim_ma = QPNP_HAP_ILIM_MIN_MA;
+ else if (hap->ilim_ma > QPNP_HAP_ILIM_MAX_MA)
+ hap->ilim_ma = QPNP_HAP_ILIM_MAX_MA;
+
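+ /*
+ * The ILIM field encoding follows from the math below: assuming a
+ * 400 mA minimum, 400 mA maps to field value 0 and 800 mA maps to 1.
+ */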
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_ILIM_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_ILIM_MASK;
+ temp = (hap->ilim_ma / QPNP_HAP_ILIM_MIN_MA) >> 1;
+ reg |= temp;
+ rc = qpnp_hap_write_reg(hap, &reg, QPNP_HAP_ILIM_REG(hap->base));
+ if (rc)
+ return rc;
+
+ /* Configure the short circuit debounce register */
+ rc = qpnp_hap_sc_deb_config(hap);
+ if (rc)
+ return rc;
+
+ /* Configure the INTERNAL_PWM register */
+ if (hap->int_pwm_freq_khz <= QPNP_HAP_INT_PWM_FREQ_253_KHZ) {
+ hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_253_KHZ;
+ temp = 0;
+ } else if (hap->int_pwm_freq_khz <= QPNP_HAP_INT_PWM_FREQ_505_KHZ) {
+ hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_505_KHZ;
+ temp = 1;
+ } else if (hap->int_pwm_freq_khz <= QPNP_HAP_INT_PWM_FREQ_739_KHZ) {
+ hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_739_KHZ;
+ temp = 2;
+ } else {
+ hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_1076_KHZ;
+ temp = 3;
+ }
+
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_INT_PWM_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_INT_PWM_MASK;
+ reg |= temp;
+ rc = qpnp_hap_write_reg(hap, &reg, QPNP_HAP_INT_PWM_REG(hap->base));
+ if (rc)
+ return rc;
+
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_PWM_CAP_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_INT_PWM_MASK;
+ reg |= temp;
+ rc = qpnp_hap_write_reg(hap, &reg, QPNP_HAP_PWM_CAP_REG(hap->base));
+ if (rc)
+ return rc;
+
+ /* Configure the WAVE SHAPE register */
+ rc = qpnp_hap_read_reg(hap, &reg,
+ QPNP_HAP_WAV_SHAPE_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_WAV_SHAPE_MASK;
+ reg |= hap->wave_shape;
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_WAV_SHAPE_REG(hap->base));
+ if (rc)
+ return rc;
+
+ /* Configure RATE_CFG1 and RATE_CFG2 registers */
+ /*
+ * Note: for ERM these registers act as the play rate, while for
+ * LRA they represent the resonance period.
+ */
+ if (hap->wave_play_rate_us < QPNP_HAP_WAV_PLAY_RATE_US_MIN)
+ hap->wave_play_rate_us = QPNP_HAP_WAV_PLAY_RATE_US_MIN;
+ else if (hap->wave_play_rate_us > QPNP_HAP_WAV_PLAY_RATE_US_MAX)
+ hap->wave_play_rate_us = QPNP_HAP_WAV_PLAY_RATE_US_MAX;
+
+ temp = hap->wave_play_rate_us / QPNP_HAP_RATE_CFG_STEP_US;
+
+ /*
+ * The frequency of the 19.2 MHz RC clock is subject to variation.
+ * On PMI8950, the TRIM_ERROR_RC19P2_CLK register in the MISC
+ * module holds the frequency error of the 19.2 MHz RC clock.
+ */
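+ /*
+ * Illustration of the adjustment below: each LSB of the trim code
+ * scales the play rate code by 7/1000 (0.7%), with bit 7 selecting
+ * the sign. For example, error_code = 0x83 gives error_value = 21,
+ * so the rate code is scaled by (1000 - 21) / 1000, about -2.1%.
+ */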
+ if ((hap->act_type == QPNP_HAP_LRA) && hap->correct_lra_drive_freq
+ && hap->misc_trim_error_rc19p2_clk_reg_present) {
+ unlock_val = MISC_SEC_UNLOCK;
+ rc = spmi_ext_register_writel(hap->spmi->ctrl,
+ PMI8950_MISC_SID, MISC_SEC_ACCESS,
+ &unlock_val, 1);
+ if (rc)
+ dev_err(&hap->spmi->dev,
+ "Unable to do SEC_ACCESS rc:%d\n", rc);
+
+ spmi_ext_register_readl(hap->spmi->ctrl, PMI8950_MISC_SID,
+ MISC_TRIM_ERROR_RC19P2_CLK, &error_code, 1);
+
+ error_value = (error_code & 0x0F) * 7;
+
+ if (error_code & 0x80)
+ temp = (temp * (1000 - error_value)) / 1000;
+ else
+ temp = (temp * (1000 + error_value)) / 1000;
+ }
+
+ reg = temp & QPNP_HAP_RATE_CFG1_MASK;
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_RATE_CFG1_REG(hap->base));
+ if (rc)
+ return rc;
+
+ rc = qpnp_hap_read_reg(hap, &reg,
+ QPNP_HAP_RATE_CFG2_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_RATE_CFG2_MASK;
+ temp = temp >> QPNP_HAP_RATE_CFG2_SHFT;
+ reg |= temp;
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_RATE_CFG2_REG(hap->base));
+ if (rc)
+ return rc;
+
+ if ((hap->act_type == QPNP_HAP_LRA) && hap->correct_lra_drive_freq)
+ calculate_lra_code(hap);
+
+ /* Configure BRAKE register */
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_EN_CTL2_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ reg &= QPNP_HAP_BRAKE_MASK;
+ reg |= hap->en_brake;
+ rc = qpnp_hap_write_reg(hap, &reg, QPNP_HAP_EN_CTL2_REG(hap->base));
+ if (rc)
+ return rc;
+
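+ /* pack the 2-bit brake pattern entries; entry i goes into bits [2i+1:2i] */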
+ if (hap->en_brake && hap->sup_brake_pat) {
+ for (i = QPNP_HAP_BRAKE_PAT_LEN - 1, reg = 0; i >= 0; i--) {
+ hap->brake_pat[i] &= QPNP_HAP_BRAKE_PAT_MASK;
+ temp = i << 1;
+ reg |= hap->brake_pat[i] << temp;
+ }
+ rc = qpnp_hap_write_reg(hap, &reg,
+ QPNP_HAP_BRAKE_REG(hap->base));
+ if (rc)
+ return rc;
+ }
+
+ /* Cache enable control register */
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_EN_CTL_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ hap->reg_en_ctl = reg;
+
+ /* Cache play register */
+ rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_PLAY_REG(hap->base));
+ if (rc < 0)
+ return rc;
+ hap->reg_play = reg;
+
+ if (hap->play_mode == QPNP_HAP_BUFFER)
+ rc = qpnp_hap_buffer_config(hap);
+ else if (hap->play_mode == QPNP_HAP_PWM)
+ rc = qpnp_hap_pwm_config(hap);
+ else if (hap->play_mode == QPNP_HAP_AUDIO)
+ rc = qpnp_hap_mod_enable(hap, true);
+
+ if (rc)
+ return rc;
+
+ /* setup short circuit irq */
+ if (hap->use_sc_irq) {
+ rc = devm_request_threaded_irq(&hap->spmi->dev, hap->sc_irq,
+ NULL, qpnp_hap_sc_irq,
+ QPNP_IRQ_FLAGS,
+ "qpnp_sc_irq", hap);
+ if (rc < 0) {
+ dev_err(&hap->spmi->dev,
+ "Unable to request sc(%d) IRQ(err:%d)\n",
+ hap->sc_irq, rc);
+ return rc;
+ }
+ }
+
+ hap->sc_duration = 0;
+
+ return rc;
+}
+
+/* DT parsing for haptics parameters */
+static int qpnp_hap_parse_dt(struct qpnp_hap *hap)
+{
+ struct spmi_device *spmi = hap->spmi;
+ struct property *prop;
+ const char *temp_str;
+ u32 temp;
+ int rc;
+
+ hap->timeout_ms = QPNP_HAP_TIMEOUT_MS_MAX;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,timeout-ms", &temp);
+ if (!rc) {
+ hap->timeout_ms = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read timeout\n");
+ return rc;
+ }
+
+ hap->act_type = QPNP_HAP_LRA;
+ rc = of_property_read_string(spmi->dev.of_node,
+ "qcom,actuator-type", &temp_str);
+ if (!rc) {
+ if (strcmp(temp_str, "erm") == 0)
+ hap->act_type = QPNP_HAP_ERM;
+ else if (strcmp(temp_str, "lra") == 0)
+ hap->act_type = QPNP_HAP_LRA;
+ else {
+ dev_err(&spmi->dev, "Invalid actuator type\n");
+ return -EINVAL;
+ }
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read actuator type\n");
+ return rc;
+ }
+
+ if (hap->act_type == QPNP_HAP_LRA) {
+ hap->auto_res_mode = QPNP_HAP_AUTO_RES_ZXD_EOP;
+ rc = of_property_read_string(spmi->dev.of_node,
+ "qcom,lra-auto-res-mode", &temp_str);
+ if (!rc) {
+ if (strcmp(temp_str, "none") == 0)
+ hap->auto_res_mode = QPNP_HAP_AUTO_RES_NONE;
+ else if (strcmp(temp_str, "zxd") == 0)
+ hap->auto_res_mode = QPNP_HAP_AUTO_RES_ZXD;
+ else if (strcmp(temp_str, "qwd") == 0)
+ hap->auto_res_mode = QPNP_HAP_AUTO_RES_QWD;
+ else if (strcmp(temp_str, "max-qwd") == 0)
+ hap->auto_res_mode = QPNP_HAP_AUTO_RES_MAX_QWD;
+ else
+ hap->auto_res_mode = QPNP_HAP_AUTO_RES_ZXD_EOP;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read auto res mode\n");
+ return rc;
+ }
+
+ hap->lra_high_z = QPNP_HAP_LRA_HIGH_Z_OPT3;
+ rc = of_property_read_string(spmi->dev.of_node,
+ "qcom,lra-high-z", &temp_str);
+ if (!rc) {
+ if (strcmp(temp_str, "none") == 0)
+ hap->lra_high_z = QPNP_HAP_LRA_HIGH_Z_NONE;
+ else if (strcmp(temp_str, "opt1") == 0)
+ hap->lra_high_z = QPNP_HAP_LRA_HIGH_Z_OPT1;
+ else if (strcmp(temp_str, "opt2") == 0)
+ hap->lra_high_z = QPNP_HAP_LRA_HIGH_Z_OPT2;
+ else
+ hap->lra_high_z = QPNP_HAP_LRA_HIGH_Z_OPT3;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read LRA high-z\n");
+ return rc;
+ }
+
+ hap->lra_res_cal_period = QPNP_HAP_RES_CAL_PERIOD_MAX;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,lra-res-cal-period", &temp);
+ if (!rc) {
+ hap->lra_res_cal_period = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read cal period\n");
+ return rc;
+ }
+
+ hap->correct_lra_drive_freq =
+ of_property_read_bool(spmi->dev.of_node,
+ "qcom,correct-lra-drive-freq");
+
+ hap->misc_trim_error_rc19p2_clk_reg_present =
+ of_property_read_bool(spmi->dev.of_node,
+ "qcom,misc-trim-error-rc19p2-clk-reg-present");
+ }
+
+ rc = of_property_read_string(spmi->dev.of_node,
+ "qcom,play-mode", &temp_str);
+ if (!rc) {
+ if (strcmp(temp_str, "direct") == 0)
+ hap->play_mode = QPNP_HAP_DIRECT;
+ else if (strcmp(temp_str, "buffer") == 0)
+ hap->play_mode = QPNP_HAP_BUFFER;
+ else if (strcmp(temp_str, "pwm") == 0)
+ hap->play_mode = QPNP_HAP_PWM;
+ else if (strcmp(temp_str, "audio") == 0)
+ hap->play_mode = QPNP_HAP_AUDIO;
+ else {
+ dev_err(&spmi->dev, "Invalid play mode\n");
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&spmi->dev, "Unable to read play mode\n");
+ return rc;
+ }
+
+ hap->vmax_mv = QPNP_HAP_VMAX_MAX_MV;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,vmax-mv", &temp);
+ if (!rc) {
+ hap->vmax_mv = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read vmax\n");
+ return rc;
+ }
+
+ hap->ilim_ma = QPNP_HAP_ILIM_MIN_MV;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,ilim-ma", &temp);
+ if (!rc) {
+ hap->ilim_ma = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read ILim\n");
+ return rc;
+ }
+
+ hap->sc_deb_cycles = QPNP_HAP_DEF_SC_DEB_CYCLES;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,sc-deb-cycles", &temp);
+ if (!rc) {
+ hap->sc_deb_cycles = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read sc debounce\n");
+ return rc;
+ }
+
+ hap->int_pwm_freq_khz = QPNP_HAP_INT_PWM_FREQ_505_KHZ;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,int-pwm-freq-khz", &temp);
+ if (!rc) {
+ hap->int_pwm_freq_khz = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read int pwm freq\n");
+ return rc;
+ }
+
+ hap->wave_shape = QPNP_HAP_WAV_SQUARE;
+ rc = of_property_read_string(spmi->dev.of_node,
+ "qcom,wave-shape", &temp_str);
+ if (!rc) {
+ if (strcmp(temp_str, "sine") == 0)
+ hap->wave_shape = QPNP_HAP_WAV_SINE;
+ else if (strcmp(temp_str, "square") == 0)
+ hap->wave_shape = QPNP_HAP_WAV_SQUARE;
+ else {
+ dev_err(&spmi->dev, "Unsupported wav shape\n");
+ return -EINVAL;
+ }
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read wav shape\n");
+ return rc;
+ }
+
+ hap->wave_play_rate_us = QPNP_HAP_DEF_WAVE_PLAY_RATE_US;
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,wave-play-rate-us", &temp);
+ if (!rc) {
+ hap->wave_play_rate_us = temp;
+ } else if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read play rate\n");
+ return rc;
+ }
+
+ if (hap->play_mode == QPNP_HAP_BUFFER)
+ rc = qpnp_hap_parse_buffer_dt(hap);
+ else if (hap->play_mode == QPNP_HAP_PWM)
+ rc = qpnp_hap_parse_pwm_dt(hap);
+
+ if (rc < 0)
+ return rc;
+
+ hap->en_brake = of_property_read_bool(spmi->dev.of_node,
+ "qcom,en-brake");
+
+ if (hap->en_brake) {
+ prop = of_find_property(spmi->dev.of_node,
+ "qcom,brake-pattern", &temp);
+ if (!prop) {
+ dev_info(&spmi->dev, "brake pattern not found\n");
+ } else if (temp != QPNP_HAP_BRAKE_PAT_LEN) {
+ dev_err(&spmi->dev, "Invalid len of brake pattern\n");
+ return -EINVAL;
+ } else {
+ hap->sup_brake_pat = true;
+ memcpy(hap->brake_pat, prop->value,
+ QPNP_HAP_BRAKE_PAT_LEN);
+ }
+ }
+
+ hap->use_sc_irq = of_property_read_bool(spmi->dev.of_node,
+ "qcom,use-sc-irq");
+ if (hap->use_sc_irq) {
+ hap->sc_irq = spmi_get_irq_byname(hap->spmi,
+ NULL, "sc-irq");
+ if (hap->sc_irq < 0) {
+ dev_err(&spmi->dev, "Unable to get sc irq\n");
+ return hap->sc_irq;
+ }
+ }
+
+ if (of_find_property(spmi->dev.of_node, "vcc_pon-supply", NULL))
+ hap->manage_pon_supply = true;
+
+ return 0;
+}
+
+static int qpnp_haptic_probe(struct spmi_device *spmi)
+{
+ struct qpnp_hap *hap;
+ struct resource *hap_resource;
+ struct regulator *vcc_pon;
+ int rc, i;
+
+ hap = devm_kzalloc(&spmi->dev, sizeof(*hap), GFP_KERNEL);
+ if (!hap)
+ return -ENOMEM;
+
+ hap->spmi = spmi;
+
+ hap_resource = spmi_get_resource(spmi, 0, IORESOURCE_MEM, 0);
+ if (!hap_resource) {
+ dev_err(&spmi->dev, "Unable to get haptic base address\n");
+ return -EINVAL;
+ }
+ hap->base = hap_resource->start;
+
+ dev_set_drvdata(&spmi->dev, hap);
+
+ rc = qpnp_hap_parse_dt(hap);
+ if (rc) {
+ dev_err(&spmi->dev, "DT parsing failed\n");
+ return rc;
+ }
+
+ rc = qpnp_hap_config(hap);
+ if (rc) {
+ dev_err(&spmi->dev, "hap config failed\n");
+ return rc;
+ }
+
+ mutex_init(&hap->lock);
+ mutex_init(&hap->wf_lock);
+ INIT_WORK(&hap->work, qpnp_hap_worker);
+ INIT_DELAYED_WORK(&hap->sc_work, qpnp_handle_sc_irq);
+ init_completion(&hap->completion);
+
+ hrtimer_init(&hap->hap_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hap->hap_timer.function = qpnp_hap_timer;
+
+ hrtimer_init(&hap->hap_test_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hap->hap_test_timer.function = qpnp_hap_test_timer;
+
+ hap->timed_dev.name = "vibrator";
+ hap->timed_dev.get_time = qpnp_hap_get_time;
+ hap->timed_dev.enable = qpnp_hap_td_enable;
+
+ if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq) {
+ INIT_WORK(&hap->auto_res_err_work, correct_auto_res_error);
+ hrtimer_init(&hap->auto_res_err_poll_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ hap->auto_res_err_poll_timer.function = detect_auto_res_error;
+ }
+
+ rc = timed_output_dev_register(&hap->timed_dev);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "timed_output registration failed\n");
+ goto timed_output_fail;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qpnp_hap_attrs); i++) {
+ rc = sysfs_create_file(&hap->timed_dev.dev->kobj,
+ &qpnp_hap_attrs[i].attr);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "sysfs creation failed\n");
+ goto sysfs_fail;
+ }
+ }
+
+ if (hap->manage_pon_supply) {
+ vcc_pon = regulator_get(&spmi->dev, "vcc_pon");
+ if (IS_ERR(vcc_pon)) {
+ rc = PTR_ERR(vcc_pon);
+ dev_err(&spmi->dev,
+ "regulator get failed vcc_pon rc=%d\n", rc);
+ goto sysfs_fail;
+ }
+ hap->vcc_pon = vcc_pon;
+ }
+
+ ghap = hap;
+
+ return 0;
+
+sysfs_fail:
+ for (i--; i >= 0; i--)
+ sysfs_remove_file(&hap->timed_dev.dev->kobj,
+ &qpnp_hap_attrs[i].attr);
+ timed_output_dev_unregister(&hap->timed_dev);
+timed_output_fail:
+ cancel_work_sync(&hap->work);
+ if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq)
+ hrtimer_cancel(&hap->auto_res_err_poll_timer);
+ hrtimer_cancel(&hap->hap_timer);
+ mutex_destroy(&hap->lock);
+ mutex_destroy(&hap->wf_lock);
+
+ return rc;
+}
+
+static int qpnp_haptic_remove(struct spmi_device *spmi)
+{
+ struct qpnp_hap *hap = dev_get_drvdata(&spmi->dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qpnp_hap_attrs); i++)
+ sysfs_remove_file(&hap->timed_dev.dev->kobj,
+ &qpnp_hap_attrs[i].attr);
+
+ cancel_work_sync(&hap->work);
+ if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq)
+ hrtimer_cancel(&hap->auto_res_err_poll_timer);
+ hrtimer_cancel(&hap->hap_timer);
+ timed_output_dev_unregister(&hap->timed_dev);
+ mutex_destroy(&hap->lock);
+ mutex_destroy(&hap->wf_lock);
+ if (hap->vcc_pon)
+ regulator_put(hap->vcc_pon);
+
+ return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-haptic", },
+ { },
+};
+
+static struct spmi_driver qpnp_haptic_driver = {
+ .driver = {
+ .name = "qcom,qpnp-haptic",
+ .of_match_table = spmi_match_table,
+ .pm = &qpnp_haptic_pm_ops,
+ },
+ .probe = qpnp_haptic_probe,
+ .remove = qpnp_haptic_remove,
+};
+
+static int __init qpnp_haptic_init(void)
+{
+ return spmi_driver_register(&qpnp_haptic_driver);
+}
+module_init(qpnp_haptic_init);
+
+static void __exit qpnp_haptic_exit(void)
+{
+ return spmi_driver_unregister(&qpnp_haptic_driver);
+}
+module_exit(qpnp_haptic_exit);
+
+MODULE_DESCRIPTION("qpnp haptic driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/qpnp-power-on.c b/drivers/platform/msm/qpnp-power-on.c
new file mode 100644
index 000000000000..964ecfd483ed
--- /dev/null
+++ b/drivers/platform/msm/qpnp-power-on.c
@@ -0,0 +1,2324 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/input.h>
+#include <linux/log2.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/qpnp/power-on.h>
+
+#define CREATE_MASK(NUM_BITS, POS) \
+ ((unsigned char) (((1 << (NUM_BITS)) - 1) << (POS)))
+#define PON_MASK(MSB_BIT, LSB_BIT) \
+ CREATE_MASK(MSB_BIT - LSB_BIT + 1, LSB_BIT)
+
+#define PMIC_VER_8941 0x01
+#define PMIC_VERSION_REG 0x0105
+#define PMIC_VERSION_REV4_REG 0x0103
+
+#define PMIC8941_V1_REV4 0x01
+#define PMIC8941_V2_REV4 0x02
+#define PON_PRIMARY 0x01
+#define PON_SECONDARY 0x02
+#define PON_1REG 0x03
+#define PON_GEN2_PRIMARY 0x04
+#define PON_GEN2_SECONDARY 0x05
+
+#define PON_OFFSET(subtype, offset_gen1, offset_gen2) \
+ (((subtype == PON_PRIMARY) || \
+ (subtype == PON_SECONDARY) || \
+ (subtype == PON_1REG)) ? offset_gen1 : offset_gen2)
+
+/* Common PNP defines */
+#define QPNP_PON_REVISION2(pon) ((pon)->base + 0x01)
+#define QPNP_PON_PERPH_SUBTYPE(pon) ((pon)->base + 0x05)
+
+/* PON common register addresses */
+#define QPNP_PON_RT_STS(pon) ((pon)->base + 0x10)
+#define QPNP_PON_PULL_CTL(pon) ((pon)->base + 0x70)
+#define QPNP_PON_DBC_CTL(pon) ((pon)->base + 0x71)
+
+/* PON/RESET sources register addresses */
+#define QPNP_PON_REASON1(pon) \
+ ((pon)->base + PON_OFFSET((pon)->subtype, 0x8, 0xC0))
+#define QPNP_PON_WARM_RESET_REASON1(pon) \
+ ((pon)->base + PON_OFFSET((pon)->subtype, 0xA, 0xC2))
+#define QPNP_POFF_REASON1(pon) \
+ ((pon)->base + PON_OFFSET((pon)->subtype, 0xC, 0xC5))
+#define QPNP_PON_WARM_RESET_REASON2(pon) ((pon)->base + 0xB)
+#define QPNP_PON_OFF_REASON(pon) ((pon)->base + 0xC7)
+#define QPNP_FAULT_REASON1(pon) ((pon)->base + 0xC8)
+#define QPNP_S3_RESET_REASON(pon) ((pon)->base + 0xCA)
+#define QPNP_PON_KPDPWR_S1_TIMER(pon) ((pon)->base + 0x40)
+#define QPNP_PON_KPDPWR_S2_TIMER(pon) ((pon)->base + 0x41)
+#define QPNP_PON_KPDPWR_S2_CNTL(pon) ((pon)->base + 0x42)
+#define QPNP_PON_KPDPWR_S2_CNTL2(pon) ((pon)->base + 0x43)
+#define QPNP_PON_RESIN_S1_TIMER(pon) ((pon)->base + 0x44)
+#define QPNP_PON_RESIN_S2_TIMER(pon) ((pon)->base + 0x45)
+#define QPNP_PON_RESIN_S2_CNTL(pon) ((pon)->base + 0x46)
+#define QPNP_PON_RESIN_S2_CNTL2(pon) ((pon)->base + 0x47)
+#define QPNP_PON_KPDPWR_RESIN_S1_TIMER(pon) ((pon)->base + 0x48)
+#define QPNP_PON_KPDPWR_RESIN_S2_TIMER(pon) ((pon)->base + 0x49)
+#define QPNP_PON_KPDPWR_RESIN_S2_CNTL(pon) ((pon)->base + 0x4A)
+#define QPNP_PON_KPDPWR_RESIN_S2_CNTL2(pon) ((pon)->base + 0x4B)
+#define QPNP_PON_PS_HOLD_RST_CTL(pon) ((pon)->base + 0x5A)
+#define QPNP_PON_PS_HOLD_RST_CTL2(pon) ((pon)->base + 0x5B)
+#define QPNP_PON_WD_RST_S2_CTL(pon) ((pon)->base + 0x56)
+#define QPNP_PON_WD_RST_S2_CTL2(pon) ((pon)->base + 0x57)
+#define QPNP_PON_S3_SRC(pon) ((pon)->base + 0x74)
+#define QPNP_PON_S3_DBC_CTL(pon) ((pon)->base + 0x75)
+#define QPNP_PON_SMPL_CTL(pon) ((pon)->base + 0x7F)
+#define QPNP_PON_TRIGGER_EN(pon) ((pon)->base + 0x80)
+#define QPNP_PON_XVDD_RB_SPARE(pon) ((pon)->base + 0x8E)
+#define QPNP_PON_SOFT_RB_SPARE(pon) ((pon)->base + 0x8F)
+#define QPNP_PON_SEC_ACCESS(pon) ((pon)->base + 0xD0)
+
+#define QPNP_PON_SEC_UNLOCK 0xA5
+
+#define QPNP_PON_WARM_RESET_TFT BIT(4)
+
+#define QPNP_PON_RESIN_PULL_UP BIT(0)
+#define QPNP_PON_KPDPWR_PULL_UP BIT(1)
+#define QPNP_PON_CBLPWR_PULL_UP BIT(2)
+#define QPNP_PON_FAULT_PULL_UP BIT(4)
+#define QPNP_PON_S2_CNTL_EN BIT(7)
+#define QPNP_PON_S2_RESET_ENABLE BIT(7)
+#define QPNP_PON_DELAY_BIT_SHIFT 6
+
+#define QPNP_PON_S1_TIMER_MASK (0xF)
+#define QPNP_PON_S2_TIMER_MASK (0x7)
+#define QPNP_PON_S2_CNTL_TYPE_MASK (0xF)
+
+#define QPNP_PON_DBC_DELAY_MASK(pon) \
+ PON_OFFSET((pon)->subtype, 0x7, 0xF)
+
+#define QPNP_PON_KPDPWR_N_SET BIT(0)
+#define QPNP_PON_RESIN_N_SET BIT(1)
+#define QPNP_PON_CBLPWR_N_SET BIT(2)
+#define QPNP_PON_RESIN_BARK_N_SET BIT(4)
+#define QPNP_PON_KPDPWR_RESIN_BARK_N_SET BIT(5)
+
+#define QPNP_PON_WD_EN BIT(7)
+#define QPNP_PON_RESET_EN BIT(7)
+#define QPNP_PON_POWER_OFF_MASK 0xF
+#define QPNP_GEN2_POFF_SEQ BIT(7)
+#define QPNP_GEN2_FAULT_SEQ BIT(6)
+#define QPNP_GEN2_S3_RESET_SEQ BIT(5)
+
+#define QPNP_PON_S3_SRC_KPDPWR 0
+#define QPNP_PON_S3_SRC_RESIN 1
+#define QPNP_PON_S3_SRC_KPDPWR_AND_RESIN 2
+#define QPNP_PON_S3_SRC_KPDPWR_OR_RESIN 3
+#define QPNP_PON_S3_SRC_MASK 0x3
+#define QPNP_PON_HARD_RESET_MASK PON_MASK(7, 5)
+
+#define QPNP_PON_UVLO_DLOAD_EN BIT(7)
+#define QPNP_PON_SMPL_EN BIT(7)
+
+/* Ranges */
+#define QPNP_PON_S1_TIMER_MAX 10256
+#define QPNP_PON_S2_TIMER_MAX 2000
+#define QPNP_PON_S3_TIMER_SECS_MAX 128
+#define QPNP_PON_S3_DBC_DELAY_MASK 0x07
+#define QPNP_PON_RESET_TYPE_MAX 0xF
+#define PON_S1_COUNT_MAX 0xF
+#define QPNP_PON_MIN_DBC_US (USEC_PER_SEC / 64)
+#define QPNP_PON_MAX_DBC_US (USEC_PER_SEC * 2)
+
+#define QPNP_KEY_STATUS_DELAY msecs_to_jiffies(250)
+
+#define QPNP_PON_BUFFER_SIZE 9
+
+#define QPNP_POFF_REASON_UVLO 13
+
+enum qpnp_pon_version {
+ QPNP_PON_GEN1_V1,
+ QPNP_PON_GEN1_V2,
+ QPNP_PON_GEN2,
+};
+
+enum pon_type {
+ PON_KPDPWR,
+ PON_RESIN,
+ PON_CBLPWR,
+ PON_KPDPWR_RESIN,
+};
+
+struct qpnp_pon_config {
+ u32 pon_type;
+ u32 support_reset;
+ u32 key_code;
+ u32 s1_timer;
+ u32 s2_timer;
+ u32 s2_type;
+ u32 pull_up;
+ u32 state_irq;
+ u32 bark_irq;
+ u16 s2_cntl_addr;
+ u16 s2_cntl2_addr;
+ bool old_state;
+ bool use_bark;
+ bool config_reset;
+};
+
+struct pon_regulator {
+ struct qpnp_pon *pon;
+ struct regulator_dev *rdev;
+ struct regulator_desc rdesc;
+ u32 addr;
+ u32 bit;
+ bool enabled;
+};
+
+struct qpnp_pon {
+ struct spmi_device *spmi;
+ struct input_dev *pon_input;
+ struct qpnp_pon_config *pon_cfg;
+ struct pon_regulator *pon_reg_cfg;
+ struct list_head list;
+ struct delayed_work bark_work;
+ struct dentry *debugfs;
+ int pon_trigger_reason;
+ int pon_power_off_reason;
+ int num_pon_reg;
+ int num_pon_config;
+ int reg_count;
+ u32 dbc;
+ u32 uvlo;
+ int warm_reset_poff_type;
+ int hard_reset_poff_type;
+ int shutdown_poff_type;
+ u16 base;
+ u8 subtype;
+ u8 pon_ver;
+ u8 warm_reset_reason1;
+ u8 warm_reset_reason2;
+ bool is_spon;
+ bool store_hard_reset_reason;
+};
+
+static struct qpnp_pon *sys_reset_dev;
+static DEFINE_SPINLOCK(spon_list_slock);
+static LIST_HEAD(spon_dev_list);
+
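+/* Supported S1 timer delays in ms, indexed by the 4-bit register code */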
+static u32 s1_delay[PON_S1_COUNT_MAX + 1] = {
+ 0, 32, 56, 80, 138, 184, 272, 408, 608, 904, 1352, 2048,
+ 3072, 4480, 6720, 10256
+};
+
+static const char * const qpnp_pon_reason[] = {
+ [0] = "Triggered from Hard Reset",
+ [1] = "Triggered from SMPL (sudden momentary power loss)",
+ [2] = "Triggered from RTC (RTC alarm expiry)",
+ [3] = "Triggered from DC (DC charger insertion)",
+ [4] = "Triggered from USB (USB charger insertion)",
+ [5] = "Triggered from PON1 (secondary PMIC)",
+ [6] = "Triggered from CBL (external power supply)",
+ [7] = "Triggered from KPD (power key press)",
+};
+
+#define POFF_REASON_FAULT_OFFSET 16
+#define POFF_REASON_S3_RESET_OFFSET 32
+static const char * const qpnp_poff_reason[] = {
+ /* QPNP_PON_GEN1 POFF reasons */
+ [0] = "Triggered from SOFT (Software)",
+ [1] = "Triggered from PS_HOLD (PS_HOLD/MSM controlled shutdown)",
+ [2] = "Triggered from PMIC_WD (PMIC watchdog)",
+ [3] = "Triggered from GP1 (Keypad_Reset1)",
+ [4] = "Triggered from GP2 (Keypad_Reset2)",
+ [5] = "Triggered from KPDPWR_AND_RESIN (Simultaneous power key and reset line)",
+ [6] = "Triggered from RESIN_N (Reset line/Volume Down Key)",
+ [7] = "Triggered from KPDPWR_N (Long Power Key hold)",
+ [8] = "N/A",
+ [9] = "N/A",
+ [10] = "N/A",
+ [11] = "Triggered from CHARGER (Charger ENUM_TIMER, BOOT_DONE)",
+ [12] = "Triggered from TFT (Thermal Fault Tolerance)",
+ [13] = "Triggered from UVLO (Under Voltage Lock Out)",
+ [14] = "Triggered from OTST3 (Overtemp)",
+ [15] = "Triggered from STAGE3 (Stage 3 reset)",
+
+ /* QPNP_PON_GEN2 FAULT reasons */
+ [16] = "Triggered from GP_FAULT0",
+ [17] = "Triggered from GP_FAULT1",
+ [18] = "Triggered from GP_FAULT2",
+ [19] = "Triggered from GP_FAULT3",
+ [20] = "Triggered from MBG_FAULT",
+ [21] = "Triggered from OVLO (Over Voltage Lock Out)",
+ [22] = "Triggered from UVLO (Under Voltage Lock Out)",
+ [23] = "Triggered from AVDD_RB",
+ [24] = "N/A",
+ [25] = "N/A",
+ [26] = "N/A",
+ [27] = "Triggered from FAULT_FAULT_N",
+ [28] = "Triggered from FAULT_PBS_WATCHDOG_TO",
+ [29] = "Triggered from FAULT_PBS_NACK",
+ [30] = "Triggered from FAULT_RESTART_PON",
+ [31] = "Triggered from OTST3 (Overtemp)",
+
+ /* QPNP_PON_GEN2 S3_RESET reasons */
+ [32] = "N/A",
+ [33] = "N/A",
+ [34] = "N/A",
+ [35] = "N/A",
+ [36] = "Triggered from S3_RESET_FAULT_N",
+ [37] = "Triggered from S3_RESET_PBS_WATCHDOG_TO",
+ [38] = "Triggered from S3_RESET_PBS_NACK",
+ [39] = "Triggered from S3_RESET_KPDPWR_ANDOR_RESIN (power key and/or reset line)",
+};
+
+/*
+ * On the kernel command line specify
+ * qpnp-power-on.warm_boot=1 to indicate a warm
+ * boot of the device.
+ */
+static int warm_boot;
+module_param(warm_boot, int, 0);
+
+static int
+qpnp_pon_masked_write(struct qpnp_pon *pon, u16 addr, u8 mask, u8 val)
+{
+ int rc;
+ u8 reg;
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ addr, &reg, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read from addr=%hx, rc(%d)\n",
+ addr, rc);
+ return rc;
+ }
+
+ reg &= ~mask;
+ reg |= val & mask;
+ rc = spmi_ext_register_writel(pon->spmi->ctrl, pon->spmi->sid,
+ addr, &reg, 1);
+ if (rc)
+ dev_err(&pon->spmi->dev,
+ "Unable to write to addr=%hx, rc(%d)\n", addr, rc);
+ return rc;
+}
+
+static bool is_pon_gen1(struct qpnp_pon *pon)
+{
+ return pon->subtype == PON_PRIMARY ||
+ pon->subtype == PON_SECONDARY;
+}
+
+static bool is_pon_gen2(struct qpnp_pon *pon)
+{
+ return pon->subtype == PON_GEN2_PRIMARY ||
+ pon->subtype == PON_GEN2_SECONDARY;
+}
+
+/**
+ * qpnp_pon_set_restart_reason - Store device restart reason in PMIC register.
+ *
+ * Returns 0 if the PMIC feature is not available or if the restart reason
+ * is stored successfully.
+ * Returns < 0 on error.
+ *
+ * This function stores the device restart reason in a PMIC register. It
+ * first checks whether the restart reason register has been specified; if
+ * it has not, it immediately returns 0.
+ */
+int qpnp_pon_set_restart_reason(enum pon_restart_reason reason)
+{
+ int rc = 0;
+ struct qpnp_pon *pon = sys_reset_dev;
+
+ if (!pon)
+ return 0;
+
+ if (!pon->store_hard_reset_reason)
+ return 0;
+
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_SOFT_RB_SPARE(pon),
+ PON_MASK(7, 2), (reason << 2));
+ if (rc)
+ dev_err(&pon->spmi->dev,
+ "Unable to write to addr=%x, rc(%d)\n",
+ QPNP_PON_SOFT_RB_SPARE(pon), rc);
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_pon_set_restart_reason);
+
+/*
+ * qpnp_pon_check_hard_reset_stored - Checks if the PMIC needs to
+ * store the hard reset reason.
+ *
+ * Returns true if the reset reason can be stored, false otherwise.
+ */
+bool qpnp_pon_check_hard_reset_stored(void)
+{
+ struct qpnp_pon *pon = sys_reset_dev;
+
+ if (!pon)
+ return false;
+
+ return pon->store_hard_reset_reason;
+}
+EXPORT_SYMBOL(qpnp_pon_check_hard_reset_stored);
+
+static int qpnp_pon_set_dbc(struct qpnp_pon *pon, u32 delay)
+{
+ int rc = 0;
+ u32 delay_reg;
+
+ if (delay == pon->dbc)
+ goto out;
+ if (pon->pon_input)
+ mutex_lock(&pon->pon_input->mutex);
+
+ if (delay < QPNP_PON_MIN_DBC_US)
+ delay = QPNP_PON_MIN_DBC_US;
+ else if (delay > QPNP_PON_MAX_DBC_US)
+ delay = QPNP_PON_MAX_DBC_US;
+
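+ /*
+ * The debounce field holds log2 of the delay expressed in 1/64 s
+ * (~15.6 ms) units. For example, the 2,000,000 us maximum gives
+ * (2000000 << 6) / 1000000 = 128 and ilog2(128) = 7.
+ */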
+ delay_reg = (delay << QPNP_PON_DELAY_BIT_SHIFT) / USEC_PER_SEC;
+ delay_reg = ilog2(delay_reg);
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_DBC_CTL(pon),
+ QPNP_PON_DBC_DELAY_MASK(pon),
+ delay_reg);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to set PON debounce\n");
+ goto unlock;
+ }
+
+ pon->dbc = delay;
+
+unlock:
+ if (pon->pon_input)
+ mutex_unlock(&pon->pon_input->mutex);
+out:
+ return rc;
+}
+
+static ssize_t qpnp_pon_dbc_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qpnp_pon *pon = dev_get_drvdata(dev);
+
+ return snprintf(buf, QPNP_PON_BUFFER_SIZE, "%d\n", pon->dbc);
+}
+
+static ssize_t qpnp_pon_dbc_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qpnp_pon *pon = dev_get_drvdata(dev);
+ u32 value;
+ int rc;
+
+ if (size > QPNP_PON_BUFFER_SIZE)
+ return -EINVAL;
+
+ rc = kstrtou32(buf, 10, &value);
+ if (rc)
+ return rc;
+
+ rc = qpnp_pon_set_dbc(pon, value);
+ if (rc < 0)
+ return rc;
+
+ return size;
+}
+
+static DEVICE_ATTR(debounce_us, 0664, qpnp_pon_dbc_show, qpnp_pon_dbc_store);
+
+static int qpnp_pon_reset_config(struct qpnp_pon *pon,
+ enum pon_power_off_type type)
+{
+ int rc;
+ u16 rst_en_reg;
+
+ if (pon->pon_ver == QPNP_PON_GEN1_V1)
+ rst_en_reg = QPNP_PON_PS_HOLD_RST_CTL(pon);
+ else
+ rst_en_reg = QPNP_PON_PS_HOLD_RST_CTL2(pon);
+
+ /*
+ * Based on the poweroff type set for a PON device through device tree
+ * change the type being configured into PS_HOLD_RST_CTL.
+ */
+ switch (type) {
+ case PON_POWER_OFF_WARM_RESET:
+ if (pon->warm_reset_poff_type != -EINVAL)
+ type = pon->warm_reset_poff_type;
+ break;
+ case PON_POWER_OFF_HARD_RESET:
+ if (pon->hard_reset_poff_type != -EINVAL)
+ type = pon->hard_reset_poff_type;
+ break;
+ case PON_POWER_OFF_SHUTDOWN:
+ if (pon->shutdown_poff_type != -EINVAL)
+ type = pon->shutdown_poff_type;
+ break;
+ default:
+ break;
+ }
+
+ rc = qpnp_pon_masked_write(pon, rst_en_reg, QPNP_PON_RESET_EN, 0);
+ if (rc)
+ dev_err(&pon->spmi->dev,
+ "Unable to write to addr=%hx, rc(%d)\n",
+ rst_en_reg, rc);
+
+ /*
+ * We need 10 sleep clock cycles here. But since the clock is
+ * internally generated, we need to add 50% tolerance to be
+ * conservative.
+ */
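+ /*
+ * Assuming the typical 32.768 kHz sleep clock, 10 cycles is about
+ * 305 us; adding the 50% margin gives roughly 458 us, rounded up
+ * to the 500 us delay below.
+ */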
+ udelay(500);
+
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_PS_HOLD_RST_CTL(pon),
+ QPNP_PON_POWER_OFF_MASK, type);
+ if (rc)
+ dev_err(&pon->spmi->dev,
+ "Unable to write to addr=%x, rc(%d)\n",
+ QPNP_PON_PS_HOLD_RST_CTL(pon), rc);
+
+ rc = qpnp_pon_masked_write(pon, rst_en_reg, QPNP_PON_RESET_EN,
+ QPNP_PON_RESET_EN);
+ if (rc)
+ dev_err(&pon->spmi->dev,
+ "Unable to write to addr=%hx, rc(%d)\n",
+ rst_en_reg, rc);
+
+ dev_dbg(&pon->spmi->dev, "power off type = 0x%02X\n", type);
+ return rc;
+}
+
+/**
+ * qpnp_pon_system_pwr_off - Configure system-reset PMIC for shutdown or reset
+ * @type: Determines the type of power off to perform - shutdown, reset, etc
+ *
+ * This function supports configuring multiple PMICs: in some cases the PON
+ * of secondary PMICs also needs to be configured. Once the system-reset and
+ * secondary PMICs are configured properly, the MSM can drop PS_HOLD to
+ * activate the specified configuration. Note that this function may be
+ * called from atomic context, as in the panic notifier path, and thus it
+ * must not rely on function calls that may sleep.
+ */
+int qpnp_pon_system_pwr_off(enum pon_power_off_type type)
+{
+ int rc = 0;
+ struct qpnp_pon *pon = sys_reset_dev;
+ struct qpnp_pon *tmp;
+ unsigned long flags;
+
+ if (!pon)
+ return -ENODEV;
+
+ rc = qpnp_pon_reset_config(pon, type);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Error configuring main PON rc: %d\n",
+ rc);
+ return rc;
+ }
+
+ /*
+ * Check if a secondary PON device needs to be configured. If it
+ * is available, configure that also as per the requested power off
+ * type
+ */
+ spin_lock_irqsave(&spon_list_slock, flags);
+ if (list_empty(&spon_dev_list))
+ goto out;
+
+ list_for_each_entry_safe(pon, tmp, &spon_dev_list, list) {
+ dev_emerg(&pon->spmi->dev,
+ "PMIC@SID%d: configuring PON for reset\n",
+ pon->spmi->sid);
+ rc = qpnp_pon_reset_config(pon, type);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Error configuring secondary PON rc: %d\n",
+ rc);
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&spon_list_slock, flags);
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_pon_system_pwr_off);
+
+/**
+ * qpnp_pon_is_warm_reset - Checks if the PMIC went through a warm reset.
+ *
+ * Returns > 0 for a warm reset, 0 if it was not warm, < 0 on error.
+ *
+ * Note that this function will only return the warm vs not-warm reset status
+ * of the PMIC that is configured as the system-reset device.
+ */
+int qpnp_pon_is_warm_reset(void)
+{
+ struct qpnp_pon *pon = sys_reset_dev;
+
+ if (!pon)
+ return -EPROBE_DEFER;
+
+ if (is_pon_gen1(pon) || pon->subtype == PON_1REG)
+ return pon->warm_reset_reason1
+ || (pon->warm_reset_reason2 & QPNP_PON_WARM_RESET_TFT);
+ else
+ return pon->warm_reset_reason1;
+}
+EXPORT_SYMBOL(qpnp_pon_is_warm_reset);
+
+/**
+ * qpnp_pon_wd_config - Enable or disable the PON watchdog.
+ * @enable: true to enable, false to disable the PON watchdog
+ *
+ * Returns 0 on success, < 0 on error.
+ */
+int qpnp_pon_wd_config(bool enable)
+{
+ struct qpnp_pon *pon = sys_reset_dev;
+ int rc = 0;
+
+ if (!pon)
+ return -EPROBE_DEFER;
+
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_WD_RST_S2_CTL2(pon),
+ QPNP_PON_WD_EN, enable ? QPNP_PON_WD_EN : 0);
+ if (rc)
+ dev_err(&pon->spmi->dev,
+ "Unable to write to addr=%x, rc(%d)\n",
+ QPNP_PON_WD_RST_S2_CTL2(pon), rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_pon_wd_config);
+
+static int qpnp_pon_get_trigger_config(enum pon_trigger_source pon_src,
+ bool *enabled)
+{
+ struct qpnp_pon *pon = sys_reset_dev;
+ int rc;
+ u16 addr;
+ u8 val;
+ u8 mask;
+
+ if (!pon)
+ return -ENODEV;
+
+ if (pon_src < PON_SMPL || pon_src > PON_KPDPWR_N) {
+ dev_err(&pon->spmi->dev, "Invalid PON source\n");
+ return -EINVAL;
+ }
+
+ addr = QPNP_PON_TRIGGER_EN(pon);
+ mask = BIT(pon_src);
+ if (is_pon_gen2(pon) && pon_src == PON_SMPL) {
+ addr = QPNP_PON_SMPL_CTL(pon);
+ mask = QPNP_PON_SMPL_EN;
+ }
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ addr, &val, 1);
+ if (rc)
+ dev_err(&pon->spmi->dev,
+ "Unable to read from addr=%hx, rc(%d)\n",
+ addr, rc);
+ else
+ *enabled = !!(val & mask);
+
+ return rc;
+}
+
+/**
+ * qpnp_pon_trigger_config - Configures (enable/disable) the PON trigger source
+ * @pon_src: PON source to be configured
+ * @enable: to enable or disable the PON trigger
+ *
+ * This function configures the power-on trigger capability of a
+ * PON source. If a specific PON trigger is disabled it cannot act
+ * as a power-on source to the PMIC.
+ */
+
+int qpnp_pon_trigger_config(enum pon_trigger_source pon_src, bool enable)
+{
+ struct qpnp_pon *pon = sys_reset_dev;
+ int rc;
+
+ if (!pon)
+ return -EPROBE_DEFER;
+
+ if (pon_src < PON_SMPL || pon_src > PON_KPDPWR_N) {
+ dev_err(&pon->spmi->dev, "Invalid PON source\n");
+ return -EINVAL;
+ }
+
+ if (is_pon_gen2(pon) && pon_src == PON_SMPL) {
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_SMPL_CTL(pon),
+ QPNP_PON_SMPL_EN, enable ? QPNP_PON_SMPL_EN : 0);
+ if (rc)
+ dev_err(&pon->spmi->dev, "Unable to write to addr=%x, rc(%d)\n",
+ QPNP_PON_SMPL_CTL(pon), rc);
+ } else {
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_TRIGGER_EN(pon),
+ BIT(pon_src), enable ? BIT(pon_src) : 0);
+ if (rc)
+ dev_err(&pon->spmi->dev, "Unable to write to addr=%x, rc(%d)\n",
+ QPNP_PON_TRIGGER_EN(pon), rc);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_pon_trigger_config);
+
+/*
+ * This function stores the PMIC warm reset reason register values. It also
+ * clears these registers if the qcom,clear-warm-reset device tree property
+ * is specified.
+ */
+static int qpnp_pon_store_and_clear_warm_reset(struct qpnp_pon *pon)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_WARM_RESET_REASON1(pon),
+ &pon->warm_reset_reason1, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read addr=%x, rc(%d)\n",
+ QPNP_PON_WARM_RESET_REASON1(pon), rc);
+ return rc;
+ }
+
+ if (is_pon_gen1(pon) || pon->subtype == PON_1REG) {
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_WARM_RESET_REASON2(pon),
+ &pon->warm_reset_reason2, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read addr=%x, rc(%d)\n",
+ QPNP_PON_WARM_RESET_REASON2(pon), rc);
+ return rc;
+ }
+ }
+
+ if (of_property_read_bool(pon->spmi->dev.of_node,
+ "qcom,clear-warm-reset")) {
+ rc = spmi_ext_register_writel(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_WARM_RESET_REASON1(pon), &reg, 1);
+ if (rc)
+ dev_err(&pon->spmi->dev,
+ "Unable to write to addr=%hx, rc(%d)\n",
+ QPNP_PON_WARM_RESET_REASON1(pon), rc);
+ }
+
+ return 0;
+}
+
+static struct qpnp_pon_config *
+qpnp_get_cfg(struct qpnp_pon *pon, u32 pon_type)
+{
+ int i;
+
+ for (i = 0; i < pon->num_pon_config; i++) {
+ if (pon_type == pon->pon_cfg[i].pon_type)
+ return &pon->pon_cfg[i];
+ }
+
+ return NULL;
+}
+
+static int
+qpnp_pon_input_dispatch(struct qpnp_pon *pon, u32 pon_type)
+{
+ int rc;
+ struct qpnp_pon_config *cfg = NULL;
+ u8 pon_rt_sts = 0, pon_rt_bit = 0;
+ u32 key_status;
+
+ cfg = qpnp_get_cfg(pon, pon_type);
+ if (!cfg)
+ return -EINVAL;
+
+ /* Check if key reporting is supported */
+ if (!cfg->key_code)
+ return 0;
+
+ /* check the RT status to get the current status of the line */
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_RT_STS(pon), &pon_rt_sts, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read PON RT status\n");
+ return rc;
+ }
+
+ switch (cfg->pon_type) {
+ case PON_KPDPWR:
+ pon_rt_bit = QPNP_PON_KPDPWR_N_SET;
+ break;
+ case PON_RESIN:
+ pon_rt_bit = QPNP_PON_RESIN_N_SET;
+ break;
+ case PON_CBLPWR:
+ pon_rt_bit = QPNP_PON_CBLPWR_N_SET;
+ break;
+ case PON_KPDPWR_RESIN:
+ pon_rt_bit = QPNP_PON_KPDPWR_RESIN_BARK_N_SET;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pr_debug("PMIC input: code=%d, sts=0x%hhx\n",
+ cfg->key_code, pon_rt_sts);
+ key_status = pon_rt_sts & pon_rt_bit;
+
+ /*
+ * Simulate a press event in case a release event occurred
+ * without a preceding press event.
+ */
+ if (!cfg->old_state && !key_status) {
+ input_report_key(pon->pon_input, cfg->key_code, 1);
+ input_sync(pon->pon_input);
+ }
+
+ input_report_key(pon->pon_input, cfg->key_code, key_status);
+ input_sync(pon->pon_input);
+
+ cfg->old_state = !!key_status;
+
+ return 0;
+}
+
+static irqreturn_t qpnp_kpdpwr_irq(int irq, void *_pon)
+{
+ int rc;
+ struct qpnp_pon *pon = _pon;
+
+ rc = qpnp_pon_input_dispatch(pon, PON_KPDPWR);
+ if (rc)
+ dev_err(&pon->spmi->dev, "Unable to send input event\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_kpdpwr_bark_irq(int irq, void *_pon)
+{
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_resin_irq(int irq, void *_pon)
+{
+ int rc;
+ struct qpnp_pon *pon = _pon;
+
+ rc = qpnp_pon_input_dispatch(pon, PON_RESIN);
+ if (rc)
+ dev_err(&pon->spmi->dev, "Unable to send input event\n");
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_kpdpwr_resin_bark_irq(int irq, void *_pon)
+{
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qpnp_cblpwr_irq(int irq, void *_pon)
+{
+ int rc;
+ struct qpnp_pon *pon = _pon;
+
+ rc = qpnp_pon_input_dispatch(pon, PON_CBLPWR);
+ if (rc)
+ dev_err(&pon->spmi->dev, "Unable to send input event\n");
+
+ return IRQ_HANDLED;
+}
+
+static void print_pon_reg(struct qpnp_pon *pon, u16 offset)
+{
+ int rc;
+ u16 addr;
+ u8 reg;
+
+ addr = pon->base + offset;
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ addr, &reg, 1);
+ if (rc)
+ dev_emerg(&pon->spmi->dev,
+ "Unable to read reg at 0x%04hx\n", addr);
+ else
+ dev_emerg(&pon->spmi->dev, "reg@0x%04hx: %02hhx\n", addr, reg);
+}
+
+#define PON_PBL_STATUS 0x7
+#define PON_PON_REASON1(subtype) PON_OFFSET(subtype, 0x8, 0xC0)
+#define PON_PON_REASON2 0x9
+#define PON_WARM_RESET_REASON1(subtype) PON_OFFSET(subtype, 0xA, 0xC2)
+#define PON_WARM_RESET_REASON2 0xB
+#define PON_POFF_REASON1(subtype) PON_OFFSET(subtype, 0xC, 0xC5)
+#define PON_POFF_REASON2 0xD
+#define PON_SOFT_RESET_REASON1(subtype) PON_OFFSET(subtype, 0xE, 0xCB)
+#define PON_SOFT_RESET_REASON2 0xF
+#define PON_FAULT_REASON1 0xC8
+#define PON_FAULT_REASON2 0xC9
+#define PON_PMIC_WD_RESET_S1_TIMER 0x54
+#define PON_PMIC_WD_RESET_S2_TIMER 0x55
+static irqreturn_t qpnp_pmic_wd_bark_irq(int irq, void *_pon)
+{
+ struct qpnp_pon *pon = _pon;
+
+ print_pon_reg(pon, PON_PBL_STATUS);
+ print_pon_reg(pon, PON_PON_REASON1(pon->subtype));
+ print_pon_reg(pon, PON_WARM_RESET_REASON1(pon->subtype));
+ print_pon_reg(pon, PON_SOFT_RESET_REASON1(pon->subtype));
+ print_pon_reg(pon, PON_POFF_REASON1(pon->subtype));
+ if (is_pon_gen1(pon) || pon->subtype == PON_1REG) {
+ print_pon_reg(pon, PON_PON_REASON2);
+ print_pon_reg(pon, PON_WARM_RESET_REASON2);
+ print_pon_reg(pon, PON_POFF_REASON2);
+ print_pon_reg(pon, PON_SOFT_RESET_REASON2);
+ } else {
+ print_pon_reg(pon, PON_FAULT_REASON1);
+ print_pon_reg(pon, PON_FAULT_REASON2);
+ }
+ print_pon_reg(pon, PON_PMIC_WD_RESET_S1_TIMER);
+ print_pon_reg(pon, PON_PMIC_WD_RESET_S2_TIMER);
+ panic("PMIC Watch dog triggered");
+
+ return IRQ_HANDLED;
+}
+
+static void bark_work_func(struct work_struct *work)
+{
+ int rc;
+ u8 pon_rt_sts = 0;
+ struct qpnp_pon_config *cfg;
+ struct qpnp_pon *pon =
+ container_of(work, struct qpnp_pon, bark_work.work);
+
+ cfg = qpnp_get_cfg(pon, PON_RESIN);
+ if (!cfg) {
+ dev_err(&pon->spmi->dev, "Invalid config pointer\n");
+ goto err_return;
+ }
+
+ /* enable reset */
+ rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+ QPNP_PON_S2_CNTL_EN, QPNP_PON_S2_CNTL_EN);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to configure S2 enable\n");
+ goto err_return;
+ }
+ /* bark RT status update delay */
+ msleep(100);
+ /* read the bark RT status */
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_RT_STS(pon), &pon_rt_sts, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read PON RT status\n");
+ goto err_return;
+ }
+
+ if (!(pon_rt_sts & QPNP_PON_RESIN_BARK_N_SET)) {
+ /* report the key event and enable the bark IRQ */
+ input_report_key(pon->pon_input, cfg->key_code, 0);
+ input_sync(pon->pon_input);
+ enable_irq(cfg->bark_irq);
+ } else {
+ /* disable reset */
+ rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+ QPNP_PON_S2_CNTL_EN, 0);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to configure S2 enable\n");
+ goto err_return;
+ }
+ /* re-arm the work */
+ schedule_delayed_work(&pon->bark_work, QPNP_KEY_STATUS_DELAY);
+ }
+
+err_return:
+ return;
+}
+
+static irqreturn_t qpnp_resin_bark_irq(int irq, void *_pon)
+{
+ int rc;
+ struct qpnp_pon *pon = _pon;
+ struct qpnp_pon_config *cfg;
+
+ /* disable the bark interrupt */
+ disable_irq_nosync(irq);
+
+ cfg = qpnp_get_cfg(pon, PON_RESIN);
+ if (!cfg) {
+ dev_err(&pon->spmi->dev, "Invalid config pointer\n");
+ goto err_exit;
+ }
+
+ /* disable reset */
+ rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+ QPNP_PON_S2_CNTL_EN, 0);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to configure S2 enable\n");
+ goto err_exit;
+ }
+
+ /* report the key event */
+ input_report_key(pon->pon_input, cfg->key_code, 1);
+ input_sync(pon->pon_input);
+ /* schedule work to check the bark status for key-release */
+ schedule_delayed_work(&pon->bark_work, QPNP_KEY_STATUS_DELAY);
+err_exit:
+ return IRQ_HANDLED;
+}
+
+static int
+qpnp_config_pull(struct qpnp_pon *pon, struct qpnp_pon_config *cfg)
+{
+ int rc;
+ u8 pull_bit;
+
+ switch (cfg->pon_type) {
+ case PON_KPDPWR:
+ pull_bit = QPNP_PON_KPDPWR_PULL_UP;
+ break;
+ case PON_RESIN:
+ pull_bit = QPNP_PON_RESIN_PULL_UP;
+ break;
+ case PON_CBLPWR:
+ pull_bit = QPNP_PON_CBLPWR_PULL_UP;
+ break;
+ case PON_KPDPWR_RESIN:
+ pull_bit = QPNP_PON_KPDPWR_PULL_UP | QPNP_PON_RESIN_PULL_UP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_PULL_CTL(pon),
+ pull_bit, cfg->pull_up ? pull_bit : 0);
+ if (rc)
+ dev_err(&pon->spmi->dev, "Unable to config pull-up\n");
+
+ return rc;
+}
+
+static int
+qpnp_config_reset(struct qpnp_pon *pon, struct qpnp_pon_config *cfg)
+{
+ int rc;
+ u8 i;
+ u16 s1_timer_addr, s2_timer_addr;
+
+ switch (cfg->pon_type) {
+ case PON_KPDPWR:
+ s1_timer_addr = QPNP_PON_KPDPWR_S1_TIMER(pon);
+ s2_timer_addr = QPNP_PON_KPDPWR_S2_TIMER(pon);
+ break;
+ case PON_RESIN:
+ s1_timer_addr = QPNP_PON_RESIN_S1_TIMER(pon);
+ s2_timer_addr = QPNP_PON_RESIN_S2_TIMER(pon);
+ break;
+ case PON_KPDPWR_RESIN:
+ s1_timer_addr = QPNP_PON_KPDPWR_RESIN_S1_TIMER(pon);
+ s2_timer_addr = QPNP_PON_KPDPWR_RESIN_S2_TIMER(pon);
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* disable S2 reset */
+ rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+ QPNP_PON_S2_CNTL_EN, 0);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to configure S2 enable\n");
+ return rc;
+ }
+
+ usleep_range(100, 120);
+
+ /* configure s1 timer, s2 timer and reset type */
+ for (i = 0; i < PON_S1_COUNT_MAX + 1; i++) {
+ if (cfg->s1_timer <= s1_delay[i])
+ break;
+ }
+ rc = qpnp_pon_masked_write(pon, s1_timer_addr,
+ QPNP_PON_S1_TIMER_MASK, i);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to configure S1 timer\n");
+ return rc;
+ }
+
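+ /*
+ * The S2 timer register encodes the requested delay roughly as
+ * ilog2(s2_timer / 10 + 1) when s2_timer is non-zero; for example,
+ * a 2000 ms request maps to ilog2(201) = 7.
+ */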
+ i = 0;
+ if (cfg->s2_timer) {
+ i = cfg->s2_timer / 10;
+ i = ilog2(i + 1);
+ }
+
+ rc = qpnp_pon_masked_write(pon, s2_timer_addr,
+ QPNP_PON_S2_TIMER_MASK, i);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to configure S2 timer\n");
+ return rc;
+ }
+
+ rc = qpnp_pon_masked_write(pon, cfg->s2_cntl_addr,
+ QPNP_PON_S2_CNTL_TYPE_MASK, (u8)cfg->s2_type);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to configure S2 reset type\n");
+ return rc;
+ }
+
+ /* enable S2 reset */
+ rc = qpnp_pon_masked_write(pon, cfg->s2_cntl2_addr,
+ QPNP_PON_S2_CNTL_EN, QPNP_PON_S2_CNTL_EN);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to configure S2 enable\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+qpnp_pon_request_irqs(struct qpnp_pon *pon, struct qpnp_pon_config *cfg)
+{
+ int rc = 0;
+
+ switch (cfg->pon_type) {
+ case PON_KPDPWR:
+ rc = devm_request_irq(&pon->spmi->dev, cfg->state_irq,
+ qpnp_kpdpwr_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "qpnp_kpdpwr_status", pon);
+ if (rc < 0) {
+ dev_err(&pon->spmi->dev, "Can't request %d IRQ\n",
+ cfg->state_irq);
+ return rc;
+ }
+ if (cfg->use_bark) {
+ rc = devm_request_irq(&pon->spmi->dev, cfg->bark_irq,
+ qpnp_kpdpwr_bark_irq,
+ IRQF_TRIGGER_RISING,
+ "qpnp_kpdpwr_bark", pon);
+ if (rc < 0) {
+ dev_err(&pon->spmi->dev,
+ "Can't request %d IRQ\n",
+ cfg->bark_irq);
+ return rc;
+ }
+ }
+ break;
+ case PON_RESIN:
+ rc = devm_request_irq(&pon->spmi->dev, cfg->state_irq,
+ qpnp_resin_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "qpnp_resin_status", pon);
+ if (rc < 0) {
+ dev_err(&pon->spmi->dev, "Can't request %d IRQ\n",
+ cfg->state_irq);
+ return rc;
+ }
+ if (cfg->use_bark) {
+ rc = devm_request_irq(&pon->spmi->dev, cfg->bark_irq,
+ qpnp_resin_bark_irq,
+ IRQF_TRIGGER_RISING,
+ "qpnp_resin_bark", pon);
+ if (rc < 0) {
+ dev_err(&pon->spmi->dev,
+ "Can't request %d IRQ\n",
+ cfg->bark_irq);
+ return rc;
+ }
+ }
+ break;
+ case PON_CBLPWR:
+ rc = devm_request_irq(&pon->spmi->dev, cfg->state_irq,
+ qpnp_cblpwr_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "qpnp_cblpwr_status", pon);
+ if (rc < 0) {
+ dev_err(&pon->spmi->dev, "Can't request %d IRQ\n",
+ cfg->state_irq);
+ return rc;
+ }
+ break;
+ case PON_KPDPWR_RESIN:
+ if (cfg->use_bark) {
+ rc = devm_request_irq(&pon->spmi->dev, cfg->bark_irq,
+ qpnp_kpdpwr_resin_bark_irq,
+ IRQF_TRIGGER_RISING,
+ "qpnp_kpdpwr_resin_bark", pon);
+ if (rc < 0) {
+ dev_err(&pon->spmi->dev,
+ "Can't request %d IRQ\n",
+ cfg->bark_irq);
+ return rc;
+ }
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* mark the interrupts as wakeup-capable if a linux key code is mapped */
+ if (cfg->key_code) {
+ enable_irq_wake(cfg->state_irq);
+ /* special handling for RESIN due to a hardware bug */
+ if (cfg->pon_type == PON_RESIN && cfg->support_reset)
+ enable_irq_wake(cfg->bark_irq);
+ }
+
+ return rc;
+}
+
+static int
+qpnp_pon_config_input(struct qpnp_pon *pon, struct qpnp_pon_config *cfg)
+{
+ if (!pon->pon_input) {
+ pon->pon_input = input_allocate_device();
+ if (!pon->pon_input) {
+ dev_err(&pon->spmi->dev,
+ "Can't allocate pon input device\n");
+ return -ENOMEM;
+ }
+ pon->pon_input->name = "qpnp_pon";
+ pon->pon_input->phys = "qpnp_pon/input0";
+ }
+
+ /* don't send dummy release event when system resumes */
+ __set_bit(INPUT_PROP_NO_DUMMY_RELEASE, pon->pon_input->propbit);
+ input_set_capability(pon->pon_input, EV_KEY, cfg->key_code);
+
+ return 0;
+}
+
+static int qpnp_pon_config_init(struct qpnp_pon *pon)
+{
+ int rc = 0, i = 0, pmic_wd_bark_irq;
+ struct device_node *pp = NULL;
+ struct qpnp_pon_config *cfg;
+ u8 pmic_type;
+ u8 revid_rev4;
+
+ if (!pon->num_pon_config) {
+ dev_dbg(&pon->spmi->dev, "num_pon_config: %d\n",
+ pon->num_pon_config);
+ return 0;
+ }
+
+ /* iterate through the list of pon configs */
+ for_each_available_child_of_node(pon->spmi->dev.of_node, pp) {
+ if (!of_find_property(pp, "qcom,pon-type", NULL))
+ continue;
+
+ cfg = &pon->pon_cfg[i++];
+
+ rc = of_property_read_u32(pp, "qcom,pon-type", &cfg->pon_type);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "PON type not specified\n");
+ return rc;
+ }
+
+ switch (cfg->pon_type) {
+ case PON_KPDPWR:
+ cfg->state_irq = spmi_get_irq_byname(pon->spmi,
+ NULL, "kpdpwr");
+ if (cfg->state_irq < 0) {
+ dev_err(&pon->spmi->dev,
+ "Unable to get kpdpwr irq\n");
+ return cfg->state_irq;
+ }
+
+ rc = of_property_read_u32(pp, "qcom,support-reset",
+ &cfg->support_reset);
+
+ if (rc) {
+ if (rc == -EINVAL) {
+ dev_dbg(&pon->spmi->dev,
+ "'qcom,support-reset' DT property doesn't exist\n");
+ } else {
+ dev_err(&pon->spmi->dev,
+ "Unable to read 'qcom,support-reset'\n");
+ return rc;
+ }
+ } else {
+ cfg->config_reset = true;
+ }
+
+ cfg->use_bark = of_property_read_bool(pp,
+ "qcom,use-bark");
+ if (cfg->use_bark) {
+ cfg->bark_irq = spmi_get_irq_byname(pon->spmi,
+ NULL, "kpdpwr-bark");
+ if (cfg->bark_irq < 0) {
+ dev_err(&pon->spmi->dev,
+ "Unable to get kpdpwr-bark irq\n");
+ return cfg->bark_irq;
+ }
+ }
+
+ /*
+ * If the value read from the REVISION2 register is 0x00,
+ * then there is a single register to control S2 reset.
+ * Otherwise there are separate registers for S2 reset type
+ * and S2 reset enable.
+ */
+ if (pon->pon_ver == QPNP_PON_GEN1_V1) {
+ cfg->s2_cntl_addr = cfg->s2_cntl2_addr =
+ QPNP_PON_KPDPWR_S2_CNTL(pon);
+ } else {
+ cfg->s2_cntl_addr =
+ QPNP_PON_KPDPWR_S2_CNTL(pon);
+ cfg->s2_cntl2_addr =
+ QPNP_PON_KPDPWR_S2_CNTL2(pon);
+ }
+
+ break;
+ case PON_RESIN:
+ cfg->state_irq = spmi_get_irq_byname(pon->spmi,
+ NULL, "resin");
+ if (cfg->state_irq < 0) {
+ dev_err(&pon->spmi->dev,
+ "Unable to get resin irq\n");
+ return cfg->state_irq;
+ }
+
+ rc = of_property_read_u32(pp, "qcom,support-reset",
+ &cfg->support_reset);
+
+ if (rc) {
+ if (rc == -EINVAL) {
+ dev_dbg(&pon->spmi->dev,
+ "'qcom,support-reset' DT property doesn't exist\n");
+ } else {
+ dev_err(&pon->spmi->dev,
+ "Unable to read 'qcom,support-reset'\n");
+ return rc;
+ }
+ } else {
+ cfg->config_reset = true;
+ }
+
+ cfg->use_bark = of_property_read_bool(pp,
+ "qcom,use-bark");
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl,
+ pon->spmi->sid, PMIC_VERSION_REG,
+ &pmic_type, 1);
+
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read PMIC type\n");
+ return rc;
+ }
+
+ if (pmic_type == PMIC_VER_8941) {
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl,
+ pon->spmi->sid, PMIC_VERSION_REV4_REG,
+ &revid_rev4, 1);
+
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read PMIC revision ID\n");
+ return rc;
+ }
+
+ /*
+ * PM8941 v3.0 and later do not have the hardware bug, so
+ * the bark interrupt is not required for those revisions.
+ */
+ if (!(revid_rev4 == PMIC8941_V1_REV4 ||
+ revid_rev4 == PMIC8941_V2_REV4)) {
+ cfg->support_reset = false;
+ cfg->use_bark = false;
+ }
+ }
+
+ if (cfg->use_bark) {
+ cfg->bark_irq = spmi_get_irq_byname(pon->spmi,
+ NULL, "resin-bark");
+ if (cfg->bark_irq < 0) {
+ dev_err(&pon->spmi->dev,
+ "Unable to get resin-bark irq\n");
+ return cfg->bark_irq;
+ }
+ }
+
+ if (pon->pon_ver == QPNP_PON_GEN1_V1) {
+ cfg->s2_cntl_addr = cfg->s2_cntl2_addr =
+ QPNP_PON_RESIN_S2_CNTL(pon);
+ } else {
+ cfg->s2_cntl_addr =
+ QPNP_PON_RESIN_S2_CNTL(pon);
+ cfg->s2_cntl2_addr =
+ QPNP_PON_RESIN_S2_CNTL2(pon);
+ }
+
+ break;
+ case PON_CBLPWR:
+ cfg->state_irq = spmi_get_irq_byname(pon->spmi,
+ NULL, "cblpwr");
+ if (cfg->state_irq < 0) {
+ dev_err(&pon->spmi->dev,
+ "Unable to get cblpwr irq\n");
+ return cfg->state_irq;
+ }
+ break;
+ case PON_KPDPWR_RESIN:
+ rc = of_property_read_u32(pp, "qcom,support-reset",
+ &cfg->support_reset);
+
+ if (rc) {
+ if (rc == -EINVAL) {
+ dev_dbg(&pon->spmi->dev,
+ "'qcom,support-reset' DT property doesn't exist\n");
+ } else {
+ dev_err(&pon->spmi->dev,
+ "Unable to read 'qcom,support-reset'\n");
+ return rc;
+ }
+ } else {
+ cfg->config_reset = true;
+ }
+
+ cfg->use_bark = of_property_read_bool(pp,
+ "qcom,use-bark");
+ if (cfg->use_bark) {
+ cfg->bark_irq = spmi_get_irq_byname(pon->spmi,
+ NULL, "kpdpwr-resin-bark");
+ if (cfg->bark_irq < 0) {
+ dev_err(&pon->spmi->dev,
+ "Unable to get kpdpwr-resin-bark irq\n");
+ return cfg->bark_irq;
+ }
+ }
+
+ if (pon->pon_ver == QPNP_PON_GEN1_V1) {
+ cfg->s2_cntl_addr = cfg->s2_cntl2_addr =
+ QPNP_PON_KPDPWR_RESIN_S2_CNTL(pon);
+ } else {
+ cfg->s2_cntl_addr =
+ QPNP_PON_KPDPWR_RESIN_S2_CNTL(pon);
+ cfg->s2_cntl2_addr =
+ QPNP_PON_KPDPWR_RESIN_S2_CNTL2(pon);
+ }
+
+ break;
+ default:
+ dev_err(&pon->spmi->dev, "PON RESET %d not supported",
+ cfg->pon_type);
+ return -EINVAL;
+ }
+
+ if (cfg->support_reset) {
+ /*
+ * Get the reset parameters (bark debounce time and
+ * reset debounce time) for the reset line.
+ */
+ rc = of_property_read_u32(pp, "qcom,s1-timer",
+ &cfg->s1_timer);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read s1-timer\n");
+ return rc;
+ }
+ if (cfg->s1_timer > QPNP_PON_S1_TIMER_MAX) {
+ dev_err(&pon->spmi->dev,
+ "Incorrect S1 debounce time\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(pp, "qcom,s2-timer",
+ &cfg->s2_timer);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read s2-timer\n");
+ return rc;
+ }
+ if (cfg->s2_timer > QPNP_PON_S2_TIMER_MAX) {
+ dev_err(&pon->spmi->dev,
+ "Incorrect S2 debounce time\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(pp, "qcom,s2-type",
+ &cfg->s2_type);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read s2-type\n");
+ return rc;
+ }
+ if (cfg->s2_type > QPNP_PON_RESET_TYPE_MAX) {
+ dev_err(&pon->spmi->dev,
+ "Incorrect reset type specified\n");
+ return -EINVAL;
+ }
+
+ }
+ /*
+ * Get the standard-key parameters. This might not be
+ * specified if there is no key mapping on the reset line.
+ */
+ rc = of_property_read_u32(pp, "linux,code", &cfg->key_code);
+ if (rc && rc != -EINVAL) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read key-code\n");
+ return rc;
+ }
+ /* Register key configuration */
+ if (cfg->key_code) {
+ rc = qpnp_pon_config_input(pon, cfg);
+ if (rc < 0)
+ return rc;
+ }
+ /* get the pull-up configuration */
+ rc = of_property_read_u32(pp, "qcom,pull-up", &cfg->pull_up);
+ if (rc && rc != -EINVAL) {
+ dev_err(&pon->spmi->dev, "Unable to read pull-up\n");
+ return rc;
+ }
+ }
+
+ pmic_wd_bark_irq = spmi_get_irq_byname(pon->spmi, NULL, "pmic-wd-bark");
+ /* request the pmic-wd-bark irq only if it is defined */
+ if (pmic_wd_bark_irq >= 0) {
+ rc = devm_request_irq(&pon->spmi->dev, pmic_wd_bark_irq,
+ qpnp_pmic_wd_bark_irq,
+ IRQF_TRIGGER_RISING,
+ "qpnp_pmic_wd_bark", pon);
+ if (rc < 0) {
+ dev_err(&pon->spmi->dev,
+ "Can't request %d IRQ\n",
+ pmic_wd_bark_irq);
+ goto free_input_dev;
+ }
+ }
+
+ /* register the input device */
+ if (pon->pon_input) {
+ rc = input_register_device(pon->pon_input);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Can't register pon key: %d\n", rc);
+ goto free_input_dev;
+ }
+ }
+
+ for (i = 0; i < pon->num_pon_config; i++) {
+ cfg = &pon->pon_cfg[i];
+ /* Configure the pull-up */
+ rc = qpnp_config_pull(pon, cfg);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to config pull-up\n");
+ goto unreg_input_dev;
+ }
+
+ if (cfg->config_reset) {
+ /* Configure the reset-configuration */
+ if (cfg->support_reset) {
+ rc = qpnp_config_reset(pon, cfg);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to config pon reset\n");
+ goto unreg_input_dev;
+ }
+ } else {
+ if (cfg->pon_type != PON_CBLPWR) {
+ /* disable S2 reset */
+ rc = qpnp_pon_masked_write(pon,
+ cfg->s2_cntl2_addr,
+ QPNP_PON_S2_CNTL_EN, 0);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to disable S2 reset\n");
+ goto unreg_input_dev;
+ }
+ }
+ }
+ }
+
+ rc = qpnp_pon_request_irqs(pon, cfg);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to request-irq's\n");
+ goto unreg_input_dev;
+ }
+ }
+
+ device_init_wakeup(&pon->spmi->dev, 1);
+
+ return rc;
+
+unreg_input_dev:
+ if (pon->pon_input)
+ input_unregister_device(pon->pon_input);
+free_input_dev:
+ if (pon->pon_input)
+ input_free_device(pon->pon_input);
+ return rc;
+}
+
+static int pon_spare_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ u8 value;
+ struct pon_regulator *pon_reg = rdev_get_drvdata(rdev);
+
+ pr_debug("reg %s enable addr: %x bit: %d\n", rdev->desc->name,
+ pon_reg->addr, pon_reg->bit);
+
+ value = BIT(pon_reg->bit) & 0xFF;
+ rc = qpnp_pon_masked_write(pon_reg->pon, pon_reg->pon->base +
+ pon_reg->addr, value, value);
+ if (rc)
+ dev_err(&pon_reg->pon->spmi->dev, "Unable to write to %x\n",
+ pon_reg->pon->base + pon_reg->addr);
+ else
+ pon_reg->enabled = true;
+ return rc;
+}
+
+static int pon_spare_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ u8 mask;
+ struct pon_regulator *pon_reg = rdev_get_drvdata(rdev);
+
+ pr_debug("reg %s disable addr: %x bit: %d\n", rdev->desc->name,
+ pon_reg->addr, pon_reg->bit);
+
+ mask = BIT(pon_reg->bit) & 0xFF;
+ rc = qpnp_pon_masked_write(pon_reg->pon, pon_reg->pon->base +
+ pon_reg->addr, mask, 0);
+ if (rc)
+ dev_err(&pon_reg->pon->spmi->dev, "Unable to write to %x\n",
+ pon_reg->pon->base + pon_reg->addr);
+ else
+ pon_reg->enabled = false;
+ return rc;
+}
+
+static int pon_spare_regulator_is_enable(struct regulator_dev *rdev)
+{
+ struct pon_regulator *pon_reg = rdev_get_drvdata(rdev);
+
+ return pon_reg->enabled;
+}
+
+static struct regulator_ops pon_spare_reg_ops = {
+ .enable = pon_spare_regulator_enable,
+ .disable = pon_spare_regulator_disable,
+ .is_enabled = pon_spare_regulator_is_enable,
+};
+
+static int pon_regulator_init(struct qpnp_pon *pon)
+{
+ int rc = 0, i = 0;
+ struct regulator_init_data *init_data;
+ struct regulator_config reg_cfg = {};
+ struct device_node *node = NULL;
+ struct device *dev = &pon->spmi->dev;
+ struct pon_regulator *pon_reg;
+
+ if (!pon->num_pon_reg)
+ return 0;
+
+ pon->pon_reg_cfg = devm_kcalloc(dev, pon->num_pon_reg,
+ sizeof(*(pon->pon_reg_cfg)),
+ GFP_KERNEL);
+
+ if (!pon->pon_reg_cfg)
+ return -ENOMEM;
+
+ for_each_available_child_of_node(dev->of_node, node) {
+ if (!of_find_property(node, "regulator-name", NULL))
+ continue;
+
+ pon_reg = &pon->pon_reg_cfg[i++];
+ pon_reg->pon = pon;
+
+ rc = of_property_read_u32(node, "qcom,pon-spare-reg-addr",
+ &pon_reg->addr);
+ if (rc) {
+ dev_err(dev, "Unable to read address for regulator, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,pon-spare-reg-bit",
+ &pon_reg->bit);
+ if (rc) {
+ dev_err(dev, "Unable to read bit for regulator, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ init_data = of_get_regulator_init_data(dev, node);
+ if (!init_data) {
+ dev_err(dev, "regulator init data is missing\n");
+ return -EINVAL;
+ }
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+
+ if (!init_data->constraints.name) {
+ dev_err(dev, "regulator-name is missing\n");
+ return -EINVAL;
+ }
+
+ pon_reg->rdesc.owner = THIS_MODULE;
+ pon_reg->rdesc.type = REGULATOR_VOLTAGE;
+ pon_reg->rdesc.ops = &pon_spare_reg_ops;
+ pon_reg->rdesc.name = init_data->constraints.name;
+
+ reg_cfg.dev = dev;
+ reg_cfg.init_data = init_data;
+ reg_cfg.driver_data = pon_reg;
+ reg_cfg.of_node = node;
+
+ pon_reg->rdev = regulator_register(&pon_reg->rdesc, &reg_cfg);
+ if (IS_ERR(pon_reg->rdev)) {
+ rc = PTR_ERR(pon_reg->rdev);
+ pon_reg->rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "regulator_register failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+static bool smpl_en;
+
+static int qpnp_pon_smpl_en_get(char *buf, const struct kernel_param *kp)
+{
+ bool enabled;
+ int rc;
+
+ rc = qpnp_pon_get_trigger_config(PON_SMPL, &enabled);
+ if (rc < 0)
+ return rc;
+
+ return snprintf(buf, QPNP_PON_BUFFER_SIZE, "%d", enabled);
+}
+
+static int qpnp_pon_smpl_en_set(const char *val,
+ const struct kernel_param *kp)
+{
+ int rc;
+
+ rc = param_set_bool(val, kp);
+ if (rc < 0) {
+ pr_err("Unable to set smpl_en rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = qpnp_pon_trigger_config(PON_SMPL, *(bool *)kp->arg);
+ return rc;
+}
+
+static struct kernel_param_ops smpl_en_ops = {
+ .set = qpnp_pon_smpl_en_set,
+ .get = qpnp_pon_smpl_en_get,
+};
+
+module_param_cb(smpl_en, &smpl_en_ops, &smpl_en, 0644);
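+
+/*
+ * Illustrative usage, assuming this file is built under the module name
+ * "qpnp_power_on": the SMPL trigger can then be read and toggled from
+ * userspace through the module parameter, e.g.
+ *
+ *	cat /sys/module/qpnp_power_on/parameters/smpl_en
+ *	echo 1 > /sys/module/qpnp_power_on/parameters/smpl_en
+ *
+ * The exact sysfs path depends on the name under which the driver is built.
+ */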
+
+static bool dload_on_uvlo;
+
+static int qpnp_pon_debugfs_uvlo_dload_get(char *buf,
+ const struct kernel_param *kp)
+{
+ struct qpnp_pon *pon = sys_reset_dev;
+ int rc = 0;
+ u8 reg;
+
+ if (!pon)
+ return -ENODEV;
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_XVDD_RB_SPARE(pon), &reg, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read addr=%x, rc(%d)\n",
+ QPNP_PON_XVDD_RB_SPARE(pon), rc);
+ return rc;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d",
+ !!(QPNP_PON_UVLO_DLOAD_EN & reg));
+}
+
+static int qpnp_pon_debugfs_uvlo_dload_set(const char *val,
+ const struct kernel_param *kp)
+{
+ struct qpnp_pon *pon = sys_reset_dev;
+ int rc = 0;
+ u8 reg;
+
+ if (!pon)
+ return -ENODEV;
+
+ rc = param_set_bool(val, kp);
+ if (rc) {
+ pr_err("Unable to set bms_reset: %d\n", rc);
+ return rc;
+ }
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_XVDD_RB_SPARE(pon), &reg, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read addr=%x, rc(%d)\n",
+ QPNP_PON_XVDD_RB_SPARE(pon), rc);
+ return rc;
+ }
+
+ reg &= ~QPNP_PON_UVLO_DLOAD_EN;
+ if (*(bool *)kp->arg)
+ reg |= QPNP_PON_UVLO_DLOAD_EN;
+
+ rc = spmi_ext_register_writel(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_XVDD_RB_SPARE(pon), &reg, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to write to addr=%hx, rc(%d)\n",
+ QPNP_PON_XVDD_RB_SPARE(pon), rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static struct kernel_param_ops dload_on_uvlo_ops = {
+ .set = qpnp_pon_debugfs_uvlo_dload_set,
+ .get = qpnp_pon_debugfs_uvlo_dload_get,
+};
+
+module_param_cb(dload_on_uvlo, &dload_on_uvlo_ops, &dload_on_uvlo, 0644);
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int qpnp_pon_debugfs_uvlo_get(void *data, u64 *val)
+{
+ struct qpnp_pon *pon = (struct qpnp_pon *) data;
+
+ *val = pon->uvlo;
+
+ return 0;
+}
+
+static int qpnp_pon_debugfs_uvlo_set(void *data, u64 val)
+{
+ struct qpnp_pon *pon = (struct qpnp_pon *) data;
+
+ if (pon->pon_trigger_reason == PON_SMPL ||
+ pon->pon_power_off_reason == QPNP_POFF_REASON_UVLO)
+ panic("An UVLO was occurred.\n");
+ pon->uvlo = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(qpnp_pon_debugfs_uvlo_fops, qpnp_pon_debugfs_uvlo_get,
+ qpnp_pon_debugfs_uvlo_set, "0x%02llx\n");
+
+static void qpnp_pon_debugfs_init(struct spmi_device *spmi)
+{
+ struct qpnp_pon *pon = dev_get_drvdata(&spmi->dev);
+ struct dentry *ent;
+
+ pon->debugfs = debugfs_create_dir(dev_name(&spmi->dev), NULL);
+ if (!pon->debugfs) {
+ dev_err(&pon->spmi->dev, "Unable to create debugfs directory\n");
+ } else {
+ ent = debugfs_create_file("uvlo_panic",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ pon->debugfs, pon, &qpnp_pon_debugfs_uvlo_fops);
+ if (!ent)
+ dev_err(&pon->spmi->dev, "Unable to create uvlo_panic debugfs file.\n");
+ }
+}
+
+static void qpnp_pon_debugfs_remove(struct spmi_device *spmi)
+{
+ struct qpnp_pon *pon = dev_get_drvdata(&spmi->dev);
+
+ debugfs_remove_recursive(pon->debugfs);
+}
+
+#else
+
+static void qpnp_pon_debugfs_init(struct spmi_device *spmi)
+{}
+
+static void qpnp_pon_debugfs_remove(struct spmi_device *spmi)
+{}
+#endif
+
+static int read_gen2_pon_off_reason(struct qpnp_pon *pon, u16 *reason,
+ int *reason_index_offset)
+{
+ int rc;
+ u8 buf[2], reg;
+
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_OFF_REASON(pon),
+ &reg, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read PON_OFF_REASON reg rc:%d\n",
+ rc);
+ return rc;
+ }
+
+ if (reg & QPNP_GEN2_POFF_SEQ) {
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_POFF_REASON1(pon),
+ buf, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read POFF_REASON1 reg rc:%d\n",
+ rc);
+ return rc;
+ }
+ *reason = buf[0];
+ *reason_index_offset = 0;
+ } else if (reg & QPNP_GEN2_FAULT_SEQ) {
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_FAULT_REASON1(pon),
+ buf, 2);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read FAULT_REASON regs rc:%d\n",
+ rc);
+ return rc;
+ }
+ *reason = buf[0] | (buf[1] << 8);
+ *reason_index_offset = POFF_REASON_FAULT_OFFSET;
+ } else if (reg & QPNP_GEN2_S3_RESET_SEQ) {
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_S3_RESET_REASON(pon),
+ buf, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read S3_RESET_REASON reg rc:%d\n",
+ rc);
+ return rc;
+ }
+ *reason = buf[0];
+ *reason_index_offset = POFF_REASON_S3_RESET_OFFSET;
+ }
+
+ return 0;
+}
+
+static int qpnp_pon_probe(struct spmi_device *spmi)
+{
+ struct qpnp_pon *pon;
+ struct resource *pon_resource;
+ struct device_node *node = NULL;
+ u32 delay = 0, s3_debounce = 0;
+ int rc, sys_reset, index;
+ int reason_index_offset = 0;
+ u8 pon_sts = 0, buf[2];
+ u16 poff_sts = 0;
+ const char *s3_src;
+ u8 s3_src_reg;
+ unsigned long flags;
+
+ pon = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_pon),
+ GFP_KERNEL);
+ if (!pon) {
+ dev_err(&spmi->dev, "Can't allocate qpnp_pon\n");
+ return -ENOMEM;
+ }
+
+ sys_reset = of_property_read_bool(spmi->dev.of_node,
+ "qcom,system-reset");
+ if (sys_reset && sys_reset_dev) {
+ dev_err(&spmi->dev, "qcom,system-reset property can only be specified for one device on the system\n");
+ return -EINVAL;
+ } else if (sys_reset) {
+ sys_reset_dev = pon;
+ }
+
+ pon->spmi = spmi;
+
+ pon_resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!pon_resource) {
+ dev_err(&spmi->dev, "Unable to get PON base address\n");
+ return -ENXIO;
+ }
+ pon->base = pon_resource->start;
+
+ /* get the total number of pon configurations */
+ for_each_available_child_of_node(spmi->dev.of_node, node) {
+ if (of_find_property(node, "regulator-name", NULL)) {
+ pon->num_pon_reg++;
+ } else if (of_find_property(node, "qcom,pon-type", NULL)) {
+ pon->num_pon_config++;
+ } else {
+ pr_err("Unknown sub-node\n");
+ return -EINVAL;
+ }
+ }
+
+ pr_debug("PON@SID %d: num_pon_config: %d num_pon_reg: %d\n",
+ pon->spmi->sid, pon->num_pon_config, pon->num_pon_reg);
+
+ rc = pon_regulator_init(pon);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Error in pon_regulator_init rc: %d\n",
+ rc);
+ return rc;
+ }
+
+ if (!pon->num_pon_config)
+ /* No PON config., do not register the driver */
+ dev_info(&spmi->dev, "No PON config. specified\n");
+ else
+ pon->pon_cfg = devm_kzalloc(&spmi->dev,
+ sizeof(struct qpnp_pon_config) *
+ pon->num_pon_config, GFP_KERNEL);
+
+ /* Read PON_PERPH_SUBTYPE register to get PON type */
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_PERPH_SUBTYPE(pon),
+ &pon->subtype, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read PON_PERPH_SUBTYPE register rc: %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Check if it is rev B */
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_REVISION2(pon), &pon->pon_ver, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to read addr=%x, rc(%d)\n",
+ QPNP_PON_REVISION2(pon), rc);
+ return rc;
+ }
+ if (is_pon_gen1(pon)) {
+ if (pon->pon_ver == 0)
+ pon->pon_ver = QPNP_PON_GEN1_V1;
+ else
+ pon->pon_ver = QPNP_PON_GEN1_V2;
+ } else if (is_pon_gen2(pon)) {
+ pon->pon_ver = QPNP_PON_GEN2;
+ } else if (pon->subtype == PON_1REG) {
+ pon->pon_ver = QPNP_PON_GEN1_V2;
+ } else {
+ dev_err(&pon->spmi->dev,
+ "Invalid PON_PERPH_SUBTYPE value %x\n",
+ pon->subtype);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: pon_subtype=%x, pon_version=%x\n", __func__,
+ pon->subtype, pon->pon_ver);
+
+ rc = qpnp_pon_store_and_clear_warm_reset(pon);
+ if (rc) {
+ dev_err(&pon->spmi->dev,
+ "Unable to store/clear WARM_RESET_REASONx registers rc: %d\n",
+ rc);
+ return rc;
+ }
+
+ /* PON reason */
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_PON_REASON1(pon), &pon_sts, 1);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read PON_REASON1 reg rc: %d\n",
+ rc);
+ return rc;
+ }
+
+ index = ffs(pon_sts) - 1;
+ cold_boot = !qpnp_pon_is_warm_reset();
+ if (index >= ARRAY_SIZE(qpnp_pon_reason) || index < 0) {
+ dev_info(&pon->spmi->dev,
+ "PMIC@SID%d Power-on reason: Unknown and '%s' boot\n",
+ pon->spmi->sid, cold_boot ? "cold" : "warm");
+ } else {
+ pon->pon_trigger_reason = index;
+ dev_info(&pon->spmi->dev,
+ "PMIC@SID%d Power-on reason: %s and '%s' boot\n",
+ pon->spmi->sid, qpnp_pon_reason[index],
+ cold_boot ? "cold" : "warm");
+ }
+
+ /* POFF reason */
+ if (!is_pon_gen1(pon) && pon->subtype != PON_1REG) {
+ rc = read_gen2_pon_off_reason(pon, &poff_sts,
+ &reason_index_offset);
+ if (rc)
+ return rc;
+ } else {
+ rc = spmi_ext_register_readl(pon->spmi->ctrl, pon->spmi->sid,
+ QPNP_POFF_REASON1(pon),
+ buf, 2);
+ if (rc) {
+ dev_err(&pon->spmi->dev, "Unable to read POFF_REASON regs rc:%d\n",
+ rc);
+ return rc;
+ }
+ poff_sts = buf[0] | (buf[1] << 8);
+ }
+ index = ffs(poff_sts) - 1 + reason_index_offset;
+ if (index >= ARRAY_SIZE(qpnp_poff_reason) || index < 0) {
+ dev_info(&pon->spmi->dev,
+ "PMIC@SID%d: Unknown power-off reason\n",
+ pon->spmi->sid);
+ } else {
+ pon->pon_power_off_reason = index;
+ dev_info(&pon->spmi->dev,
+ "PMIC@SID%d: Power-off reason: %s\n",
+ pon->spmi->sid,
+ qpnp_poff_reason[index]);
+ }
+
+ if (pon->pon_trigger_reason == PON_SMPL ||
+ pon->pon_power_off_reason == QPNP_POFF_REASON_UVLO) {
+ if (of_property_read_bool(spmi->dev.of_node,
+ "qcom,uvlo-panic"))
+ panic("An UVLO was occurred.");
+ }
+
+ /* program s3 debounce */
+ rc = of_property_read_u32(pon->spmi->dev.of_node,
+ "qcom,s3-debounce", &s3_debounce);
+ if (rc) {
+ if (rc != -EINVAL) {
+ dev_err(&pon->spmi->dev, "Unable to read s3 timer rc:%d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ if (s3_debounce > QPNP_PON_S3_TIMER_SECS_MAX) {
+ dev_info(&pon->spmi->dev,
+ "Exceeded S3 max value, set it to max\n");
+ s3_debounce = QPNP_PON_S3_TIMER_SECS_MAX;
+ }
+
+ /* 0 is a special value to indicate instant s3 reset */
+ if (s3_debounce != 0)
+ s3_debounce = ilog2(s3_debounce);
+
+ /* the s3 debounce register is SEC_ACCESS protected */
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_SEC_ACCESS(pon),
+ 0xFF, QPNP_PON_SEC_UNLOCK);
+ if (rc) {
+ dev_err(&spmi->dev, "Unable to do SEC_ACCESS rc:%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_S3_DBC_CTL(pon),
+ QPNP_PON_S3_DBC_DELAY_MASK, s3_debounce);
+ if (rc) {
+ dev_err(&spmi->dev, "Unable to set S3 debounce rc:%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* program s3 source */
+ s3_src = "kpdpwr-and-resin";
+ rc = of_property_read_string(pon->spmi->dev.of_node,
+ "qcom,s3-src", &s3_src);
+ if (rc && rc != -EINVAL) {
+ dev_err(&pon->spmi->dev, "Unable to read s3 timer rc: %d\n",
+ rc);
+ return rc;
+ }
+
+ if (!strcmp(s3_src, "kpdpwr"))
+ s3_src_reg = QPNP_PON_S3_SRC_KPDPWR;
+ else if (!strcmp(s3_src, "resin"))
+ s3_src_reg = QPNP_PON_S3_SRC_RESIN;
+ else if (!strcmp(s3_src, "kpdpwr-or-resin"))
+ s3_src_reg = QPNP_PON_S3_SRC_KPDPWR_OR_RESIN;
+ else /* default combination */
+ s3_src_reg = QPNP_PON_S3_SRC_KPDPWR_AND_RESIN;
+
+ /*
+ * S3 source is a write-once register. If the register has already
+ * been configured by the bootloader, this write has no effect.
+ */
+ rc = qpnp_pon_masked_write(pon, QPNP_PON_S3_SRC(pon),
+ QPNP_PON_S3_SRC_MASK, s3_src_reg);
+ if (rc) {
+ dev_err(&spmi->dev, "Unable to program s3 source rc: %d\n", rc);
+ return rc;
+ }
+
+ dev_set_drvdata(&spmi->dev, pon);
+
+ INIT_DELAYED_WORK(&pon->bark_work, bark_work_func);
+
+ /* register the PON configurations */
+ rc = qpnp_pon_config_init(pon);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Unable to initialize PON configurations rc: %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(pon->spmi->dev.of_node,
+ "qcom,pon-dbc-delay", &delay);
+ if (rc) {
+ if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read debounce delay rc: %d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ rc = qpnp_pon_set_dbc(pon, delay);
+ }
+
+ rc = of_property_read_u32(pon->spmi->dev.of_node,
+ "qcom,warm-reset-poweroff-type",
+ &pon->warm_reset_poff_type);
+ if (rc) {
+ if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read warm reset poweroff type rc: %d\n",
+ rc);
+ return rc;
+ }
+ pon->warm_reset_poff_type = -EINVAL;
+ } else if (pon->warm_reset_poff_type <= PON_POWER_OFF_RESERVED ||
+ pon->warm_reset_poff_type >= PON_POWER_OFF_MAX_TYPE) {
+ dev_err(&spmi->dev, "Invalid warm-reset-poweroff-type\n");
+ pon->warm_reset_poff_type = -EINVAL;
+ }
+
+ rc = of_property_read_u32(pon->spmi->dev.of_node,
+ "qcom,hard-reset-poweroff-type",
+ &pon->hard_reset_poff_type);
+ if (rc) {
+ if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read hard reset poweroff type rc: %d\n",
+ rc);
+ return rc;
+ }
+ pon->hard_reset_poff_type = -EINVAL;
+ } else if (pon->hard_reset_poff_type <= PON_POWER_OFF_RESERVED ||
+ pon->hard_reset_poff_type >= PON_POWER_OFF_MAX_TYPE) {
+ dev_err(&spmi->dev, "Invalid hard-reset-poweroff-type\n");
+ pon->hard_reset_poff_type = -EINVAL;
+ }
+
+ rc = of_property_read_u32(pon->spmi->dev.of_node,
+ "qcom,shutdown-poweroff-type",
+ &pon->shutdown_poff_type);
+ if (rc) {
+ if (rc != -EINVAL) {
+ dev_err(&spmi->dev, "Unable to read shutdown poweroff type rc: %d\n",
+ rc);
+ return rc;
+ }
+ pon->shutdown_poff_type = -EINVAL;
+ } else if (pon->shutdown_poff_type <= PON_POWER_OFF_RESERVED ||
+ pon->shutdown_poff_type >= PON_POWER_OFF_MAX_TYPE) {
+ dev_err(&spmi->dev, "Invalid shutdown-poweroff-type\n");
+ pon->shutdown_poff_type = -EINVAL;
+ }
+
+ rc = device_create_file(&spmi->dev, &dev_attr_debounce_us);
+ if (rc) {
+ dev_err(&spmi->dev, "sys file creation failed rc: %d\n",
+ rc);
+ return rc;
+ }
+
+ if (of_property_read_bool(spmi->dev.of_node,
+ "qcom,secondary-pon-reset")) {
+ if (sys_reset) {
+ dev_err(&spmi->dev, "qcom,system-reset property shouldn't be used along with qcom,secondary-pon-reset property\n");
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&spon_list_slock, flags);
+ list_add(&pon->list, &spon_dev_list);
+ spin_unlock_irqrestore(&spon_list_slock, flags);
+ pon->is_spon = true;
+ } else {
+ boot_reason = ffs(pon_sts);
+ }
+
+ /* config whether store the hard reset reason */
+ pon->store_hard_reset_reason = of_property_read_bool(
+ spmi->dev.of_node,
+ "qcom,store-hard-reset-reason");
+
+ qpnp_pon_debugfs_init(spmi);
+ return 0;
+}
+
+static int qpnp_pon_remove(struct spmi_device *spmi)
+{
+ struct qpnp_pon *pon = dev_get_drvdata(&spmi->dev);
+ unsigned long flags;
+
+ device_remove_file(&spmi->dev, &dev_attr_debounce_us);
+
+ cancel_delayed_work_sync(&pon->bark_work);
+
+ if (pon->pon_input)
+ input_unregister_device(pon->pon_input);
+ qpnp_pon_debugfs_remove(spmi);
+ if (pon->is_spon) {
+ spin_lock_irqsave(&spon_list_slock, flags);
+ list_del(&pon->list);
+ spin_unlock_irqrestore(&spon_list_slock, flags);
+ }
+ return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = "qcom,qpnp-power-on", },
+ {}
+};
+
+static struct spmi_driver qpnp_pon_driver = {
+ .driver = {
+ .name = "qcom,qpnp-power-on",
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_pon_probe,
+ .remove = qpnp_pon_remove,
+};
+
+static int __init qpnp_pon_init(void)
+{
+ return spmi_driver_register(&qpnp_pon_driver);
+}
+subsys_initcall(qpnp_pon_init);
+
+static void __exit qpnp_pon_exit(void)
+{
+ return spmi_driver_unregister(&qpnp_pon_driver);
+}
+module_exit(qpnp_pon_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC POWER-ON driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/qpnp-revid.c b/drivers/platform/msm/qpnp-revid.c
new file mode 100644
index 000000000000..4c16a77e9749
--- /dev/null
+++ b/drivers/platform/msm/qpnp-revid.c
@@ -0,0 +1,245 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/err.h>
+#include <linux/qpnp/qpnp-revid.h>
+
+#define REVID_REVISION1 0x0
+#define REVID_REVISION2 0x1
+#define REVID_REVISION3 0x2
+#define REVID_REVISION4 0x3
+#define REVID_TYPE 0x4
+#define REVID_SUBTYPE 0x5
+#define REVID_STATUS1 0x8
+#define REVID_SPARE_0 0x60
+
+#define QPNP_REVID_DEV_NAME "qcom,qpnp-revid"
+
+static const char *const pmic_names[] = {
+ "Unknown PMIC",
+ "PM8941",
+ "PM8841",
+ "PM8019",
+ "PM8226",
+ "PM8110",
+ "PMA8084",
+ "PMI8962",
+ "PMD9635",
+ "PM8994",
+ "PMI8994",
+ "PM8916",
+ "PM8004",
+ "PM8909",
+ "PM2433",
+ "PMD9655",
+ "PM8950",
+ "PMI8950",
+ "PMK8001",
+ "PMI8996",
+ [25] = "PM8937",
+ [55] = "PMI8937",
+};
+
+struct revid_chip {
+ struct list_head link;
+ struct device_node *dev_node;
+ struct pmic_revid_data data;
+};
+
+static LIST_HEAD(revid_chips);
+static DEFINE_MUTEX(revid_chips_lock);
+
+static struct of_device_id qpnp_revid_match_table[] = {
+ { .compatible = QPNP_REVID_DEV_NAME },
+ {}
+};
+
+static u8 qpnp_read_byte(struct spmi_device *spmi, u16 addr)
+{
+ int rc;
+ u8 val;
+
+ rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, addr, &val, 1);
+ if (rc) {
+ pr_err("SPMI read failed rc=%d\n", rc);
+ return 0;
+ }
+ return val;
+}
+
+/**
+ * get_revid_data - Return the revision information of PMIC
+ * @dev_node: Pointer to the revid peripheral of the PMIC for which
+ * revision information is sought
+ *
+ * CONTEXT: Should be called in non-atomic context
+ *
+ * RETURNS: pointer to struct pmic_revid_data filled with the information
+ * about the PMIC revision
+ */
+struct pmic_revid_data *get_revid_data(struct device_node *dev_node)
+{
+ struct revid_chip *revid_chip;
+
+ if (!dev_node)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&revid_chips_lock);
+ list_for_each_entry(revid_chip, &revid_chips, link) {
+ if (dev_node == revid_chip->dev_node) {
+ mutex_unlock(&revid_chips_lock);
+ return &revid_chip->data;
+ }
+ }
+ mutex_unlock(&revid_chips_lock);
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(get_revid_data);
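+
+/*
+ * Usage sketch (illustrative): a consumer driver would typically resolve the
+ * revid peripheral through a phandle (the property name below is only an
+ * example) and then branch on the reported revision:
+ *
+ *	struct device_node *revid_node;
+ *	struct pmic_revid_data *revid;
+ *
+ *	revid_node = of_parse_phandle(dev->of_node, "qcom,pmic-revid", 0);
+ *	if (!revid_node)
+ *		return -EINVAL;
+ *	revid = get_revid_data(revid_node);
+ *	if (IS_ERR(revid))
+ *		return -EPROBE_DEFER;
+ *	if (revid->rev4 < 0x02)
+ *		... apply an early-silicon workaround ...
+ */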
+
+#define PM8941_PERIPHERAL_SUBTYPE 0x01
+#define PM8226_PERIPHERAL_SUBTYPE 0x04
+#define PMD9655_PERIPHERAL_SUBTYPE 0x0F
+#define PMI8950_PERIPHERAL_SUBTYPE 0x11
+#define PMI8937_PERIPHERAL_SUBTYPE 0x37
+static size_t build_pmic_string(char *buf, size_t n, int sid,
+ u8 subtype, u8 rev1, u8 rev2, u8 rev3, u8 rev4)
+{
+ size_t pos = 0;
+ /*
+ * In early versions of PM8941 and PM8226, the major revision number
+ * started incrementing from 0 (e.g. 0 = v1.0, 1 = v2.0).
+ * Increment the major revision number here if the chip is an early
+ * version of PM8941 or PM8226.
+ */
+ if (((int)subtype == PM8941_PERIPHERAL_SUBTYPE
+ || (int)subtype == PM8226_PERIPHERAL_SUBTYPE)
+ && rev4 < 0x02)
+ rev4++;
+
+ pos += snprintf(buf + pos, n - pos, "PMIC@SID%d", sid);
+ if (subtype >= ARRAY_SIZE(pmic_names) || subtype == 0)
+ pos += snprintf(buf + pos, n - pos, ": %s (subtype: 0x%02X)",
+ pmic_names[0], subtype);
+ else
+ pos += snprintf(buf + pos, n - pos, ": %s",
+ pmic_names[subtype]);
+ pos += snprintf(buf + pos, n - pos, " v%d.%d", rev4, rev3);
+ if (rev2 || rev1)
+ pos += snprintf(buf + pos, n - pos, ".%d", rev2);
+ if (rev1)
+ pos += snprintf(buf + pos, n - pos, ".%d", rev1);
+ return pos;
+}
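+
+/*
+ * For example, SID 0 with subtype 0x01 (PM8941), rev4 = 1, rev3 = 1 and
+ * rev2 = rev1 = 0 yields "PMIC@SID0: PM8941 v2.1" (note the early-silicon
+ * major revision bump applied above).
+ */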
+
+#define PMIC_PERIPHERAL_TYPE 0x51
+#define PMIC_STRING_MAXLENGTH 80
+static int qpnp_revid_probe(struct spmi_device *spmi)
+{
+ u8 rev1, rev2, rev3, rev4, pmic_type, pmic_subtype, pmic_status;
+ u8 option1, option2, option3, option4, spare0;
+ struct resource *resource;
+ char pmic_string[PMIC_STRING_MAXLENGTH] = {'\0'};
+ struct revid_chip *revid_chip;
+
+ resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!resource) {
+ pr_err("Unable to get spmi resource for REVID\n");
+ return -EINVAL;
+ }
+ pmic_type = qpnp_read_byte(spmi, resource->start + REVID_TYPE);
+ if (pmic_type != PMIC_PERIPHERAL_TYPE) {
+ pr_err("Invalid REVID peripheral type: %02X\n", pmic_type);
+ return -EINVAL;
+ }
+
+ rev1 = qpnp_read_byte(spmi, resource->start + REVID_REVISION1);
+ rev2 = qpnp_read_byte(spmi, resource->start + REVID_REVISION2);
+ rev3 = qpnp_read_byte(spmi, resource->start + REVID_REVISION3);
+ rev4 = qpnp_read_byte(spmi, resource->start + REVID_REVISION4);
+
+ pmic_subtype = qpnp_read_byte(spmi, resource->start + REVID_SUBTYPE);
+ if (pmic_subtype != PMD9655_PERIPHERAL_SUBTYPE)
+ pmic_status = qpnp_read_byte(spmi,
+ resource->start + REVID_STATUS1);
+ else
+ pmic_status = 0;
+
+ /* special case for PMI8937 */
+ if (pmic_subtype == PMI8950_PERIPHERAL_SUBTYPE) {
+ /* read spare register */
+ spare0 = qpnp_read_byte(spmi, resource->start + REVID_SPARE_0);
+ if (spare0)
+ pmic_subtype = PMI8937_PERIPHERAL_SUBTYPE;
+ }
+
+ revid_chip = devm_kzalloc(&spmi->dev, sizeof(struct revid_chip),
+ GFP_KERNEL);
+ if (!revid_chip)
+ return -ENOMEM;
+
+ revid_chip->dev_node = spmi->dev.of_node;
+ revid_chip->data.rev1 = rev1;
+ revid_chip->data.rev2 = rev2;
+ revid_chip->data.rev3 = rev3;
+ revid_chip->data.rev4 = rev4;
+ revid_chip->data.pmic_subtype = pmic_subtype;
+ revid_chip->data.pmic_type = pmic_type;
+
+ if (pmic_subtype < ARRAY_SIZE(pmic_names))
+ revid_chip->data.pmic_name = pmic_names[pmic_subtype];
+ else
+ revid_chip->data.pmic_name = pmic_names[0];
+
+ mutex_lock(&revid_chips_lock);
+ list_add(&revid_chip->link, &revid_chips);
+ mutex_unlock(&revid_chips_lock);
+
+ option1 = pmic_status & 0x3;
+ option2 = (pmic_status >> 2) & 0x3;
+ option3 = (pmic_status >> 4) & 0x3;
+ option4 = (pmic_status >> 6) & 0x3;
+
+ build_pmic_string(pmic_string, PMIC_STRING_MAXLENGTH, spmi->sid,
+ pmic_subtype, rev1, rev2, rev3, rev4);
+ pr_info("%s options: %d, %d, %d, %d\n",
+ pmic_string, option1, option2, option3, option4);
+ return 0;
+}
+
+static struct spmi_driver qpnp_revid_driver = {
+ .probe = qpnp_revid_probe,
+ .driver = {
+ .name = QPNP_REVID_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qpnp_revid_match_table,
+ },
+};
+
+static int __init qpnp_revid_init(void)
+{
+ return spmi_driver_register(&qpnp_revid_driver);
+}
+
+static void __exit qpnp_revid_exit(void)
+{
+ return spmi_driver_unregister(&qpnp_revid_driver);
+}
+
+subsys_initcall(qpnp_revid_init);
+module_exit(qpnp_revid_exit);
+
+MODULE_DESCRIPTION("QPNP REVID DRIVER");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_REVID_DEV_NAME);
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 237d7aa73e8c..4db923cc870e 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -508,8 +508,67 @@ config AXP20X_POWER
This driver provides support for the power supply features of
AXP20x PMIC.
+config QPNP_SMBCHARGER
+ tristate "QPNP SMB Charger driver"
+ depends on SPMI || MSM_SPMI
+ depends on OF_SPMI
+ help
+ Say Y here to enable the dual-path switch-mode battery charger, which
+ supports USB detection and battery charging at up to 3 A.
+ The driver also exposes relevant information to userspace via the
+ power supply framework.
+
+config QPNP_FG
+ tristate "QPNP fuel gauge driver"
+ depends on SPMI || MSM_SPMI
+ depends on OF_SPMI
+ help
+ Say Y here to enable the fuel gauge driver. This adds support for
+ battery fuel gauging and for reporting the state of charge of the
+ battery connected to the fuel gauge. The state of charge is reported
+ through a BMS power supply property, and uevents are sent when the
+ capacity is updated.
+
+config SMB135X_CHARGER
+ tristate "SMB135X Battery Charger"
+ depends on I2C
+ help
+ Say Y to include support for the SMB135X battery charger.
+ The SMB135X is a dual-path switch-mode charger capable of charging
+ the battery at up to 3 A.
+ The driver supports enabling and disabling the charger and reports
+ the charger status via the power supply framework.
+ A charger status change triggers an IRQ via the device STAT pin.
+
+config SMB1351_USB_CHARGER
+ tristate "smb1351 usb charger (with VBUS detection)"
+ depends on I2C
+ help
+ Say Y to enable support for the SMB1351 switch-mode charger.
+ The driver supports charging control (enable/disable) and
+ charge-current limiting. It also provides USB VBUS detection and
+ notification support. The driver controls the SMB1351 via I2C and
+ supports a device-tree interface.
+
+config MSM_BCL_CTL
+ bool "BCL Framework driver"
+ help
+ Say Y here to enable the BCL framework driver. This driver provides
+ an interface that the BCL hardware drivers can use to implement
+ basic functionality. The framework abstracts the underlying hardware
+ from the top-level modules.
+
+config MSM_BCL_PERIPHERAL_CTL
+ bool "BCL driver to control the PMIC BCL peripheral"
+ depends on SPMI || MSM_SPMI
+ depends on MSM_BCL_CTL
+ help
+ Say Y here to enable this BCL PMIC peripheral driver. This driver
+ provides routines to configure and monitor the BCL
+ PMIC peripheral.
+
source "drivers/power/reset/Kconfig"
endif # POWER_SUPPLY
source "drivers/power/avs/Kconfig"
+source "drivers/power/qcom/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b656638f8b39..77477d485ac5 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -72,3 +72,10 @@ obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
obj-$(CONFIG_POWER_RESET) += reset/
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
+obj-$(CONFIG_QPNP_SMBCHARGER) += qpnp-smbcharger.o pmic-voter.o
+obj-$(CONFIG_QPNP_FG) += qpnp-fg.o
+obj-$(CONFIG_SMB135X_CHARGER) += smb135x-charger.o
+obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o
+obj-$(CONFIG_MSM_BCL_CTL) += msm_bcl.o
+obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
+obj-$(CONFIG_ARCH_QCOM) += qcom/
diff --git a/drivers/power/batterydata-lib.c b/drivers/power/batterydata-lib.c
new file mode 100644
index 000000000000..226581468fda
--- /dev/null
+++ b/drivers/power/batterydata-lib.c
@@ -0,0 +1,493 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/batterydata-lib.h>
+
+int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+{
+ if (y0 == y1 || x == x0)
+ return y0;
+ if (x1 == x0 || x == x1)
+ return y1;
+
+ return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
+}
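+
+/*
+ * For example, interpolating between (x0, y0) = (10, 100) and
+ * (x1, y1) = (20, 200) at x = 15 gives 100 + (200 - 100) * (15 - 10) / (20 - 10)
+ * = 150. The arithmetic is integer arithmetic, so results truncate toward zero.
+ */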
+
+static int interpolate_single_lut_scaled(struct single_row_lut *lut,
+ int x, int scale)
+{
+ int i, result;
+
+ if (x < lut->x[0] * scale) {
+ pr_debug("x %d less than known range return y = %d lut = %pS\n",
+ x, lut->y[0], lut);
+ return lut->y[0];
+ }
+ if (x > lut->x[lut->cols - 1] * scale) {
+ pr_debug("x %d more than known range return y = %d lut = %pS\n",
+ x, lut->y[lut->cols - 1], lut);
+ return lut->y[lut->cols - 1];
+ }
+
+ for (i = 0; i < lut->cols; i++)
+ if (x <= lut->x[i] * scale)
+ break;
+ if (x == lut->x[i] * scale) {
+ result = lut->y[i];
+ } else {
+ result = linear_interpolate(
+ lut->y[i - 1],
+ lut->x[i - 1] * scale,
+ lut->y[i],
+ lut->x[i] * scale,
+ x);
+ }
+ return result;
+}
+
+int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp)
+{
+ return interpolate_single_lut_scaled(fcc_temp_lut,
+ batt_temp,
+ DEGC_SCALE);
+}
+
+int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut,
+ int cycles)
+{
+ /*
+ * sf table could be null when no battery aging data is available, in
+ * that case return 100%
+ */
+ if (fcc_sf_lut)
+ return interpolate_single_lut_scaled(fcc_sf_lut, cycles, 1);
+ else
+ return 100;
+}
+
+int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc)
+{
+ int i, scalefactorrow1, scalefactorrow2, scalefactor, rows, cols;
+ int row1 = 0;
+ int row2 = 0;
+
+ /*
+ * sf table could be null when no battery aging data is available, in
+ * that case return 100%
+ */
+ if (!sf_lut)
+ return 100;
+
+ rows = sf_lut->rows;
+ cols = sf_lut->cols;
+ if (pc > sf_lut->percent[0]) {
+ pr_debug("pc %d greater than known pc ranges for sfd\n", pc);
+ row1 = 0;
+ row2 = 0;
+ } else if (pc < sf_lut->percent[rows - 1]) {
+ pr_debug("pc %d less than known pc ranges for sf\n", pc);
+ row1 = rows - 1;
+ row2 = rows - 1;
+ } else {
+ for (i = 0; i < rows; i++) {
+ if (pc == sf_lut->percent[i]) {
+ row1 = i;
+ row2 = i;
+ break;
+ }
+ if (pc > sf_lut->percent[i]) {
+ row1 = i - 1;
+ row2 = i;
+ break;
+ }
+ }
+ }
+
+ if (row_entry < sf_lut->row_entries[0] * DEGC_SCALE)
+ row_entry = sf_lut->row_entries[0] * DEGC_SCALE;
+ if (row_entry > sf_lut->row_entries[cols - 1] * DEGC_SCALE)
+ row_entry = sf_lut->row_entries[cols - 1] * DEGC_SCALE;
+
+ for (i = 0; i < cols; i++)
+ if (row_entry <= sf_lut->row_entries[i] * DEGC_SCALE)
+ break;
+ if (row_entry == sf_lut->row_entries[i] * DEGC_SCALE) {
+ scalefactor = linear_interpolate(
+ sf_lut->sf[row1][i],
+ sf_lut->percent[row1],
+ sf_lut->sf[row2][i],
+ sf_lut->percent[row2],
+ pc);
+ return scalefactor;
+ }
+
+ scalefactorrow1 = linear_interpolate(
+ sf_lut->sf[row1][i - 1],
+ sf_lut->row_entries[i - 1] * DEGC_SCALE,
+ sf_lut->sf[row1][i],
+ sf_lut->row_entries[i] * DEGC_SCALE,
+ row_entry);
+
+ scalefactorrow2 = linear_interpolate(
+ sf_lut->sf[row2][i - 1],
+ sf_lut->row_entries[i - 1] * DEGC_SCALE,
+ sf_lut->sf[row2][i],
+ sf_lut->row_entries[i] * DEGC_SCALE,
+ row_entry);
+
+ scalefactor = linear_interpolate(
+ scalefactorrow1,
+ sf_lut->percent[row1],
+ scalefactorrow2,
+ sf_lut->percent[row2],
+ pc);
+
+ return scalefactor;
+}
+
+/* get ocv given a soc -- reverse lookup */
+int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp, int pc)
+{
+ int i, ocvrow1, ocvrow2, ocv, rows, cols;
+ int row1 = 0;
+ int row2 = 0;
+
+ rows = pc_temp_ocv->rows;
+ cols = pc_temp_ocv->cols;
+ if (pc > pc_temp_ocv->percent[0]) {
+ pr_debug("pc %d greater than known pc ranges for sfd\n", pc);
+ row1 = 0;
+ row2 = 0;
+ } else if (pc < pc_temp_ocv->percent[rows - 1]) {
+ pr_debug("pc %d less than known pc ranges for sf\n", pc);
+ row1 = rows - 1;
+ row2 = rows - 1;
+ } else {
+ for (i = 0; i < rows; i++) {
+ if (pc == pc_temp_ocv->percent[i]) {
+ row1 = i;
+ row2 = i;
+ break;
+ }
+ if (pc > pc_temp_ocv->percent[i]) {
+ row1 = i - 1;
+ row2 = i;
+ break;
+ }
+ }
+ }
+
+ if (batt_temp < pc_temp_ocv->temp[0] * DEGC_SCALE)
+ batt_temp = pc_temp_ocv->temp[0] * DEGC_SCALE;
+ if (batt_temp > pc_temp_ocv->temp[cols - 1] * DEGC_SCALE)
+ batt_temp = pc_temp_ocv->temp[cols - 1] * DEGC_SCALE;
+
+ for (i = 0; i < cols; i++)
+ if (batt_temp <= pc_temp_ocv->temp[i] * DEGC_SCALE)
+ break;
+ if (batt_temp == pc_temp_ocv->temp[i] * DEGC_SCALE) {
+ ocv = linear_interpolate(
+ pc_temp_ocv->ocv[row1][i],
+ pc_temp_ocv->percent[row1],
+ pc_temp_ocv->ocv[row2][i],
+ pc_temp_ocv->percent[row2],
+ pc);
+ return ocv;
+ }
+
+ ocvrow1 = linear_interpolate(
+ pc_temp_ocv->ocv[row1][i - 1],
+ pc_temp_ocv->temp[i - 1] * DEGC_SCALE,
+ pc_temp_ocv->ocv[row1][i],
+ pc_temp_ocv->temp[i] * DEGC_SCALE,
+ batt_temp);
+
+ ocvrow2 = linear_interpolate(
+ pc_temp_ocv->ocv[row2][i - 1],
+ pc_temp_ocv->temp[i - 1] * DEGC_SCALE,
+ pc_temp_ocv->ocv[row2][i],
+ pc_temp_ocv->temp[i] * DEGC_SCALE,
+ batt_temp);
+
+ ocv = linear_interpolate(
+ ocvrow1,
+ pc_temp_ocv->percent[row1],
+ ocvrow2,
+ pc_temp_ocv->percent[row2],
+ pc);
+
+ return ocv;
+}
+
+int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp, int ocv)
+{
+ int i, j, pcj, pcj_minus_one, pc;
+ int rows = pc_temp_ocv->rows;
+ int cols = pc_temp_ocv->cols;
+
+ if (batt_temp < pc_temp_ocv->temp[0] * DEGC_SCALE) {
+ pr_debug("batt_temp %d < known temp range\n", batt_temp);
+ batt_temp = pc_temp_ocv->temp[0] * DEGC_SCALE;
+ }
+
+ if (batt_temp > pc_temp_ocv->temp[cols - 1] * DEGC_SCALE) {
+ pr_debug("batt_temp %d > known temp range\n", batt_temp);
+ batt_temp = pc_temp_ocv->temp[cols - 1] * DEGC_SCALE;
+ }
+
+ for (j = 0; j < cols; j++)
+ if (batt_temp <= pc_temp_ocv->temp[j] * DEGC_SCALE)
+ break;
+ if (batt_temp == pc_temp_ocv->temp[j] * DEGC_SCALE) {
+ /* found an exact match for temp in the table */
+ if (ocv >= pc_temp_ocv->ocv[0][j])
+ return pc_temp_ocv->percent[0];
+ if (ocv <= pc_temp_ocv->ocv[rows - 1][j])
+ return pc_temp_ocv->percent[rows - 1];
+ for (i = 0; i < rows; i++) {
+ if (ocv >= pc_temp_ocv->ocv[i][j]) {
+ if (ocv == pc_temp_ocv->ocv[i][j])
+ return pc_temp_ocv->percent[i];
+ pc = linear_interpolate(
+ pc_temp_ocv->percent[i],
+ pc_temp_ocv->ocv[i][j],
+ pc_temp_ocv->percent[i - 1],
+ pc_temp_ocv->ocv[i - 1][j],
+ ocv);
+ return pc;
+ }
+ }
+ }
+
+ /*
+ * batt_temp is within temperature for
+ * column j-1 and j
+ */
+ if (ocv >= pc_temp_ocv->ocv[0][j])
+ return pc_temp_ocv->percent[0];
+ if (ocv <= pc_temp_ocv->ocv[rows - 1][j - 1])
+ return pc_temp_ocv->percent[rows - 1];
+
+ pcj_minus_one = 0;
+ pcj = 0;
+ for (i = 0; i < rows-1; i++) {
+ if (pcj == 0
+ && is_between(pc_temp_ocv->ocv[i][j],
+ pc_temp_ocv->ocv[i+1][j], ocv)) {
+ pcj = linear_interpolate(
+ pc_temp_ocv->percent[i],
+ pc_temp_ocv->ocv[i][j],
+ pc_temp_ocv->percent[i + 1],
+ pc_temp_ocv->ocv[i+1][j],
+ ocv);
+ }
+
+ if (pcj_minus_one == 0
+ && is_between(pc_temp_ocv->ocv[i][j-1],
+ pc_temp_ocv->ocv[i+1][j-1], ocv)) {
+ pcj_minus_one = linear_interpolate(
+ pc_temp_ocv->percent[i],
+ pc_temp_ocv->ocv[i][j-1],
+ pc_temp_ocv->percent[i + 1],
+ pc_temp_ocv->ocv[i+1][j-1],
+ ocv);
+ }
+
+ if (pcj && pcj_minus_one) {
+ pc = linear_interpolate(
+ pcj_minus_one,
+ pc_temp_ocv->temp[j-1] * DEGC_SCALE,
+ pcj,
+ pc_temp_ocv->temp[j] * DEGC_SCALE,
+ batt_temp);
+ return pc;
+ }
+ }
+
+ if (pcj)
+ return pcj;
+
+ if (pcj_minus_one)
+ return pcj_minus_one;
+
+ pr_debug("%d ocv wasn't found for temp %d in the LUT returning 100%%\n",
+ ocv, batt_temp);
+ return 100;
+}
+
+int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp, int pc)
+{
+ int i, ocvrow1, ocvrow2, rows, cols;
+ int row1 = 0;
+ int row2 = 0;
+ int slope;
+
+ rows = pc_temp_ocv->rows;
+ cols = pc_temp_ocv->cols;
+ if (pc >= pc_temp_ocv->percent[0]) {
+ pr_debug("pc %d >= max pc range - use the slope at pc=%d\n",
+ pc, pc_temp_ocv->percent[0]);
+ row1 = 0;
+ row2 = 1;
+ } else if (pc <= pc_temp_ocv->percent[rows - 1]) {
+ pr_debug("pc %d is <= min pc range - use the slope at pc=%d\n",
+ pc, pc_temp_ocv->percent[rows - 1]);
+ row1 = rows - 2;
+ row2 = rows - 1;
+ } else {
+ for (i = 0; i < rows; i++) {
+ if (pc == pc_temp_ocv->percent[i]) {
+ row1 = i - 1;
+ row2 = i;
+ break;
+ }
+ if (pc > pc_temp_ocv->percent[i]) {
+ row1 = i - 1;
+ row2 = i;
+ break;
+ }
+ }
+ }
+
+ if (batt_temp < pc_temp_ocv->temp[0] * DEGC_SCALE)
+ batt_temp = pc_temp_ocv->temp[0] * DEGC_SCALE;
+ if (batt_temp > pc_temp_ocv->temp[cols - 1] * DEGC_SCALE)
+ batt_temp = pc_temp_ocv->temp[cols - 1] * DEGC_SCALE;
+
+ for (i = 0; i < cols; i++)
+ if (batt_temp <= pc_temp_ocv->temp[i] * DEGC_SCALE)
+ break;
+
+ if (batt_temp == pc_temp_ocv->temp[i] * DEGC_SCALE) {
+ slope = (pc_temp_ocv->ocv[row1][i] -
+ pc_temp_ocv->ocv[row2][i]);
+ if (slope <= 0) {
+ pr_warn("Slope=%d for pc=%d, using 1\n", slope, pc);
+ slope = 1;
+ }
+ slope *= 1000;
+ slope /= (pc_temp_ocv->percent[row1] -
+ pc_temp_ocv->percent[row2]);
+ return slope;
+ }
+ ocvrow1 = linear_interpolate(
+ pc_temp_ocv->ocv[row1][i - 1],
+ pc_temp_ocv->temp[i - 1] * DEGC_SCALE,
+ pc_temp_ocv->ocv[row1][i],
+ pc_temp_ocv->temp[i] * DEGC_SCALE,
+ batt_temp);
+
+ ocvrow2 = linear_interpolate(
+ pc_temp_ocv->ocv[row2][i - 1],
+ pc_temp_ocv->temp[i - 1] * DEGC_SCALE,
+ pc_temp_ocv->ocv[row2][i],
+ pc_temp_ocv->temp[i] * DEGC_SCALE,
+ batt_temp);
+
+ slope = (ocvrow1 - ocvrow2);
+ if (slope <= 0) {
+ pr_warn("Slope=%d for pc=%d, using 1\n", slope, pc);
+ slope = 1;
+ }
+ slope *= 1000;
+ slope /= (pc_temp_ocv->percent[row1] - pc_temp_ocv->percent[row2]);
+
+ return slope;
+}
+
+
+int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+ int batt_temp, int ibat)
+{
+ int i, accrow1, accrow2, rows, cols;
+ int row1 = 0;
+ int row2 = 0;
+ int acc;
+
+ rows = ibat_acc_lut->rows;
+ cols = ibat_acc_lut->cols;
+
+ if (ibat > ibat_acc_lut->ibat[rows - 1]) {
+ pr_debug("ibatt(%d) > max range(%d)\n", ibat,
+ ibat_acc_lut->ibat[rows - 1]);
+ row1 = rows - 1;
+ row2 = rows - 2;
+ } else if (ibat < ibat_acc_lut->ibat[0]) {
+		pr_debug("ibatt(%d) < min range(%d)\n", ibat,
+ ibat_acc_lut->ibat[0]);
+ row1 = 0;
+ row2 = 0;
+ } else {
+ for (i = 0; i < rows; i++) {
+ if (ibat == ibat_acc_lut->ibat[i]) {
+ row1 = i;
+ row2 = i;
+ break;
+ }
+ if (ibat < ibat_acc_lut->ibat[i]) {
+ row1 = i;
+ row2 = i - 1;
+ break;
+ }
+ }
+ }
+
+ if (batt_temp < ibat_acc_lut->temp[0] * DEGC_SCALE)
+ batt_temp = ibat_acc_lut->temp[0] * DEGC_SCALE;
+ if (batt_temp > ibat_acc_lut->temp[cols - 1] * DEGC_SCALE)
+ batt_temp = ibat_acc_lut->temp[cols - 1] * DEGC_SCALE;
+
+ for (i = 0; i < cols; i++)
+ if (batt_temp <= ibat_acc_lut->temp[i] * DEGC_SCALE)
+ break;
+
+ if (batt_temp == (ibat_acc_lut->temp[i] * DEGC_SCALE)) {
+ acc = linear_interpolate(
+ ibat_acc_lut->acc[row1][i],
+ ibat_acc_lut->ibat[row1],
+ ibat_acc_lut->acc[row2][i],
+ ibat_acc_lut->ibat[row2],
+ ibat);
+ return acc;
+ }
+
+ accrow1 = linear_interpolate(
+ ibat_acc_lut->acc[row1][i - 1],
+ ibat_acc_lut->temp[i - 1] * DEGC_SCALE,
+ ibat_acc_lut->acc[row1][i],
+ ibat_acc_lut->temp[i] * DEGC_SCALE,
+ batt_temp);
+
+ accrow2 = linear_interpolate(
+ ibat_acc_lut->acc[row2][i - 1],
+ ibat_acc_lut->temp[i - 1] * DEGC_SCALE,
+ ibat_acc_lut->acc[row2][i],
+ ibat_acc_lut->temp[i] * DEGC_SCALE,
+ batt_temp);
+
+ acc = linear_interpolate(accrow1,
+ ibat_acc_lut->ibat[row1],
+ accrow2,
+ ibat_acc_lut->ibat[row2],
+ ibat);
+
+ if (acc < 0)
+ acc = 0;
+
+ return acc;
+}
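
The lookup helpers above all share the same clamped bilinear pattern: the battery temperature is clamped to the table range, the bracketing temperature column and capacity/current rows are located, the value is interpolated along the temperature axis for each bracketing row, and the two intermediate results are then interpolated along the row axis. A minimal, self-contained sketch of that pattern follows; the table values are invented for illustration, and lerp() is only a stand-in using the same (y0, x0, y1, x1, x) argument order as the linear_interpolate() calls above.

static int lerp(int y0, int x0, int y1, int x1, int x)
{
	if (x0 == x1)
		return y0;
	return y0 + (y1 - y0) * (x - x0) / (x1 - x0);
}

/* Hypothetical 2x2 slice of a pc/temp -> OCV table (values made up). */
static int example_bilinear_ocv(void)
{
	int temp[2] = { 0, 250 };	/* table temps scaled by DEGC_SCALE */
	int percent[2] = { 50, 40 };	/* rows hold descending capacity */
	int ocv[2][2] = {
		{ 3700, 3720 },		/* OCV (mV) along temp at pc = 50 */
		{ 3650, 3675 },		/* OCV (mV) along temp at pc = 40 */
	};
	int batt_temp = 100, pc = 45;
	int ocvrow1, ocvrow2;

	/* Interpolate along temperature for the two bracketing rows... */
	ocvrow1 = lerp(ocv[0][0], temp[0], ocv[0][1], temp[1], batt_temp);
	ocvrow2 = lerp(ocv[1][0], temp[0], ocv[1][1], temp[1], batt_temp);

	/* ...then along capacity between those rows: yields 3684 here. */
	return lerp(ocvrow1, percent[0], ocvrow2, percent[1], pc);
}
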
diff --git a/drivers/power/bcl_peripheral.c b/drivers/power/bcl_peripheral.c
new file mode 100644
index 000000000000..64a648c2be5b
--- /dev/null
+++ b/drivers/power/bcl_peripheral.c
@@ -0,0 +1,1153 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spmi.h>
+#include <linux/mutex.h>
+#include <linux/msm_bcl.h>
+#include <linux/power_supply.h>
+
+#define CREATE_TRACE_POINTS
+#define _BCL_HW_TRACE
+#include <trace/trace_thermal.h>
+
+#define BCL_DRIVER_NAME "bcl_peripheral"
+#define BCL_VBAT_INT_NAME "bcl-low-vbat-int"
+#define BCL_IBAT_INT_NAME "bcl-high-ibat-int"
+#define BCL_PARAM_MAX_ATTR 3
+
+#define BCL_INT_EN 0x15
+#define BCL_MONITOR_EN 0x46
+#define BCL_VBAT_VALUE 0x54
+#define BCL_IBAT_VALUE 0x55
+#define BCL_VBAT_CP_VALUE 0x56
+#define BCL_IBAT_CP_VALUE 0x57
+#define BCL_VBAT_MIN 0x58
+#define BCL_IBAT_MAX 0x59
+#define BCL_VBAT_MIN_CP 0x5A
+#define BCL_IBAT_MAX_CP 0x5B
+#define BCL_V_GAIN_BAT 0x60
+#define BCL_I_GAIN_RSENSE 0x61
+#define BCL_I_OFFSET_RSENSE 0x62
+#define BCL_I_GAIN_BATFET 0x63
+#define BCL_I_OFFSET_BATFET 0x64
+#define BCL_I_SENSE_SRC 0x65
+#define BCL_VBAT_MIN_CLR 0x66
+#define BCL_IBAT_MAX_CLR 0x67
+#define BCL_VBAT_TRIP 0x68
+#define BCL_IBAT_TRIP 0x69
+
+#define BCL_CONSTANT_NUM 32
+#define BCL_READ_RETRY_LIMIT 3
+#define VAL_CP_REG_BUF_LEN 3
+#define VAL_REG_BUF_OFFSET 0
+#define VAL_CP_REG_BUF_OFFSET 2
+#define PON_SPARE_FULL_CURRENT 0x0
+#define PON_SPARE_DERATED_CURRENT 0x1
+
+#define READ_CONV_FACTOR(_node, _key, _val, _ret, _dest) do { \
+ _ret = of_property_read_u32(_node, _key, &_val); \
+ if (_ret) { \
+ pr_err("Error reading key:%s. err:%d\n", _key, _ret); \
+ goto bcl_dev_exit; \
+ } \
+ _dest = _val; \
+ } while (0)
+
+#define READ_OPTIONAL_PROP(_node, _key, _val, _ret, _dest) do { \
+ _ret = of_property_read_u32(_node, _key, &_val); \
+ if (_ret && _ret != -EINVAL) { \
+ pr_err("Error reading key:%s. err:%d\n", _key, _ret); \
+ goto bcl_dev_exit; \
+ } else if (!_ret) { \
+ _dest = _val; \
+ } \
+ } while (0)
+
+enum bcl_monitor_state {
+ BCL_PARAM_INACTIVE,
+ BCL_PARAM_MONITOR,
+ BCL_PARAM_POLLING,
+};
+
+struct bcl_peripheral_data {
+ struct bcl_param_data *param_data;
+ struct bcl_driver_ops ops;
+ enum bcl_monitor_state state;
+ struct delayed_work poll_work;
+ int irq_num;
+ int high_trip;
+ int low_trip;
+ int trip_val;
+ int scaling_factor;
+ int offset_factor_num;
+ int offset_factor_den;
+ int offset;
+ int gain_factor_num;
+ int gain_factor_den;
+ int gain;
+ uint32_t polling_delay_ms;
+ int inhibit_derating_ua;
+ int (*read_max) (int *adc_value);
+ int (*clear_max) (void);
+ struct mutex state_trans_lock;
+};
+
+struct bcl_device {
+ bool enabled;
+ struct device *dev;
+ struct spmi_device *spmi;
+ uint16_t base_addr;
+ uint16_t pon_spare_addr;
+ uint8_t slave_id;
+ int i_src;
+ struct bcl_peripheral_data param[BCL_PARAM_MAX];
+};
+
+static struct bcl_device *bcl_perph;
+static struct power_supply bcl_psy;
+static const char bcl_psy_name[] = "fg_adc";
+static bool calibration_done;
+static DEFINE_MUTEX(bcl_access_mutex);
+static DEFINE_MUTEX(bcl_enable_mutex);
+
+static int bcl_read_multi_register(int16_t reg_offset, uint8_t *data, int len)
+{
+ int ret = 0, trace_len = 0;
+
+ if (!bcl_perph) {
+ pr_err("BCL device not initialized\n");
+ return -EINVAL;
+ }
+ ret = spmi_ext_register_readl(bcl_perph->spmi->ctrl,
+ bcl_perph->slave_id, (bcl_perph->base_addr + reg_offset),
+ data, len);
+ if (ret < 0) {
+ pr_err("Error reading register %d. err:%d", reg_offset, ret);
+ return ret;
+ }
+ while (trace_len < len) {
+ trace_bcl_hw_reg_access("Read",
+ bcl_perph->base_addr + reg_offset + trace_len,
+ data[trace_len]);
+ trace_len++;
+ }
+
+ return ret;
+}
+
+static int bcl_read_register(int16_t reg_offset, uint8_t *data)
+{
+ return bcl_read_multi_register(reg_offset, data, 1);
+}
+
+static int bcl_write_general_register(int16_t reg_offset,
+ uint16_t base, uint8_t data)
+{
+ int ret = 0;
+ uint8_t *write_buf = &data;
+
+ if (!bcl_perph) {
+ pr_err("BCL device not initialized\n");
+ return -EINVAL;
+ }
+ ret = spmi_ext_register_writel(bcl_perph->spmi->ctrl,
+ bcl_perph->slave_id, (base + reg_offset),
+ write_buf, 1);
+ if (ret < 0) {
+		pr_err("Error writing register %d. err:%d\n", reg_offset, ret);
+ return ret;
+ }
+ pr_debug("wrote 0x%02x to 0x%04x\n", data, base + reg_offset);
+ trace_bcl_hw_reg_access("write", base + reg_offset, data);
+
+ return ret;
+}
+
+static int bcl_write_register(int16_t reg_offset, uint8_t data)
+{
+ return bcl_write_general_register(reg_offset,
+ bcl_perph->base_addr, data);
+}
+
+static void convert_vbat_to_adc_val(int *val)
+{
+ struct bcl_peripheral_data *perph_data = NULL;
+
+ if (!bcl_perph)
+ return;
+ perph_data = &bcl_perph->param[BCL_PARAM_VOLTAGE];
+ *val = (*val * 100
+ / (100 + (perph_data->gain_factor_num * perph_data->gain)
+ * BCL_CONSTANT_NUM
+ / perph_data->gain_factor_den))
+ / perph_data->scaling_factor;
+ return;
+}
+
+static void convert_adc_to_vbat_val(int *val)
+{
+ struct bcl_peripheral_data *perph_data = NULL;
+
+ if (!bcl_perph)
+ return;
+ perph_data = &bcl_perph->param[BCL_PARAM_VOLTAGE];
+ *val = ((*val + 2) * perph_data->scaling_factor)
+ * (100 + (perph_data->gain_factor_num * perph_data->gain)
+ * BCL_CONSTANT_NUM / perph_data->gain_factor_den)
+ / 100;
+ return;
+}
+
+static void convert_ibat_to_adc_val(int *val)
+{
+ struct bcl_peripheral_data *perph_data = NULL;
+
+ if (!bcl_perph)
+ return;
+ perph_data = &bcl_perph->param[BCL_PARAM_CURRENT];
+ *val = (*val * 100
+ / (100 + (perph_data->gain_factor_num * perph_data->gain)
+ * BCL_CONSTANT_NUM / perph_data->gain_factor_den)
+ - (perph_data->offset_factor_num * perph_data->offset)
+ / perph_data->offset_factor_den)
+ / perph_data->scaling_factor;
+ return;
+}
+
+static void convert_adc_to_ibat_val(int *val)
+{
+ struct bcl_peripheral_data *perph_data = NULL;
+
+ if (!bcl_perph)
+ return;
+ perph_data = &bcl_perph->param[BCL_PARAM_CURRENT];
+ *val = (*val * perph_data->scaling_factor
+ + (perph_data->offset_factor_num * perph_data->offset)
+ / perph_data->offset_factor_den)
+ * (100 + (perph_data->gain_factor_num * perph_data->gain)
+ * BCL_CONSTANT_NUM / perph_data->gain_factor_den) / 100;
+ return;
+}
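+
+/*
+ * Note on the converters above: each direction applies the devicetree
+ * scaling factor together with the gain (and, for Ibat, offset) trims that
+ * bcl_calibrate() reads back from the peripheral.  For Vbat, the ADC-to-uV
+ * direction works out to:
+ *
+ *   vbat_uv = (adc + 2) * scaling_factor
+ *             * (100 + gain_num * gain * BCL_CONSTANT_NUM / gain_den) / 100
+ *
+ * For example (values purely illustrative, not real trims): adc = 100,
+ * scaling_factor = 39000, gain_num = 1, gain_den = 32, gain = 2 gives
+ * 102 * 39000 * 102 / 100 = 4057560 uV.
+ */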
+
+static int bcl_set_high_vbat(int thresh_value)
+{
+ bcl_perph->param[BCL_PARAM_VOLTAGE].high_trip = thresh_value;
+ return 0;
+}
+
+static int bcl_set_low_ibat(int thresh_value)
+{
+ bcl_perph->param[BCL_PARAM_CURRENT].low_trip = thresh_value;
+ return 0;
+}
+
+static int bcl_set_high_ibat(int thresh_value)
+{
+ int ret = 0, ibat_ua;
+ int8_t val = 0;
+
+ ibat_ua = thresh_value;
+ convert_ibat_to_adc_val(&thresh_value);
+ pr_debug("Setting Ibat high trip:%d. ADC_val:%d\n", ibat_ua,
+ thresh_value);
+ val = (int8_t)thresh_value;
+ ret = bcl_write_register(BCL_IBAT_TRIP, val);
+ if (ret) {
+ pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+ return ret;
+ }
+ bcl_perph->param[BCL_PARAM_CURRENT].high_trip = thresh_value;
+
+ if (bcl_perph->param[BCL_PARAM_CURRENT].inhibit_derating_ua == 0
+ || bcl_perph->pon_spare_addr == 0)
+ return ret;
+
+ ret = bcl_write_general_register(bcl_perph->pon_spare_addr,
+ PON_SPARE_FULL_CURRENT, val);
+ if (ret) {
+ pr_debug("Error accessing PON register. err:%d\n", ret);
+ return ret;
+ }
+ thresh_value = ibat_ua
+ - bcl_perph->param[BCL_PARAM_CURRENT].inhibit_derating_ua;
+ convert_ibat_to_adc_val(&thresh_value);
+ val = (int8_t)thresh_value;
+ ret = bcl_write_general_register(bcl_perph->pon_spare_addr,
+ PON_SPARE_DERATED_CURRENT, val);
+ if (ret) {
+ pr_debug("Error accessing PON register. err:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int bcl_set_low_vbat(int thresh_value)
+{
+ int ret = 0, vbat_uv;
+ int8_t val = 0;
+
+ vbat_uv = thresh_value;
+ convert_vbat_to_adc_val(&thresh_value);
+ pr_debug("Setting Vbat low trip:%d. ADC_val:%d\n", vbat_uv,
+ thresh_value);
+ val = (int8_t)thresh_value;
+ ret = bcl_write_register(BCL_VBAT_TRIP, val);
+ if (ret) {
+ pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+ return ret;
+ }
+ bcl_perph->param[BCL_PARAM_VOLTAGE].low_trip = thresh_value;
+
+ return ret;
+}
+
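+/*
+ * Enable or disable monitoring for both BCL parameters.  On enable, a
+ * parameter sitting in BCL_PARAM_INACTIVE has its interrupt re-armed and
+ * every parameter moves to BCL_PARAM_MONITOR.  On disable, a monitoring
+ * parameter has its interrupt masked, any pending poll work is cancelled,
+ * and the parameter returns to BCL_PARAM_INACTIVE.
+ */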
+static int bcl_access_monitor_enable(bool enable)
+{
+ int ret = 0, i = 0;
+ struct bcl_peripheral_data *perph_data = NULL;
+
+ mutex_lock(&bcl_enable_mutex);
+ if (enable == bcl_perph->enabled)
+ goto access_exit;
+
+ for (; i < BCL_PARAM_MAX; i++) {
+ perph_data = &bcl_perph->param[i];
+ mutex_lock(&perph_data->state_trans_lock);
+ if (enable) {
+ switch (perph_data->state) {
+ case BCL_PARAM_INACTIVE:
+ trace_bcl_hw_state_event(
+ (i == BCL_PARAM_VOLTAGE)
+ ? "Voltage Inactive to Monitor"
+ : "Current Inactive to Monitor",
+ 0);
+ enable_irq(perph_data->irq_num);
+ break;
+ case BCL_PARAM_POLLING:
+ case BCL_PARAM_MONITOR:
+ default:
+ break;
+ }
+ perph_data->state = BCL_PARAM_MONITOR;
+ } else {
+ switch (perph_data->state) {
+ case BCL_PARAM_MONITOR:
+ trace_bcl_hw_state_event(
+ (i == BCL_PARAM_VOLTAGE)
+ ? "Voltage Monitor to Inactive"
+ : "Current Monitor to Inactive",
+ 0);
+ disable_irq_nosync(perph_data->irq_num);
+ /* Fall through to clear the poll work */
+ case BCL_PARAM_INACTIVE:
+ case BCL_PARAM_POLLING:
+ cancel_delayed_work_sync(
+ &perph_data->poll_work);
+ break;
+ default:
+ break;
+ }
+ perph_data->state = BCL_PARAM_INACTIVE;
+ }
+ mutex_unlock(&perph_data->state_trans_lock);
+ }
+ bcl_perph->enabled = enable;
+
+access_exit:
+ mutex_unlock(&bcl_enable_mutex);
+ return ret;
+}
+
+static int bcl_monitor_enable(void)
+{
+ trace_bcl_hw_event("BCL Enable");
+ return bcl_access_monitor_enable(true);
+}
+
+static int bcl_monitor_disable(void)
+{
+ trace_bcl_hw_event("BCL Disable");
+ return bcl_access_monitor_enable(false);
+}
+
+static int bcl_read_ibat_high_trip(int *thresh_value)
+{
+ int ret = 0;
+ int8_t val = 0;
+
+ *thresh_value = (int)val;
+ ret = bcl_read_register(BCL_IBAT_TRIP, &val);
+ if (ret) {
+ pr_err("BCL register read error. err:%d\n", ret);
+ ret = 0;
+ val = bcl_perph->param[BCL_PARAM_CURRENT].high_trip;
+ *thresh_value = (int)val;
+ } else {
+ *thresh_value = (int)val;
+ convert_adc_to_ibat_val(thresh_value);
+ pr_debug("Reading Ibat high trip:%d. ADC_val:%d\n",
+ *thresh_value, val);
+ }
+
+ return ret;
+}
+
+static int bcl_read_ibat_low_trip(int *thresh_value)
+{
+ *thresh_value = bcl_perph->param[BCL_PARAM_CURRENT].low_trip;
+ return 0;
+}
+
+static int bcl_read_vbat_low_trip(int *thresh_value)
+{
+ int ret = 0;
+ int8_t val = 0;
+
+ *thresh_value = (int)val;
+ ret = bcl_read_register(BCL_VBAT_TRIP, &val);
+ if (ret) {
+ pr_err("BCL register read error. err:%d\n", ret);
+ ret = 0;
+ *thresh_value = bcl_perph->param[BCL_PARAM_VOLTAGE].low_trip;
+ } else {
+ *thresh_value = (int)val;
+ convert_adc_to_vbat_val(thresh_value);
+		pr_debug("Reading Vbat low trip:%d. ADC_val:%d\n",
+ *thresh_value, val);
+ }
+
+ return ret;
+}
+
+static int bcl_read_vbat_high_trip(int *thresh_value)
+{
+ *thresh_value = bcl_perph->param[BCL_PARAM_VOLTAGE].high_trip;
+ return 0;
+}
+
+static int bcl_clear_vbat_min(void)
+{
+ int ret = 0;
+
+ ret = bcl_write_register(BCL_VBAT_MIN_CLR, BIT(7));
+ if (ret)
+ pr_err("Error in clearing vbat min reg. err:%d", ret);
+
+ return ret;
+}
+
+static int bcl_clear_ibat_max(void)
+{
+ int ret = 0;
+
+ ret = bcl_write_register(BCL_IBAT_MAX_CLR, BIT(7));
+ if (ret)
+ pr_err("Error in clearing ibat max reg. err:%d", ret);
+
+ return ret;
+}
+
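+/*
+ * The value registers are shadowed by *_CP copy registers two bytes later;
+ * the read helpers below fetch both in one burst and retry (up to
+ * BCL_READ_RETRY_LIMIT times) until the two copies agree, presumably so a
+ * value sampled mid-update is never trusted.
+ */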
+static int bcl_read_ibat_max(int *adc_value)
+{
+ int ret = 0, timeout = 0;
+ int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ do {
+ ret = bcl_read_multi_register(BCL_IBAT_MAX, val,
+ VAL_CP_REG_BUF_LEN);
+ if (ret) {
+ pr_err("BCL register read error. err:%d\n", ret);
+ goto bcl_read_exit;
+ }
+ } while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+ && timeout++ < BCL_READ_RETRY_LIMIT);
+ if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+ ret = -ENODEV;
+ goto bcl_read_exit;
+ }
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ convert_adc_to_ibat_val(adc_value);
+ pr_debug("Ibat Max:%d. ADC_val:%d\n", *adc_value,
+ val[VAL_REG_BUF_OFFSET]);
+ trace_bcl_hw_sensor_reading("Ibat Max[uA]", *adc_value);
+
+bcl_read_exit:
+ return ret;
+}
+
+static int bcl_read_vbat_min(int *adc_value)
+{
+ int ret = 0, timeout = 0;
+ int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ do {
+ ret = bcl_read_multi_register(BCL_VBAT_MIN, val,
+ VAL_CP_REG_BUF_LEN);
+ if (ret) {
+ pr_err("BCL register read error. err:%d\n", ret);
+ goto bcl_read_exit;
+ }
+ } while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+ && timeout++ < BCL_READ_RETRY_LIMIT);
+ if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+ ret = -ENODEV;
+ goto bcl_read_exit;
+ }
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ convert_adc_to_vbat_val(adc_value);
+ pr_debug("Vbat Min:%d. ADC_val:%d\n", *adc_value,
+ val[VAL_REG_BUF_OFFSET]);
+ trace_bcl_hw_sensor_reading("vbat Min[uV]", *adc_value);
+
+bcl_read_exit:
+ return ret;
+}
+
+static int bcl_read_ibat(int *adc_value)
+{
+ int ret = 0, timeout = 0;
+ int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ do {
+ ret = bcl_read_multi_register(BCL_IBAT_VALUE, val,
+ VAL_CP_REG_BUF_LEN);
+ if (ret) {
+ pr_err("BCL register read error. err:%d\n", ret);
+ goto bcl_read_exit;
+ }
+ } while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+ && timeout++ < BCL_READ_RETRY_LIMIT);
+ if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+ ret = -ENODEV;
+ goto bcl_read_exit;
+ }
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ convert_adc_to_ibat_val(adc_value);
+ pr_debug("Read Ibat:%d. ADC_val:%d\n", *adc_value,
+ val[VAL_REG_BUF_OFFSET]);
+ trace_bcl_hw_sensor_reading("ibat[uA]", *adc_value);
+
+bcl_read_exit:
+ return ret;
+}
+
+static int bcl_read_vbat(int *adc_value)
+{
+ int ret = 0, timeout = 0;
+ int8_t val[VAL_CP_REG_BUF_LEN] = {0};
+
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ do {
+ ret = bcl_read_multi_register(BCL_VBAT_VALUE, val,
+ VAL_CP_REG_BUF_LEN);
+ if (ret) {
+ pr_err("BCL register read error. err:%d\n", ret);
+ goto bcl_read_exit;
+ }
+ } while (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]
+ && timeout++ < BCL_READ_RETRY_LIMIT);
+ if (val[VAL_REG_BUF_OFFSET] != val[VAL_CP_REG_BUF_OFFSET]) {
+ ret = -ENODEV;
+ goto bcl_read_exit;
+ }
+ *adc_value = (int)val[VAL_REG_BUF_OFFSET];
+ convert_adc_to_vbat_val(adc_value);
+ pr_debug("Read Vbat:%d. ADC_val:%d\n", *adc_value,
+ val[VAL_REG_BUF_OFFSET]);
+ trace_bcl_hw_sensor_reading("vbat[uV]", *adc_value);
+
+bcl_read_exit:
+ return ret;
+}
+
+static void bcl_poll_ibat_low(struct work_struct *work)
+{
+ int ret = 0, val = 0;
+ struct bcl_peripheral_data *perph_data =
+ &bcl_perph->param[BCL_PARAM_CURRENT];
+
+ trace_bcl_hw_event("ibat poll low. Enter");
+ mutex_lock(&perph_data->state_trans_lock);
+ if (perph_data->state != BCL_PARAM_POLLING) {
+ pr_err("Invalid ibat state %d\n", perph_data->state);
+ goto exit_ibat;
+ }
+
+ ret = perph_data->read_max(&val);
+ if (ret) {
+ pr_err("Error in reading ibat. err:%d", ret);
+ goto reschedule_ibat;
+ }
+ ret = perph_data->clear_max();
+ if (ret)
+ pr_err("Error clearing max ibat reg. err:%d\n", ret);
+ if (val <= perph_data->low_trip) {
+ pr_debug("Ibat reached low clear trip. ibat:%d\n", val);
+ trace_bcl_hw_state_event("Polling to Monitor. Ibat[uA]:", val);
+ trace_bcl_hw_mitigation("Ibat low trip. Ibat[uA]", val);
+ perph_data->ops.notify(perph_data->param_data, val,
+ BCL_LOW_TRIP);
+ perph_data->state = BCL_PARAM_MONITOR;
+ enable_irq(perph_data->irq_num);
+ } else {
+ goto reschedule_ibat;
+ }
+
+exit_ibat:
+ mutex_unlock(&perph_data->state_trans_lock);
+ trace_bcl_hw_event("ibat poll low. Exit");
+ return;
+
+reschedule_ibat:
+ mutex_unlock(&perph_data->state_trans_lock);
+ schedule_delayed_work(&perph_data->poll_work,
+ msecs_to_jiffies(perph_data->polling_delay_ms));
+ trace_bcl_hw_event("ibat poll low. Exit");
+ return;
+}
+
+static void bcl_poll_vbat_high(struct work_struct *work)
+{
+ int ret = 0, val = 0;
+ struct bcl_peripheral_data *perph_data =
+ &bcl_perph->param[BCL_PARAM_VOLTAGE];
+
+ trace_bcl_hw_event("vbat poll high. Enter");
+ mutex_lock(&perph_data->state_trans_lock);
+ if (perph_data->state != BCL_PARAM_POLLING) {
+ pr_err("Invalid vbat state %d\n", perph_data->state);
+ goto exit_vbat;
+ }
+
+ ret = perph_data->read_max(&val);
+ if (ret) {
+ pr_err("Error in reading vbat. err:%d", ret);
+ goto reschedule_vbat;
+ }
+ ret = perph_data->clear_max();
+ if (ret)
+ pr_err("Error clearing min vbat reg. err:%d\n", ret);
+ if (val >= perph_data->high_trip) {
+ pr_debug("Vbat reached high clear trip. vbat:%d\n", val);
+ trace_bcl_hw_state_event("Polling to Monitor. vbat[uV]:", val);
+ trace_bcl_hw_mitigation("vbat high trip. vbat[uV]", val);
+ perph_data->ops.notify(perph_data->param_data, val,
+ BCL_HIGH_TRIP);
+ perph_data->state = BCL_PARAM_MONITOR;
+ enable_irq(perph_data->irq_num);
+ } else {
+ goto reschedule_vbat;
+ }
+
+exit_vbat:
+ mutex_unlock(&perph_data->state_trans_lock);
+ trace_bcl_hw_event("vbat poll high. Exit");
+ return;
+
+reschedule_vbat:
+ mutex_unlock(&perph_data->state_trans_lock);
+ schedule_delayed_work(&perph_data->poll_work,
+ msecs_to_jiffies(perph_data->polling_delay_ms));
+ trace_bcl_hw_event("vbat poll high. Exit");
+ return;
+}
+
+static irqreturn_t bcl_handle_ibat(int irq, void *data)
+{
+ int thresh_value = 0, ret = 0;
+ struct bcl_peripheral_data *perph_data =
+ (struct bcl_peripheral_data *)data;
+
+ trace_bcl_hw_mitigation_event("Ibat interrupted");
+ mutex_lock(&perph_data->state_trans_lock);
+ if (perph_data->state == BCL_PARAM_MONITOR) {
+ ret = perph_data->read_max(&perph_data->trip_val);
+ if (ret) {
+ pr_err("Error reading max/min reg. err:%d\n", ret);
+ goto exit_intr;
+ }
+ ret = perph_data->clear_max();
+ if (ret)
+ pr_err("Error clearing max/min reg. err:%d\n", ret);
+ thresh_value = perph_data->high_trip;
+ convert_adc_to_ibat_val(&thresh_value);
+ /* Account threshold trip from PBS threshold for dead time */
+ thresh_value -= perph_data->inhibit_derating_ua;
+ if (perph_data->trip_val < thresh_value) {
+ pr_debug("False Ibat high trip. ibat:%d ibat_thresh_val:%d\n",
+ perph_data->trip_val, thresh_value);
+ trace_bcl_hw_event("Ibat invalid interrupt");
+ goto exit_intr;
+ }
+ pr_debug("Ibat reached high trip. ibat:%d\n",
+ perph_data->trip_val);
+ trace_bcl_hw_state_event("Monitor to Polling. ibat[uA]:",
+ perph_data->trip_val);
+ disable_irq_nosync(perph_data->irq_num);
+ perph_data->state = BCL_PARAM_POLLING;
+ trace_bcl_hw_mitigation("ibat high trip. ibat[uA]",
+ perph_data->trip_val);
+ perph_data->ops.notify(perph_data->param_data,
+ perph_data->trip_val, BCL_HIGH_TRIP);
+ schedule_delayed_work(&perph_data->poll_work,
+ msecs_to_jiffies(perph_data->polling_delay_ms));
+ } else {
+ pr_debug("Ignoring interrupt\n");
+ trace_bcl_hw_event("Ibat Ignoring interrupt");
+ }
+
+exit_intr:
+ mutex_unlock(&perph_data->state_trans_lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bcl_handle_vbat(int irq, void *data)
+{
+ int thresh_value = 0, ret = 0;
+ struct bcl_peripheral_data *perph_data =
+ (struct bcl_peripheral_data *)data;
+
+ trace_bcl_hw_mitigation_event("Vbat Interrupted");
+ mutex_lock(&perph_data->state_trans_lock);
+ if (perph_data->state == BCL_PARAM_MONITOR) {
+ ret = perph_data->read_max(&perph_data->trip_val);
+ if (ret) {
+ pr_err("Error reading max/min reg. err:%d\n", ret);
+ goto exit_intr;
+ }
+ ret = perph_data->clear_max();
+ if (ret)
+ pr_err("Error clearing max/min reg. err:%d\n", ret);
+ thresh_value = perph_data->low_trip;
+ convert_adc_to_vbat_val(&thresh_value);
+ if (perph_data->trip_val > thresh_value) {
+ pr_debug("False vbat min trip. vbat:%d vbat_thresh_val:%d\n",
+ perph_data->trip_val, thresh_value);
+ trace_bcl_hw_event("Vbat Invalid interrupt");
+ goto exit_intr;
+ }
+ pr_debug("Vbat reached Low trip. vbat:%d\n",
+ perph_data->trip_val);
+ trace_bcl_hw_state_event("Monitor to Polling. vbat[uV]:",
+ perph_data->trip_val);
+ disable_irq_nosync(perph_data->irq_num);
+ perph_data->state = BCL_PARAM_POLLING;
+ trace_bcl_hw_mitigation("vbat low trip. vbat[uV]",
+ perph_data->trip_val);
+ perph_data->ops.notify(perph_data->param_data,
+ perph_data->trip_val, BCL_LOW_TRIP);
+ schedule_delayed_work(&perph_data->poll_work,
+ msecs_to_jiffies(perph_data->polling_delay_ms));
+ } else {
+ pr_debug("Ignoring interrupt\n");
+ trace_bcl_hw_event("Vbat Ignoring interrupt");
+ }
+
+exit_intr:
+ mutex_unlock(&perph_data->state_trans_lock);
+ return IRQ_HANDLED;
+}
+
+static int bcl_get_devicetree_data(struct spmi_device *spmi)
+{
+ int ret = 0, irq_num = 0, temp_val = 0;
+ struct resource *resource = NULL;
+ char *key = NULL;
+ const __be32 *prop = NULL;
+ struct device_node *dev_node = spmi->dev.of_node;
+
+ /* Get SPMI peripheral address */
+ resource = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!resource) {
+ pr_err("No base address defined\n");
+ return -EINVAL;
+ }
+ bcl_perph->slave_id = spmi->sid;
+ prop = of_get_address_by_name(dev_node,
+ "fg_user_adc", 0, 0);
+ if (prop) {
+ bcl_perph->base_addr = be32_to_cpu(*prop);
+ pr_debug("fg_user_adc@%04x\n", bcl_perph->base_addr);
+ } else {
+ dev_err(&spmi->dev, "No fg_user_adc registers found\n");
+ return -EINVAL;
+ }
+
+ prop = of_get_address_by_name(dev_node,
+ "pon_spare", 0, 0);
+ if (prop) {
+ bcl_perph->pon_spare_addr = be32_to_cpu(*prop);
+ pr_debug("pon_spare@%04x\n", bcl_perph->pon_spare_addr);
+ }
+
+ /* Register SPMI peripheral interrupt */
+ irq_num = spmi_get_irq_byname(spmi, NULL,
+ BCL_VBAT_INT_NAME);
+ if (irq_num < 0) {
+ pr_err("Invalid vbat IRQ\n");
+ ret = -ENXIO;
+ goto bcl_dev_exit;
+ }
+ bcl_perph->param[BCL_PARAM_VOLTAGE].irq_num = irq_num;
+ irq_num = spmi_get_irq_byname(spmi, NULL,
+ BCL_IBAT_INT_NAME);
+ if (irq_num < 0) {
+ pr_err("Invalid ibat IRQ\n");
+ ret = -ENXIO;
+ goto bcl_dev_exit;
+ }
+ bcl_perph->param[BCL_PARAM_CURRENT].irq_num = irq_num;
+
+ /* Get VADC and IADC scaling factor */
+ key = "qcom,vbat-scaling-factor";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_VOLTAGE].scaling_factor);
+ key = "qcom,vbat-gain-numerator";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_VOLTAGE].gain_factor_num);
+ key = "qcom,vbat-gain-denominator";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_VOLTAGE].gain_factor_den);
+ key = "qcom,ibat-scaling-factor";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_CURRENT].scaling_factor);
+ key = "qcom,ibat-offset-numerator";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_CURRENT].offset_factor_num);
+ key = "qcom,ibat-offset-denominator";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_CURRENT].offset_factor_den);
+ key = "qcom,ibat-gain-numerator";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_CURRENT].gain_factor_num);
+ key = "qcom,ibat-gain-denominator";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_CURRENT].gain_factor_den);
+ key = "qcom,vbat-polling-delay-ms";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_VOLTAGE].polling_delay_ms);
+ key = "qcom,ibat-polling-delay-ms";
+ READ_CONV_FACTOR(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_CURRENT].polling_delay_ms);
+ key = "qcom,inhibit-derating-ua";
+ READ_OPTIONAL_PROP(dev_node, key, temp_val, ret,
+ bcl_perph->param[BCL_PARAM_CURRENT].inhibit_derating_ua);
+
+bcl_dev_exit:
+ return ret;
+}
+
+static int bcl_calibrate(void)
+{
+ int ret = 0;
+ int8_t i_src = 0, val = 0;
+
+ ret = bcl_read_register(BCL_I_SENSE_SRC, &i_src);
+ if (ret) {
+ pr_err("Error reading current sense reg. err:%d\n", ret);
+ goto bcl_cal_exit;
+ }
+
+ ret = bcl_read_register((i_src & 0x01) ? BCL_I_GAIN_RSENSE
+ : BCL_I_GAIN_BATFET, &val);
+ if (ret) {
+ pr_err("Error reading %s current gain. err:%d\n",
+ (i_src & 0x01) ? "rsense" : "batfet", ret);
+ goto bcl_cal_exit;
+ }
+ bcl_perph->param[BCL_PARAM_CURRENT].gain = val;
+ ret = bcl_read_register((i_src & 0x01) ? BCL_I_OFFSET_RSENSE
+ : BCL_I_OFFSET_BATFET, &val);
+ if (ret) {
+ pr_err("Error reading %s current offset. err:%d\n",
+ (i_src & 0x01) ? "rsense" : "batfet", ret);
+ goto bcl_cal_exit;
+ }
+ bcl_perph->param[BCL_PARAM_CURRENT].offset = val;
+ ret = bcl_read_register(BCL_V_GAIN_BAT, &val);
+ if (ret) {
+ pr_err("Error reading vbat offset. err:%d\n", ret);
+ goto bcl_cal_exit;
+ }
+ bcl_perph->param[BCL_PARAM_VOLTAGE].gain = val;
+
+ if (((i_src & 0x01) != bcl_perph->i_src)
+ && (bcl_perph->enabled)) {
+ bcl_set_low_vbat(bcl_perph->param[BCL_PARAM_VOLTAGE]
+ .low_trip);
+ bcl_set_high_ibat(bcl_perph->param[BCL_PARAM_CURRENT]
+ .high_trip);
+ bcl_perph->i_src = i_src;
+ }
+
+bcl_cal_exit:
+ return ret;
+}
+
+static void power_supply_callback(struct power_supply *psy)
+{
+ static struct power_supply *bms_psy;
+ int ret = 0;
+
+ if (calibration_done)
+ return;
+
+ if (!bms_psy)
+ bms_psy = power_supply_get_by_name("bms");
+ if (bms_psy) {
+ calibration_done = true;
+ trace_bcl_hw_event("Recalibrate callback");
+ ret = bcl_calibrate();
+ if (ret)
+ pr_err("Could not read calibration values. err:%d",
+ ret);
+ }
+}
+
+static int bcl_psy_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ return 0;
+}
+static int bcl_psy_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ return -EINVAL;
+}
+
+static int bcl_update_data(void)
+{
+ int ret = 0;
+
+ bcl_perph->param[BCL_PARAM_VOLTAGE].ops.read = bcl_read_vbat;
+ bcl_perph->param[BCL_PARAM_VOLTAGE].ops.get_high_trip
+ = bcl_read_vbat_high_trip;
+ bcl_perph->param[BCL_PARAM_VOLTAGE].ops.get_low_trip
+ = bcl_read_vbat_low_trip;
+ bcl_perph->param[BCL_PARAM_VOLTAGE].ops.set_high_trip
+ = bcl_set_high_vbat;
+ bcl_perph->param[BCL_PARAM_VOLTAGE].ops.set_low_trip
+ = bcl_set_low_vbat;
+ bcl_perph->param[BCL_PARAM_VOLTAGE].ops.enable
+ = bcl_monitor_enable;
+ bcl_perph->param[BCL_PARAM_VOLTAGE].ops.disable
+ = bcl_monitor_disable;
+ bcl_perph->param[BCL_PARAM_VOLTAGE].read_max
+ = bcl_read_vbat_min;
+ bcl_perph->param[BCL_PARAM_VOLTAGE].clear_max
+ = bcl_clear_vbat_min;
+
+ bcl_perph->param[BCL_PARAM_CURRENT].ops.read = bcl_read_ibat;
+ bcl_perph->param[BCL_PARAM_CURRENT].ops.get_high_trip
+ = bcl_read_ibat_high_trip;
+ bcl_perph->param[BCL_PARAM_CURRENT].ops.get_low_trip
+ = bcl_read_ibat_low_trip;
+ bcl_perph->param[BCL_PARAM_CURRENT].ops.set_high_trip
+ = bcl_set_high_ibat;
+ bcl_perph->param[BCL_PARAM_CURRENT].ops.set_low_trip
+ = bcl_set_low_ibat;
+ bcl_perph->param[BCL_PARAM_CURRENT].ops.enable
+ = bcl_monitor_enable;
+ bcl_perph->param[BCL_PARAM_CURRENT].ops.disable
+ = bcl_monitor_disable;
+ bcl_perph->param[BCL_PARAM_CURRENT].read_max
+ = bcl_read_ibat_max;
+ bcl_perph->param[BCL_PARAM_CURRENT].clear_max
+ = bcl_clear_ibat_max;
+
+ bcl_perph->param[BCL_PARAM_VOLTAGE].param_data = msm_bcl_register_param(
+ BCL_PARAM_VOLTAGE, &bcl_perph->param[BCL_PARAM_VOLTAGE].ops,
+ "vbat");
+ if (!bcl_perph->param[BCL_PARAM_VOLTAGE].param_data) {
+ pr_err("register Vbat failed.\n");
+ ret = -ENODEV;
+ goto update_data_exit;
+ }
+ bcl_perph->param[BCL_PARAM_CURRENT].param_data = msm_bcl_register_param(
+ BCL_PARAM_CURRENT, &bcl_perph->param[BCL_PARAM_CURRENT].ops,
+ "ibat");
+ if (!bcl_perph->param[BCL_PARAM_CURRENT].param_data) {
+ pr_err("register Ibat failed.\n");
+ ret = -ENODEV;
+ goto update_data_exit;
+ }
+ INIT_DELAYED_WORK(&bcl_perph->param[BCL_PARAM_VOLTAGE].poll_work,
+ bcl_poll_vbat_high);
+ INIT_DELAYED_WORK(&bcl_perph->param[BCL_PARAM_CURRENT].poll_work,
+ bcl_poll_ibat_low);
+ mutex_init(&bcl_perph->param[BCL_PARAM_CURRENT].state_trans_lock);
+ mutex_init(&bcl_perph->param[BCL_PARAM_VOLTAGE].state_trans_lock);
+
+update_data_exit:
+ return ret;
+}
+
+static int bcl_probe(struct spmi_device *spmi)
+{
+ int ret = 0;
+
+ bcl_perph = devm_kzalloc(&spmi->dev, sizeof(struct bcl_device),
+ GFP_KERNEL);
+ if (!bcl_perph) {
+ pr_err("Memory alloc failed\n");
+ return -ENOMEM;
+ }
+ memset(bcl_perph, 0, sizeof(struct bcl_device));
+ bcl_perph->spmi = spmi;
+ bcl_perph->dev = &(spmi->dev);
+
+ ret = bcl_get_devicetree_data(spmi);
+ if (ret) {
+ pr_err("Device tree data fetch error. err:%d", ret);
+ goto bcl_probe_exit;
+ }
+ ret = bcl_calibrate();
+ if (ret) {
+ pr_debug("Could not read calibration values. err:%d", ret);
+ goto bcl_probe_exit;
+ }
+ bcl_psy.name = bcl_psy_name;
+ bcl_psy.type = POWER_SUPPLY_TYPE_BMS;
+ bcl_psy.get_property = bcl_psy_get_property;
+ bcl_psy.set_property = bcl_psy_set_property;
+ bcl_psy.num_properties = 0;
+ bcl_psy.external_power_changed = power_supply_callback;
+ ret = power_supply_register(&spmi->dev, &bcl_psy);
+ if (ret < 0) {
+ pr_err("Unable to register bcl_psy rc = %d\n", ret);
+ return ret;
+ }
+
+ ret = bcl_update_data();
+ if (ret) {
+ pr_err("Update data failed. err:%d", ret);
+ goto bcl_probe_exit;
+ }
+ mutex_lock(&bcl_perph->param[BCL_PARAM_VOLTAGE].state_trans_lock);
+ ret = devm_request_threaded_irq(&spmi->dev,
+ bcl_perph->param[BCL_PARAM_VOLTAGE].irq_num,
+ NULL, bcl_handle_vbat,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "bcl_vbat_interrupt",
+ &bcl_perph->param[BCL_PARAM_VOLTAGE]);
+ if (ret) {
+ dev_err(&spmi->dev, "Error requesting VBAT irq. err:%d", ret);
+ mutex_unlock(
+ &bcl_perph->param[BCL_PARAM_VOLTAGE].state_trans_lock);
+ goto bcl_probe_exit;
+ }
+ /*
+ * BCL is enabled by default in hardware.
+ * Disable BCL monitoring till a valid threshold is set by APPS
+ */
+ disable_irq_nosync(bcl_perph->param[BCL_PARAM_VOLTAGE].irq_num);
+ mutex_unlock(&bcl_perph->param[BCL_PARAM_VOLTAGE].state_trans_lock);
+
+ mutex_lock(&bcl_perph->param[BCL_PARAM_CURRENT].state_trans_lock);
+ ret = devm_request_threaded_irq(&spmi->dev,
+ bcl_perph->param[BCL_PARAM_CURRENT].irq_num,
+ NULL, bcl_handle_ibat,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "bcl_ibat_interrupt",
+ &bcl_perph->param[BCL_PARAM_CURRENT]);
+ if (ret) {
+ dev_err(&spmi->dev, "Error requesting IBAT irq. err:%d", ret);
+ mutex_unlock(
+ &bcl_perph->param[BCL_PARAM_CURRENT].state_trans_lock);
+ goto bcl_probe_exit;
+ }
+ disable_irq_nosync(bcl_perph->param[BCL_PARAM_CURRENT].irq_num);
+ mutex_unlock(&bcl_perph->param[BCL_PARAM_CURRENT].state_trans_lock);
+
+ dev_set_drvdata(&spmi->dev, bcl_perph);
+ ret = bcl_write_register(BCL_MONITOR_EN, BIT(7));
+ if (ret) {
+ pr_err("Error accessing BCL peripheral. err:%d\n", ret);
+ goto bcl_probe_exit;
+ }
+
+ return 0;
+
+bcl_probe_exit:
+ bcl_perph = NULL;
+ return ret;
+}
+
+static int bcl_remove(struct spmi_device *spmi)
+{
+ int ret = 0, i = 0;
+
+ ret = bcl_monitor_disable();
+ if (ret)
+ pr_err("Error disabling BCL. err:%d\n", ret);
+
+ for (; i < BCL_PARAM_MAX; i++) {
+ if (!bcl_perph->param[i].param_data)
+ continue;
+
+ ret = msm_bcl_unregister_param(bcl_perph->param[i].param_data);
+ if (ret)
+ pr_err("Error unregistering with Framework. err:%d\n",
+ ret);
+ }
+
+ return 0;
+}
+
+static struct of_device_id bcl_match[] = {
+ { .compatible = "qcom,msm-bcl",
+ },
+ {},
+};
+
+static struct spmi_driver bcl_driver = {
+ .probe = bcl_probe,
+ .remove = bcl_remove,
+ .driver = {
+ .name = BCL_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = bcl_match,
+ },
+};
+
+static int __init bcl_perph_init(void)
+{
+ pr_info("BCL Initialized\n");
+ return spmi_driver_register(&bcl_driver);
+}
+
+static void __exit bcl_perph_exit(void)
+{
+ spmi_driver_unregister(&bcl_driver);
+}
+fs_initcall(bcl_perph_init);
+module_exit(bcl_perph_exit);
+MODULE_ALIAS("platform:" BCL_DRIVER_NAME);
+
diff --git a/drivers/power/msm_bcl.c b/drivers/power/msm_bcl.c
new file mode 100644
index 000000000000..6b7cefdc0250
--- /dev/null
+++ b/drivers/power/msm_bcl.c
@@ -0,0 +1,374 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/mutex.h>
+#include <linux/msm_bcl.h>
+#include <linux/slab.h>
+
+#define BCL_PARAM_MAX_ATTR 3
+
+#define BCL_DEFINE_RO_PARAM(_attr, _name, _attr_gp, _index) \
+ _attr.attr.name = __stringify(_name); \
+ _attr.attr.mode = 0444; \
+ _attr.show = _name##_show; \
+ _attr_gp.attrs[_index] = &_attr.attr;
+
+static struct bcl_param_data *bcl[BCL_PARAM_MAX];
+
+static ssize_t high_trip_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int val = 0, ret = 0;
+ struct bcl_param_data *dev_param = container_of(attr,
+ struct bcl_param_data, high_trip_attr);
+
+ if (!dev_param->registered)
+ return -ENODEV;
+
+ ret = dev_param->ops->get_high_trip(&val);
+ if (ret) {
+ pr_err("High trip value read failed. err:%d\n", ret);
+ return ret;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t low_trip_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int val = 0, ret = 0;
+ struct bcl_param_data *dev_param = container_of(attr,
+ struct bcl_param_data, low_trip_attr);
+
+ if (!dev_param->registered)
+ return -ENODEV;
+
+ ret = dev_param->ops->get_low_trip(&val);
+ if (ret) {
+ pr_err("Low trip value read failed. err:%d\n", ret);
+ return ret;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int32_t val = 0, ret = 0;
+ struct bcl_param_data *dev_param = container_of(attr,
+ struct bcl_param_data, val_attr);
+
+ if (!dev_param->registered)
+ return -ENODEV;
+
+ ret = dev_param->ops->read(&val);
+ if (ret) {
+ pr_err("Value read failed. err:%d\n", ret);
+ return ret;
+ }
+ dev_param->last_read_val = val;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+int msm_bcl_set_threshold(enum bcl_param param_type,
+ enum bcl_trip_type trip_type, struct bcl_threshold *inp_thresh)
+{
+ int ret = 0;
+
+ if (!bcl[param_type] || !bcl[param_type]->registered) {
+ pr_err("BCL not initialized\n");
+ return -EINVAL;
+ }
+ if ((!inp_thresh)
+ || (inp_thresh->trip_value < 0)
+ || (!inp_thresh->trip_notify)
+ || (param_type >= BCL_PARAM_MAX)
+ || (trip_type >= BCL_TRIP_MAX)) {
+ pr_err("Invalid Input\n");
+ return -EINVAL;
+ }
+
+ bcl[param_type]->thresh[trip_type] = inp_thresh;
+ if (trip_type == BCL_HIGH_TRIP) {
+ bcl[param_type]->high_trip = inp_thresh->trip_value;
+ ret = bcl[param_type]->ops->set_high_trip(
+ inp_thresh->trip_value);
+ } else {
+ bcl[param_type]->low_trip = inp_thresh->trip_value;
+ ret = bcl[param_type]->ops->set_low_trip(
+ inp_thresh->trip_value);
+ }
+ if (ret) {
+ pr_err("Error setting trip%d for param%d. err:%d\n", trip_type,
+ param_type, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int bcl_thresh_notify(struct bcl_param_data *param_data, int val,
+ enum bcl_trip_type trip_type)
+{
+ if (!param_data || trip_type >= BCL_TRIP_MAX
+ || !param_data->registered) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+
+ param_data->thresh[trip_type]->trip_notify(trip_type, val,
+ param_data->thresh[trip_type]->trip_data);
+
+ return 0;
+}
+
+static int bcl_add_sysfs_nodes(enum bcl_param param_type);
+struct bcl_param_data *msm_bcl_register_param(enum bcl_param param_type,
+ struct bcl_driver_ops *param_ops, char *name)
+{
+ int ret = 0;
+
+ if (!bcl[param_type]
+ || param_type >= BCL_PARAM_MAX || !param_ops || !name
+ || !param_ops->read || !param_ops->set_high_trip
+ || !param_ops->get_high_trip || !param_ops->set_low_trip
+ || !param_ops->get_low_trip || !param_ops->enable
+ || !param_ops->disable) {
+ pr_err("Invalid input\n");
+ return NULL;
+ }
+ if (bcl[param_type]->registered) {
+ pr_err("param%d already initialized\n", param_type);
+ return NULL;
+ }
+
+ ret = bcl_add_sysfs_nodes(param_type);
+ if (ret) {
+ pr_err("Error creating sysfs nodes. err:%d\n", ret);
+ return NULL;
+ }
+ bcl[param_type]->ops = param_ops;
+ bcl[param_type]->registered = true;
+ strlcpy(bcl[param_type]->name, name, BCL_NAME_MAX_LEN);
+ param_ops->notify = bcl_thresh_notify;
+
+ return bcl[param_type];
+}
+
+int msm_bcl_unregister_param(struct bcl_param_data *param_data)
+{
+ int i = 0, ret = -EINVAL;
+
+ if (!bcl[i] || !param_data) {
+ pr_err("Invalid input\n");
+ return ret;
+ }
+ for (; i < BCL_PARAM_MAX; i++) {
+ if (param_data != bcl[i])
+ continue;
+ bcl[i]->ops->disable();
+ bcl[i]->registered = false;
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+int msm_bcl_disable(void)
+{
+ int ret = 0, i = 0;
+
+ if (!bcl[i]) {
+ pr_err("BCL not initialized\n");
+ return -EINVAL;
+ }
+
+ for (; i < BCL_PARAM_MAX; i++) {
+ if (!bcl[i]->registered)
+ continue;
+ ret = bcl[i]->ops->disable();
+ if (ret) {
+			pr_err("Error in disabling interrupt. param:%d err:%d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int msm_bcl_enable(void)
+{
+ int ret = 0, i = 0;
+ struct bcl_param_data *param_data = NULL;
+
+ if (!bcl[i] || !bcl[BCL_PARAM_VOLTAGE]->thresh
+ || !bcl[BCL_PARAM_CURRENT]->thresh) {
+ pr_err("BCL not initialized\n");
+ return -EINVAL;
+ }
+
+ for (; i < BCL_PARAM_MAX; i++) {
+ if (!bcl[i]->registered)
+ continue;
+ param_data = bcl[i];
+ ret = param_data->ops->set_high_trip(param_data->high_trip);
+ if (ret) {
+ pr_err("Error setting high trip. param:%d. err:%d",
+ i, ret);
+ return ret;
+ }
+ ret = param_data->ops->set_low_trip(param_data->low_trip);
+ if (ret) {
+ pr_err("Error setting low trip. param:%d. err:%d",
+ i, ret);
+ return ret;
+ }
+ ret = param_data->ops->enable();
+ if (ret) {
+ pr_err("Error enabling interrupt. param:%d. err:%d",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+int msm_bcl_read(enum bcl_param param_type, int *value)
+{
+ int ret = 0;
+
+ if (!value || param_type >= BCL_PARAM_MAX) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ if (!bcl[param_type] || !bcl[param_type]->registered) {
+ pr_err("BCL driver not initialized\n");
+ return -ENOSYS;
+ }
+
+ ret = bcl[param_type]->ops->read(value);
+ if (ret) {
+ pr_err("Error reading param%d. err:%d\n", param_type, ret);
+ return ret;
+ }
+ bcl[param_type]->last_read_val = *value;
+
+ return ret;
+}
+
+static struct class msm_bcl_class = {
+ .name = "msm_bcl",
+};
+
+static int bcl_add_sysfs_nodes(enum bcl_param param_type)
+{
+ char *param_name[BCL_PARAM_MAX] = {"voltage", "current"};
+ int ret = 0;
+
+ bcl[param_type]->device.class = &msm_bcl_class;
+ dev_set_name(&bcl[param_type]->device, "%s", param_name[param_type]);
+ ret = device_register(&bcl[param_type]->device);
+ if (ret) {
+ pr_err("Error registering device %s. err:%d\n",
+ param_name[param_type], ret);
+ return ret;
+ }
+ bcl[param_type]->bcl_attr_gp.attrs = kzalloc(sizeof(struct attribute *)
+ * (BCL_PARAM_MAX_ATTR + 1), GFP_KERNEL);
+ if (!bcl[param_type]->bcl_attr_gp.attrs) {
+ pr_err("Sysfs attribute create failed.\n");
+ ret = -ENOMEM;
+ goto add_sysfs_exit;
+ }
+ BCL_DEFINE_RO_PARAM(bcl[param_type]->val_attr, value,
+ bcl[param_type]->bcl_attr_gp, 0);
+ BCL_DEFINE_RO_PARAM(bcl[param_type]->high_trip_attr, high_trip,
+ bcl[param_type]->bcl_attr_gp, 1);
+ BCL_DEFINE_RO_PARAM(bcl[param_type]->low_trip_attr, low_trip,
+ bcl[param_type]->bcl_attr_gp, 2);
+ bcl[param_type]->bcl_attr_gp.attrs[BCL_PARAM_MAX_ATTR] = NULL;
+
+ ret = sysfs_create_group(&bcl[param_type]->device.kobj,
+ &bcl[param_type]->bcl_attr_gp);
+ if (ret) {
+ pr_err("Failure to create sysfs nodes. err:%d", ret);
+ goto add_sysfs_exit;
+ }
+
+add_sysfs_exit:
+ return ret;
+}
+
+static int msm_bcl_init(void)
+{
+ int ret = 0, i = 0;
+
+ for (; i < BCL_PARAM_MAX; i++) {
+ bcl[i] = kzalloc(sizeof(struct bcl_param_data),
+ GFP_KERNEL);
+ if (!bcl[i]) {
+ pr_err("kzalloc failed\n");
+ while ((--i) >= 0)
+ kfree(bcl[i]);
+ return -ENOMEM;
+ }
+ }
+
+ return ret;
+}
+
+
+static int __init msm_bcl_init_driver(void)
+{
+ int ret = 0;
+
+ ret = msm_bcl_init();
+ if (ret) {
+ pr_err("msm bcl init failed. err:%d\n", ret);
+ return ret;
+ }
+ return class_register(&msm_bcl_class);
+}
+
+static void __exit bcl_exit(void)
+{
+ int i = 0;
+
+ for (; i < BCL_PARAM_MAX; i++) {
+ sysfs_remove_group(&bcl[i]->device.kobj,
+ &bcl[i]->bcl_attr_gp);
+ kfree(bcl[i]->bcl_attr_gp.attrs);
+ kfree(bcl[i]);
+ }
+ class_unregister(&msm_bcl_class);
+}
+
+fs_initcall(msm_bcl_init_driver);
+module_exit(bcl_exit);
diff --git a/drivers/power/pmic-voter.c b/drivers/power/pmic-voter.c
new file mode 100644
index 000000000000..4ba0db459f03
--- /dev/null
+++ b/drivers/power/pmic-voter.c
@@ -0,0 +1,266 @@
+/* Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/device.h>
+
+#include "pmic-voter.h"
+
+#define NUM_MAX_CLIENTS 8
+
+struct client_vote {
+ int state;
+ int value;
+};
+
+struct votable {
+ struct client_vote votes[NUM_MAX_CLIENTS];
+ struct device *dev;
+ const char *name;
+ int num_clients;
+ int type;
+ int effective_client_id;
+ int effective_result;
+ int default_result;
+ struct mutex vote_lock;
+ int (*callback)(struct device *dev,
+ int effective_result,
+ int effective_client,
+ int last_result,
+ int last_client);
+};
+
+static int vote_set_any(struct votable *votable)
+{
+ int i;
+
+ for (i = 0; i < votable->num_clients; i++)
+ if (votable->votes[i].state == 1)
+ return 1;
+ return 0;
+}
+
+static int vote_min(struct votable *votable)
+{
+ int min_vote = INT_MAX;
+ int client_index = -EINVAL;
+ int i;
+
+ for (i = 0; i < votable->num_clients; i++) {
+ if (votable->votes[i].state == 1 &&
+ min_vote > votable->votes[i].value) {
+ min_vote = votable->votes[i].value;
+ client_index = i;
+ }
+ }
+
+ return client_index;
+}
+
+static int vote_max(struct votable *votable)
+{
+ int max_vote = INT_MIN;
+ int client_index = -EINVAL;
+ int i;
+
+ for (i = 0; i < votable->num_clients; i++) {
+ if (votable->votes[i].state == 1 &&
+ max_vote < votable->votes[i].value) {
+ max_vote = votable->votes[i].value;
+ client_index = i;
+ }
+ }
+
+ return client_index;
+}
+
+void lock_votable(struct votable *votable)
+{
+ mutex_lock(&votable->vote_lock);
+}
+
+void unlock_votable(struct votable *votable)
+{
+ mutex_unlock(&votable->vote_lock);
+}
+
+int get_client_vote(struct votable *votable, int client_id)
+{
+ int value;
+
+ lock_votable(votable);
+ value = get_client_vote_locked(votable, client_id);
+ unlock_votable(votable);
+ return value;
+}
+
+int get_client_vote_locked(struct votable *votable, int client_id)
+{
+ if (votable->votes[client_id].state < 0)
+ return votable->default_result;
+
+ return votable->votes[client_id].value;
+}
+
+int get_effective_result(struct votable *votable)
+{
+ int value;
+
+ lock_votable(votable);
+ value = get_effective_result_locked(votable);
+ unlock_votable(votable);
+ return value;
+}
+
+int get_effective_result_locked(struct votable *votable)
+{
+ if (votable->effective_result < 0)
+ return votable->default_result;
+
+ return votable->effective_result;
+}
+
+int get_effective_client_id(struct votable *votable)
+{
+ int id;
+
+ lock_votable(votable);
+ id = get_effective_client_id_locked(votable);
+ unlock_votable(votable);
+ return id;
+}
+
+int get_effective_client_id_locked(struct votable *votable)
+{
+ return votable->effective_client_id;
+}
+
+int vote(struct votable *votable, int client_id, bool state, int val)
+{
+ int effective_id, effective_result;
+ int rc = 0;
+
+ lock_votable(votable);
+
+ if (votable->votes[client_id].state == state &&
+ votable->votes[client_id].value == val) {
+ pr_debug("%s: votes unchanged; skipping\n", votable->name);
+ goto out;
+ }
+
+ votable->votes[client_id].state = state;
+ votable->votes[client_id].value = val;
+
+ pr_debug("%s: %d voting for %d - %s\n",
+ votable->name,
+ client_id, val, state ? "on" : "off");
+ switch (votable->type) {
+ case VOTE_MIN:
+ effective_id = vote_min(votable);
+ break;
+ case VOTE_MAX:
+ effective_id = vote_max(votable);
+ break;
+ case VOTE_SET_ANY:
+ votable->votes[client_id].value = state;
+ effective_result = vote_set_any(votable);
+ if (effective_result != votable->effective_result) {
+ votable->effective_client_id = client_id;
+ votable->effective_result = effective_result;
+ rc = votable->callback(votable->dev,
+ effective_result, client_id,
+ state, client_id);
+ }
+ goto out;
+ }
+
+ /*
+ * If the votable does not have any votes it will maintain the last
+ * known effective_result and effective_client_id
+ */
+ if (effective_id < 0) {
+ pr_debug("%s: no votes; skipping callback\n", votable->name);
+ goto out;
+ }
+
+ effective_result = votable->votes[effective_id].value;
+
+ if (effective_result != votable->effective_result) {
+ votable->effective_client_id = effective_id;
+ votable->effective_result = effective_result;
+ pr_debug("%s: effective vote is now %d voted by %d\n",
+ votable->name, effective_result, effective_id);
+ rc = votable->callback(votable->dev, effective_result,
+ effective_id, val, client_id);
+ }
+
+out:
+ unlock_votable(votable);
+ return rc;
+}
+
+struct votable *create_votable(struct device *dev, const char *name,
+ int votable_type,
+ int num_clients,
+ int default_result,
+ int (*callback)(struct device *dev,
+ int effective_result,
+ int effective_client,
+ int last_result,
+ int last_client)
+ )
+{
+ int i;
+ struct votable *votable;
+
+ if (!callback) {
+ dev_err(dev, "Invalid callback specified for voter\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (votable_type >= NUM_VOTABLE_TYPES) {
+ dev_err(dev, "Invalid votable_type specified for voter\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (num_clients > NUM_MAX_CLIENTS) {
+ dev_err(dev, "Invalid num_clients specified for voter\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ votable = devm_kzalloc(dev, sizeof(struct votable), GFP_KERNEL);
+ if (!votable)
+ return ERR_PTR(-ENOMEM);
+
+ votable->dev = dev;
+ votable->name = name;
+ votable->num_clients = num_clients;
+ votable->callback = callback;
+ votable->type = votable_type;
+ votable->default_result = default_result;
+ mutex_init(&votable->vote_lock);
+
+ /*
+ * Because effective_result and client states are invalid
+ * before the first vote, initialize them to -EINVAL
+ */
+ votable->effective_result = -EINVAL;
+ votable->effective_client_id = -EINVAL;
+
+ for (i = 0; i < votable->num_clients; i++)
+ votable->votes[i].state = -EINVAL;
+
+ return votable;
+}
diff --git a/drivers/power/pmic-voter.h b/drivers/power/pmic-voter.h
new file mode 100644
index 000000000000..363444468a82
--- /dev/null
+++ b/drivers/power/pmic-voter.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mutex.h>
+
+struct votable;
+
+enum votable_type {
+ VOTE_MIN,
+ VOTE_MAX,
+ VOTE_SET_ANY,
+ NUM_VOTABLE_TYPES,
+};
+
+int get_client_vote(struct votable *votable, int client_id);
+int get_client_vote_locked(struct votable *votable, int client_id);
+int get_effective_result(struct votable *votable);
+int get_effective_result_locked(struct votable *votable);
+int get_effective_client_id(struct votable *votable);
+int get_effective_client_id_locked(struct votable *votable);
+int vote(struct votable *votable, int client_id, bool state, int val);
+struct votable *create_votable(struct device *dev, const char *name,
+ int votable_type, int num_clients,
+ int default_result,
+ int (*callback)(struct device *dev,
+ int effective_result,
+ int effective_client,
+ int last_result,
+ int last_client)
+ );
+void lock_votable(struct votable *votable);
+void unlock_votable(struct votable *votable);
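
The header above is the entire client-facing surface of the voter. A minimal usage sketch follows; the client IDs, the "FCC" votable name, the default of 3000 and the individual vote values are made up for illustration, and the callback simply logs the winning value.

#include <linux/device.h>
#include <linux/err.h>
#include "pmic-voter.h"

/* Hypothetical client IDs for a VOTE_MIN current-limit votable. */
enum { USER_VOTER, THERMAL_VOTER, NUM_FCC_VOTERS };

static int fcc_callback(struct device *dev, int effective_result,
			int effective_client, int last_result, int last_client)
{
	/* A real callback would program the winning limit into hardware. */
	dev_dbg(dev, "fcc -> %d mA (set by client %d)\n",
		effective_result, effective_client);
	return 0;
}

static void fcc_voting_example(struct device *dev)
{
	struct votable *fcc = create_votable(dev, "FCC", VOTE_MIN,
					     NUM_FCC_VOTERS, 3000,
					     fcc_callback);

	if (IS_ERR(fcc))
		return;

	vote(fcc, THERMAL_VOTER, true, 2000);	/* limit to 2000 */
	vote(fcc, USER_VOTER, true, 2500);	/* min wins: still 2000 */
	vote(fcc, THERMAL_VOTER, false, 2000);	/* unvote: 2500 takes effect */
}

With VOTE_MIN the lowest active vote wins, and the callback only fires when the effective result actually changes.
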
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 280018d59d5a..c6017e20c7b6 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -45,18 +45,22 @@ static ssize_t power_supply_show_property(struct device *dev,
char *buf) {
static char *type_text[] = {
"Unknown", "Battery", "UPS", "Mains", "USB",
- "USB_DCP", "USB_CDP", "USB_ACA"
+ "USB_DCP", "USB_CDP", "USB_ACA",
+ "USB_HVDCP", "USB_HVDCP_3", "Wireless", "BMS", "USB_Parallel",
+ "Wipower", "TYPEC", "TYPEC_UFP", "TYPEC_DFP"
};
static char *status_text[] = {
"Unknown", "Charging", "Discharging", "Not charging", "Full"
};
static char *charge_type[] = {
- "Unknown", "N/A", "Trickle", "Fast"
+ "Unknown", "N/A", "Trickle", "Fast",
+ "Taper"
};
static char *health_text[] = {
"Unknown", "Good", "Overheat", "Dead", "Over voltage",
"Unspecified failure", "Cold", "Watchdog timer expire",
- "Safety timer expire"
+ "Safety timer expire",
+ "Warm", "Cool"
};
static char *technology_text[] = {
"Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd",
@@ -165,6 +169,8 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(charge_full),
POWER_SUPPLY_ATTR(charge_empty),
POWER_SUPPLY_ATTR(charge_now),
+ POWER_SUPPLY_ATTR(charge_now_raw),
+ POWER_SUPPLY_ATTR(charge_now_error),
POWER_SUPPLY_ATTR(charge_avg),
POWER_SUPPLY_ATTR(charge_counter),
POWER_SUPPLY_ATTR(constant_charge_current),
@@ -184,6 +190,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(capacity_alert_min),
POWER_SUPPLY_ATTR(capacity_alert_max),
POWER_SUPPLY_ATTR(capacity_level),
+ POWER_SUPPLY_ATTR(capacity_raw),
POWER_SUPPLY_ATTR(temp),
POWER_SUPPLY_ATTR(temp_max),
POWER_SUPPLY_ATTR(temp_min),
@@ -204,12 +211,47 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(usb_hc),
POWER_SUPPLY_ATTR(usb_otg),
POWER_SUPPLY_ATTR(charge_enabled),
+ POWER_SUPPLY_ATTR(battery_charging_enabled),
+ POWER_SUPPLY_ATTR(charging_enabled),
+ POWER_SUPPLY_ATTR(input_voltage_regulation),
+ POWER_SUPPLY_ATTR(input_current_max),
+ POWER_SUPPLY_ATTR(input_current_trim),
+ POWER_SUPPLY_ATTR(input_current_settled),
+ POWER_SUPPLY_ATTR(bypass_vchg_loop_debouncer),
+ POWER_SUPPLY_ATTR(charge_counter_shadow),
+ POWER_SUPPLY_ATTR(hi_power),
+ POWER_SUPPLY_ATTR(low_power),
+ POWER_SUPPLY_ATTR(temp_cool),
+ POWER_SUPPLY_ATTR(temp_warm),
+ POWER_SUPPLY_ATTR(system_temp_level),
+ POWER_SUPPLY_ATTR(resistance),
+ POWER_SUPPLY_ATTR(resistance_capacitive),
+ POWER_SUPPLY_ATTR(resistance_id),
+ POWER_SUPPLY_ATTR(resistance_now),
+ POWER_SUPPLY_ATTR(flash_current_max),
+ POWER_SUPPLY_ATTR(update_now),
+ POWER_SUPPLY_ATTR(esr_count),
+ POWER_SUPPLY_ATTR(safety_timer_enabled),
+ POWER_SUPPLY_ATTR(charge_done),
+ POWER_SUPPLY_ATTR(flash_active),
+ POWER_SUPPLY_ATTR(flash_trigger),
+ POWER_SUPPLY_ATTR(force_tlim),
+ POWER_SUPPLY_ATTR(dp_dm),
+ POWER_SUPPLY_ATTR(input_current_limited),
+ POWER_SUPPLY_ATTR(input_current_now),
+ POWER_SUPPLY_ATTR(rerun_aicl),
+ POWER_SUPPLY_ATTR(cycle_count_id),
+ POWER_SUPPLY_ATTR(safety_timer_expired),
+ POWER_SUPPLY_ATTR(restricted_charging),
+ POWER_SUPPLY_ATTR(current_capability),
+ POWER_SUPPLY_ATTR(typec_mode),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
POWER_SUPPLY_ATTR(serial_number),
+ POWER_SUPPLY_ATTR(battery_type),
};
static struct attribute *
diff --git a/drivers/power/qcom/Kconfig b/drivers/power/qcom/Kconfig
new file mode 100644
index 000000000000..b7ca61d6c21d
--- /dev/null
+++ b/drivers/power/qcom/Kconfig
@@ -0,0 +1,66 @@
+config MSM_PM
+ depends on PM
+ select MSM_IDLE_STATS if DEBUG_FS
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ bool "Qualcomm platform specific PM driver"
+ help
+ Platform specific power driver to manage cores and L2
+ low power modes. It interfaces with various system
+ drivers and puts the cores into low power modes.
+
+config MSM_NOPM
+ default y if !PM
+ bool
+ help
+ This enables bare minimum support of power management at the platform
+ level, i.e. WFI only.
+
+config APSS_CORE_EA
+ depends on CPU_FREQ && PM_OPP
+ bool "Qualcomm Technology Inc specific power aware driver"
+ help
+ Platform specific power aware driver to provide power
+ and temperature information to the scheduler.
+
+config MSM_APM
+ bool "Qualcomm Technologies Inc platform specific APM driver"
+ help
+ Platform specific driver to manage the power source of
+ memory arrays. Interfaces with regulator drivers to ensure
+ SRAM Vmin requirements are met across different performance
+ levels.
+
+if MSM_PM
+menuconfig MSM_IDLE_STATS
+ bool "Collect idle statistics"
+ help
+ Collect per-core low power mode idle statistics and
+ export them in proc/msm_pm_stats. Users can read this
+ data to determine which low power modes the cores have
+ entered and how many times.
+
+if MSM_IDLE_STATS
+
+config MSM_IDLE_STATS_FIRST_BUCKET
+ int "First bucket time"
+ default 62500
+ help
+ Upper time limit in nanoseconds of first bucket.
+
+config MSM_IDLE_STATS_BUCKET_SHIFT
+ int "Bucket shift"
+ default 2
+
+config MSM_IDLE_STATS_BUCKET_COUNT
+ int "Bucket count"
+ default 10
+
+config MSM_SUSPEND_STATS_FIRST_BUCKET
+ int "First bucket time for suspend"
+ default 1000000000
+ help
+ Upper time limit in nanoseconds of first bucket of the
+ histogram. This is for collecting statistics on suspend.
+
+endif # MSM_IDLE_STATS
+endif # MSM_PM
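+
+# Illustrative defconfig fragment (editorial sketch; the values shown are
+# examples only, not recommendations):
+#   CONFIG_MSM_PM=y
+#   CONFIG_MSM_IDLE_STATS=y
+#   CONFIG_MSM_IDLE_STATS_FIRST_BUCKET=62500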
diff --git a/drivers/power/qcom/Makefile b/drivers/power/qcom/Makefile
new file mode 100644
index 000000000000..9a5ac6109e74
--- /dev/null
+++ b/drivers/power/qcom/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_MSM_PM) += msm-pm.o pm-data.o
+obj-$(CONFIG_MSM_IDLE_STATS) += lpm-stats.o
+obj-$(CONFIG_MSM_NOPM) += no-pm.o
+obj-$(CONFIG_APSS_CORE_EA) += msm-core.o debug_core.o
+obj-$(CONFIG_MSM_APM) += apm.o
diff --git a/drivers/power/qcom/apm.c b/drivers/power/qcom/apm.c
new file mode 100644
index 000000000000..a8df71f38417
--- /dev/null
+++ b/drivers/power/qcom/apm.c
@@ -0,0 +1,985 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/power/qcom/apm.h>
+#include <soc/qcom/scm.h>
+
+/*
+ * VDD_APCC
+ * =============================================================
+ * | VDD_MX | |
+ * | ==========================|============= |
+ * ___|___ ___|___ ___|___ ___|___ ___|___ ___|___
+ * | | | | | | | | | | | |
+ * | APCC | | MX HS | | MX HS | | APCC | | MX HS | | APCC |
+ * | HS | | | | | | HS | | | | HS |
+ * |_______| |_______| |_______| |_______| |_______| |_______|
+ * |_________| |_________| |__________|
+ * | | |
+ * ______|_____ ______|_____ _______|_____
+ * | | | | | |
+ * | | | | | |
+ * | CPU MEM | | L2 MEM | | L3 MEM |
+ * | Arrays | | Arrays | | Arrays |
+ * | | | | | |
+ * |____________| |____________| |_____________|
+ *
+ */
+
+/* Register value definitions */
+#define APCS_GFMUXA_SEL_VAL 0x13
+#define APCS_GFMUXA_DESEL_VAL 0x03
+#define MSM_APM_MX_MODE_VAL 0x00
+#define MSM_APM_APCC_MODE_VAL 0x10
+#define MSM_APM_MX_DONE_VAL 0x00
+#define MSM_APM_APCC_DONE_VAL 0x03
+#define MSM_APM_OVERRIDE_SEL_VAL 0xb0
+#define MSM_APM_SEC_CLK_SEL_VAL 0x30
+#define SPM_EVENT_SET_VAL 0x01
+#define SPM_EVENT_CLEAR_VAL 0x00
+
+/* Register bit mask definitions */
+#define MSM_APM_CTL_STS_MASK 0x0f
+
+/* Register offset definitions */
+#define APCC_APM_MODE 0x00000098
+#define APCC_APM_CTL_STS 0x000000a8
+#define APCS_SPARE 0x00000068
+#define APCS_VERSION 0x00000fd0
+
+#define HMSS_VERSION_1P2 0x10020000
+
+#define MSM_APM_SWITCH_TIMEOUT_US 10
+#define SPM_WAKEUP_DELAY_US 2
+#define SPM_EVENT_NUM 6
+
+#define MSM_APM_DRIVER_NAME "qcom,msm-apm"
+
+asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+
+enum {
+ CLOCK_ASSERT_ENABLE,
+ CLOCK_ASSERT_DISABLE,
+ CLOCK_ASSERT_TOGGLE,
+};
+
+enum {
+ MSM8996_ID,
+ MSMTITANIUM_ID,
+};
+
+static int msm_id[] = {MSM8996_ID, MSMTITANIUM_ID};
+
+struct msm_apm_ctrl_dev {
+ struct list_head list;
+ struct device *dev;
+ enum msm_apm_supply supply;
+ spinlock_t lock;
+ void __iomem *reg_base;
+ void __iomem *apcs_csr_base;
+ void __iomem **apcs_spm_events_addr;
+ void __iomem *apc0_pll_ctl_addr;
+ void __iomem *apc1_pll_ctl_addr;
+ bool clk_src_override;
+ u32 version;
+ struct dentry *debugfs;
+ u32 msm_id;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static struct dentry *apm_debugfs_base;
+#endif
+
+static DEFINE_MUTEX(apm_ctrl_list_mutex);
+static LIST_HEAD(apm_ctrl_list);
+
+/*
+ * Get the resources associated with the APM controller from device tree
+ * and remap all I/O addresses that are relevant to this HW revision.
+ */
+static int msm_apm_ctrl_devm_ioremap(struct platform_device *pdev,
+ struct msm_apm_ctrl_dev *ctrl)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ static const char *res_name[SPM_EVENT_NUM] = {
+ "apc0-l2-spm",
+ "apc1-l2-spm",
+ "apc0-cpu0-spm",
+ "apc0-cpu1-spm",
+ "apc1-cpu0-spm",
+ "apc1-cpu1-spm"
+ };
+ int i, ret = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb");
+ if (!res) {
+ dev_err(dev, "Missing PM APCC Global register physical address");
+ return -EINVAL;
+ }
+ ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctrl->reg_base) {
+ dev_err(dev, "Failed to map PM APCC Global registers\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs-csr");
+ if (!res) {
+ dev_err(dev, "Missing APCS CSR physical base address");
+ return -EINVAL;
+ }
+ ctrl->apcs_csr_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctrl->apcs_csr_base) {
+ dev_err(dev, "Failed to map APCS CSR registers\n");
+ return -ENOMEM;
+ }
+
+ ctrl->clk_src_override = of_property_read_bool(dev->of_node,
+ "qcom,clock-source-override");
+
+ if (ctrl->clk_src_override)
+ dev_info(dev, "overriding clock sources across APM switch\n");
+
+ ctrl->version = readl_relaxed(ctrl->apcs_csr_base + APCS_VERSION);
+
+ if (ctrl->version >= HMSS_VERSION_1P2)
+ return ret;
+
+ ctrl->apcs_spm_events_addr = devm_kzalloc(&pdev->dev,
+ SPM_EVENT_NUM
+ * sizeof(void __iomem *),
+ GFP_KERNEL);
+ if (!ctrl->apcs_spm_events_addr) {
+ dev_err(dev, "Failed to allocate memory for APCS SPM event registers\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SPM_EVENT_NUM; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ res_name[i]);
+ if (!res) {
+ dev_err(dev, "Missing address for %s\n", res_name[i]);
+ ret = -EINVAL;
+ goto free_events;
+ }
+
+ ctrl->apcs_spm_events_addr[i] = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!ctrl->apcs_spm_events_addr[i]) {
+ dev_err(dev, "Failed to map %s\n", res_name[i]);
+ ret = -ENOMEM;
+ goto free_events;
+ }
+
+ dev_dbg(dev, "%s event phys: %pa virt:0x%p\n", res_name[i],
+ &res->start, ctrl->apcs_spm_events_addr[i]);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "apc0-pll-ctl");
+ if (!res) {
+ dev_err(dev, "Missing APC0 PLL CTL physical address\n");
+ ret = -EINVAL;
+ goto free_events;
+ }
+
+ ctrl->apc0_pll_ctl_addr = devm_ioremap(dev,
+ res->start,
+ resource_size(res));
+ if (!ctrl->apc0_pll_ctl_addr) {
+ dev_err(dev, "Failed to map APC0 PLL CTL register\n");
+ ret = -ENOMEM;
+ goto free_events;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "apc1-pll-ctl");
+ if (!res) {
+ dev_err(dev, "Missing APC1 PLL CTL physical address\n");
+ ret = -EINVAL;
+ goto free_events;
+ }
+
+ ctrl->apc1_pll_ctl_addr = devm_ioremap(dev,
+ res->start,
+ resource_size(res));
+ if (!ctrl->apc1_pll_ctl_addr) {
+ dev_err(dev, "Failed to map APC1 PLL CTL register\n");
+ ret = -ENOMEM;
+ goto free_events;
+ }
+
+ return ret;
+
+free_events:
+ devm_kfree(dev, ctrl->apcs_spm_events_addr);
+ return ret;
+}
+
+/* Titanium register offset definition */
+#define MSMTITANIUM_APM_DLY_CNTR 0x2ac
+
+/* Register field shift definitions */
+#define APM_CTL_SEL_SWITCH_DLY_SHIFT 0
+#define APM_CTL_RESUME_CLK_DLY_SHIFT 8
+#define APM_CTL_HALT_CLK_DLY_SHIFT 16
+#define APM_CTL_POST_HALT_DLY_SHIFT 24
+
+/* Register field mask definitions */
+#define APM_CTL_SEL_SWITCH_DLY_MASK GENMASK(7, 0)
+#define APM_CTL_RESUME_CLK_DLY_MASK GENMASK(15, 8)
+#define APM_CTL_HALT_CLK_DLY_MASK GENMASK(23, 16)
+#define APM_CTL_POST_HALT_DLY_MASK GENMASK(31, 24)
+
+/*
+ * Get the resources associated with the msmtitanium APM controller from
+ * device tree, remap all I/O addresses, and program the initial
+ * register configuration required for the titanium APM controller device.
+ */
+static int msmtitanium_apm_ctrl_init(struct platform_device *pdev,
+ struct msm_apm_ctrl_dev *ctrl)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ u32 delay_counter, val = 0, regval = 0;
+ int rc = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb");
+ if (!res) {
+ dev_err(dev, "Missing PM APCC Global register physical address\n");
+ return -ENODEV;
+ }
+ ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctrl->reg_base) {
+ dev_err(dev, "Failed to map PM APCC Global registers\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Initial APM register configuration required before starting
+ * APM HW controller.
+ */
+ regval = readl_relaxed(ctrl->reg_base + MSMTITANIUM_APM_DLY_CNTR);
+ val = regval;
+
+ if (of_find_property(dev->of_node, "qcom,apm-post-halt-delay", NULL)) {
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,apm-post-halt-delay", &delay_counter);
+ if (rc < 0) {
+ dev_err(dev, "apm-post-halt-delay read failed, rc = %d",
+ rc);
+ return rc;
+ }
+
+ val &= ~APM_CTL_POST_HALT_DLY_MASK;
+ val |= (delay_counter << APM_CTL_POST_HALT_DLY_SHIFT)
+ & APM_CTL_POST_HALT_DLY_MASK;
+ }
+
+ if (of_find_property(dev->of_node, "qcom,apm-halt-clk-delay", NULL)) {
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,apm-halt-clk-delay", &delay_counter);
+ if (rc < 0) {
+ dev_err(dev, "apm-halt-clk-delay read failed, rc = %d",
+ rc);
+ return rc;
+ }
+
+ val &= ~APM_CTL_HALT_CLK_DLY_MASK;
+ val |= (delay_counter << APM_CTL_HALT_CLK_DLY_SHIFT)
+ & APM_CTL_HALT_CLK_DLY_MASK;
+ }
+
+ if (of_find_property(dev->of_node, "qcom,apm-resume-clk-delay", NULL)) {
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,apm-resume-clk-delay", &delay_counter);
+ if (rc < 0) {
+ dev_err(dev, "apm-resume-clk-delay read failed, rc = %d",
+ rc);
+ return rc;
+ }
+
+ val &= ~APM_CTL_RESUME_CLK_DLY_MASK;
+ val |= (delay_counter << APM_CTL_RESUME_CLK_DLY_SHIFT)
+ & APM_CTL_RESUME_CLK_DLY_MASK;
+ }
+
+ if (of_find_property(dev->of_node, "qcom,apm-sel-switch-delay", NULL)) {
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,apm-sel-switch-delay", &delay_counter);
+ if (rc < 0) {
+ dev_err(dev, "apm-sel-switch-delay read failed, rc = %d",
+ rc);
+ return rc;
+ }
+
+ val &= ~APM_CTL_SEL_SWITCH_DLY_MASK;
+ val |= (delay_counter << APM_CTL_SEL_SWITCH_DLY_SHIFT)
+ & APM_CTL_SEL_SWITCH_DLY_MASK;
+ }
+
+ if (val != regval) {
+ writel_relaxed(val, ctrl->reg_base + MSMTITANIUM_APM_DLY_CNTR);
+ /* make sure write completes before return */
+ mb();
+ }
+
+ return rc;
+}
+
+static int msm_apm_secure_clock_source_override(
+ struct msm_apm_ctrl_dev *ctrl_dev, bool enable)
+{
+ int ret;
+
+ if (ctrl_dev->clk_src_override) {
+ ret = __invoke_psci_fn_smc(0xC4000020, 3, enable ?
+ CLOCK_ASSERT_ENABLE :
+ CLOCK_ASSERT_DISABLE, 0);
+ if (ret)
+ dev_err(ctrl_dev->dev, "PSCI request to switch to %s clock source failed\n",
+ enable ? "GPLL0" : "original");
+ }
+
+ return 0;
+}
+
+static int msm8996_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ int i, timeout = MSM_APM_SWITCH_TIMEOUT_US;
+ u32 regval;
+ int ret = 0;
+ unsigned long flags;
+
+ mutex_lock(&scm_lmh_lock);
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ ret = msm_apm_secure_clock_source_override(ctrl_dev, true);
+ if (ret)
+ return ret;
+
+ /* Perform revision-specific programming steps */
+ if (ctrl_dev->version < HMSS_VERSION_1P2) {
+ /* Clear SPM events */
+ for (i = 0; i < SPM_EVENT_NUM; i++)
+ writel_relaxed(SPM_EVENT_CLEAR_VAL,
+ ctrl_dev->apcs_spm_events_addr[i]);
+
+ udelay(SPM_WAKEUP_DELAY_US);
+
+ /* Switch APC/CBF to GPLL0 clock */
+ writel_relaxed(APCS_GFMUXA_SEL_VAL,
+ ctrl_dev->apcs_csr_base + APCS_SPARE);
+ ndelay(200);
+ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+ ctrl_dev->apc0_pll_ctl_addr);
+ ndelay(200);
+ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+ ctrl_dev->apc1_pll_ctl_addr);
+
+ /* Ensure writes complete before proceeding */
+ mb();
+ }
+
+ /* Switch arrays to MX supply and wait for its completion */
+ writel_relaxed(MSM_APM_MX_MODE_VAL, ctrl_dev->reg_base +
+ APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ while (timeout > 0) {
+ regval = readl_relaxed(ctrl_dev->reg_base + APCC_APM_CTL_STS);
+ if ((regval & MSM_APM_CTL_STS_MASK) ==
+ MSM_APM_MX_DONE_VAL)
+ break;
+
+ udelay(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ dev_err(ctrl_dev->dev, "APCC to MX APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
+ regval);
+ }
+
+ /* Perform revision-specific programming steps */
+ if (ctrl_dev->version < HMSS_VERSION_1P2) {
+ /* Switch APC/CBF clocks to original source */
+ writel_relaxed(APCS_GFMUXA_DESEL_VAL,
+ ctrl_dev->apcs_csr_base + APCS_SPARE);
+ ndelay(200);
+ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+ ctrl_dev->apc0_pll_ctl_addr);
+ ndelay(200);
+ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+ ctrl_dev->apc1_pll_ctl_addr);
+
+ /* Complete clock source switch before SPM event sequence */
+ mb();
+
+ /* Set SPM events */
+ for (i = 0; i < SPM_EVENT_NUM; i++)
+ writel_relaxed(SPM_EVENT_SET_VAL,
+ ctrl_dev->apcs_spm_events_addr[i]);
+ }
+
+ ret = msm_apm_secure_clock_source_override(ctrl_dev, false);
+ if (ret)
+ return ret;
+
+ if (!ret) {
+ ctrl_dev->supply = MSM_APM_SUPPLY_MX;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
+ }
+
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+ mutex_unlock(&scm_lmh_lock);
+
+ return ret;
+}
+
+static int msm8996_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ int i, timeout = MSM_APM_SWITCH_TIMEOUT_US;
+ u32 regval;
+ int ret = 0;
+ unsigned long flags;
+
+ mutex_lock(&scm_lmh_lock);
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ ret = msm_apm_secure_clock_source_override(ctrl_dev, true);
+ if (ret)
+ return ret;
+
+ /* Perform revision-specific programming steps */
+ if (ctrl_dev->version < HMSS_VERSION_1P2) {
+ /* Clear SPM events */
+ for (i = 0; i < SPM_EVENT_NUM; i++)
+ writel_relaxed(SPM_EVENT_CLEAR_VAL,
+ ctrl_dev->apcs_spm_events_addr[i]);
+
+ udelay(SPM_WAKEUP_DELAY_US);
+
+ /* Switch APC/CBF to GPLL0 clock */
+ writel_relaxed(APCS_GFMUXA_SEL_VAL,
+ ctrl_dev->apcs_csr_base + APCS_SPARE);
+ ndelay(200);
+ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+ ctrl_dev->apc0_pll_ctl_addr);
+ ndelay(200);
+ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+ ctrl_dev->apc1_pll_ctl_addr);
+
+ /* Ensure previous writes complete before proceeding */
+ mb();
+ }
+
+ /* Switch arrays to APCC supply and wait for its completion */
+ writel_relaxed(MSM_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
+ APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ while (timeout > 0) {
+ regval = readl_relaxed(ctrl_dev->reg_base + APCC_APM_CTL_STS);
+ if ((regval & MSM_APM_CTL_STS_MASK) ==
+ MSM_APM_APCC_DONE_VAL)
+ break;
+
+ udelay(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ dev_err(ctrl_dev->dev, "MX to APCC APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
+ regval);
+ }
+
+ /* Perform revision-specific programming steps */
+ if (ctrl_dev->version < HMSS_VERSION_1P2) {
+ /* Set SPM events */
+ for (i = 0; i < SPM_EVENT_NUM; i++)
+ writel_relaxed(SPM_EVENT_SET_VAL,
+ ctrl_dev->apcs_spm_events_addr[i]);
+
+ /* Complete SPM event sequence before clock source switch */
+ mb();
+
+ /* Switch APC/CBF clocks to original source */
+ writel_relaxed(APCS_GFMUXA_DESEL_VAL,
+ ctrl_dev->apcs_csr_base + APCS_SPARE);
+ ndelay(200);
+ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+ ctrl_dev->apc0_pll_ctl_addr);
+ ndelay(200);
+ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+ ctrl_dev->apc1_pll_ctl_addr);
+ }
+
+ ret = msm_apm_secure_clock_source_override(ctrl_dev, false);
+ if (ret)
+ return ret;
+
+ if (!ret) {
+ ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
+ }
+
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+ mutex_unlock(&scm_lmh_lock);
+
+ return ret;
+}
+
+/* Titanium register value definitions */
+#define MSMTITANIUM_APM_MX_MODE_VAL 0x00
+#define MSMTITANIUM_APM_APCC_MODE_VAL 0x02
+#define MSMTITANIUM_APM_MX_DONE_VAL 0x00
+#define MSMTITANIUM_APM_APCC_DONE_VAL 0x03
+
+/* Titanium register offset definitions */
+#define MSMTITANIUM_APCC_APM_MODE 0x000002a8
+#define MSMTITANIUM_APCC_APM_CTL_STS 0x000002b0
+
+static int msmtitanium_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ int timeout = MSM_APM_SWITCH_TIMEOUT_US;
+ u32 regval;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ /* Switch arrays to MX supply and wait for its completion */
+ writel_relaxed(MSMTITANIUM_APM_MX_MODE_VAL, ctrl_dev->reg_base +
+ MSMTITANIUM_APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ while (timeout > 0) {
+ regval = readl_relaxed(ctrl_dev->reg_base +
+ MSMTITANIUM_APCC_APM_CTL_STS);
+ if ((regval & MSM_APM_CTL_STS_MASK) ==
+ MSMTITANIUM_APM_MX_DONE_VAL)
+ break;
+
+ udelay(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ dev_err(ctrl_dev->dev, "APCC to MX APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
+ regval);
+ } else {
+ ctrl_dev->supply = MSM_APM_SUPPLY_MX;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
+ }
+
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+ return ret;
+}
+
+static int msmtitanium_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ int timeout = MSM_APM_SWITCH_TIMEOUT_US;
+ u32 regval;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ /* Switch arrays to APCC supply and wait for its completion */
+ writel_relaxed(MSMTITANIUM_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
+ MSMTITANIUM_APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ while (timeout > 0) {
+ regval = readl_relaxed(ctrl_dev->reg_base +
+ MSMTITANIUM_APCC_APM_CTL_STS);
+ if ((regval & MSM_APM_CTL_STS_MASK) ==
+ MSMTITANIUM_APM_APCC_DONE_VAL)
+ break;
+
+ udelay(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ dev_err(ctrl_dev->dev, "MX to APCC APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
+ regval);
+ } else {
+ ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
+ }
+
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+ return ret;
+}
+
+static int msm_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ int ret = 0;
+
+ switch (ctrl_dev->msm_id) {
+ case MSM8996_ID:
+ ret = msm8996_apm_switch_to_mx(ctrl_dev);
+ break;
+ case MSMTITANIUM_ID:
+ ret = msmtitanium_apm_switch_to_mx(ctrl_dev);
+ break;
+ }
+
+ return ret;
+}
+
+static int msm_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ int ret = 0;
+
+ switch (ctrl_dev->msm_id) {
+ case MSM8996_ID:
+ ret = msm8996_apm_switch_to_apcc(ctrl_dev);
+ break;
+ case MSMTITANIUM_ID:
+ ret = msmtitanium_apm_switch_to_apcc(ctrl_dev);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * msm_apm_get_supply() - Returns the supply that is currently
+ * powering the memory arrays
+ * @ctrl_dev: Pointer to an MSM APM controller device
+ *
+ * Returns the supply currently selected by the APM.
+ */
+int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ return ctrl_dev->supply;
+}
+EXPORT_SYMBOL(msm_apm_get_supply);
+
+/**
+ * msm_apm_set_supply() - Perform the necessary steps to switch the voltage
+ * source of the memory arrays to a given supply
+ * @ctrl_dev: Pointer to an MSM APM controller device
+ * @supply: Power rail to use as supply for the memory
+ * arrays
+ *
+ * Returns 0 on success, -ETIMEDOUT on APM switch timeout, or -EPERM if
+ * the supply is not supported.
+ */
+int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev,
+ enum msm_apm_supply supply)
+{
+ int ret;
+
+ switch (supply) {
+ case MSM_APM_SUPPLY_APCC:
+ ret = msm_apm_switch_to_apcc(ctrl_dev);
+ break;
+ case MSM_APM_SUPPLY_MX:
+ ret = msm_apm_switch_to_mx(ctrl_dev);
+ break;
+ default:
+ ret = -EPERM;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_apm_set_supply);
+
+/**
+ * msm_apm_ctrl_dev_get() - get a handle to the MSM APM controller linked to
+ * the device in device tree
+ * @dev: Pointer to the device
+ *
+ * The device must specify "qcom,apm-ctrl" property in its device tree
+ * node which points to an MSM APM controller device node.
+ *
+ * Returns an MSM APM controller handle if successful or ERR_PTR on any error.
+ * If the APM controller device hasn't probed yet, ERR_PTR(-EPROBE_DEFER) is
+ * returned.
+ */
+struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev)
+{
+ struct msm_apm_ctrl_dev *ctrl_dev = NULL;
+ struct msm_apm_ctrl_dev *dev_found = ERR_PTR(-EPROBE_DEFER);
+ struct device_node *ctrl_node;
+
+ if (!dev || !dev->of_node) {
+ pr_err("Invalid device node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl_node = of_parse_phandle(dev->of_node, "qcom,apm-ctrl", 0);
+ if (!ctrl_node) {
+ pr_err("Could not find qcom,apm-ctrl property in %s\n",
+ dev->of_node->full_name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ mutex_lock(&apm_ctrl_list_mutex);
+ list_for_each_entry(ctrl_dev, &apm_ctrl_list, list) {
+ if (ctrl_dev->dev && ctrl_dev->dev->of_node == ctrl_node) {
+ dev_found = ctrl_dev;
+ break;
+ }
+ }
+ mutex_unlock(&apm_ctrl_list_mutex);
+
+ of_node_put(ctrl_node);
+ return dev_found;
+}
+EXPORT_SYMBOL(msm_apm_ctrl_dev_get);
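+
+/*
+ * Illustrative consumer sketch (editorial note, not part of this snapshot):
+ * a client driver whose device tree node carries a "qcom,apm-ctrl" phandle
+ * would typically use the API above roughly as follows. client_probe() and
+ * its platform device are hypothetical names.
+ *
+ *	static int client_probe(struct platform_device *pdev)
+ *	{
+ *		struct msm_apm_ctrl_dev *apm;
+ *		int rc;
+ *
+ *		apm = msm_apm_ctrl_dev_get(&pdev->dev);
+ *		if (IS_ERR(apm))
+ *			return PTR_ERR(apm);	(-EPROBE_DEFER if not probed yet)
+ *
+ *		rc = msm_apm_set_supply(apm, MSM_APM_SUPPLY_MX);
+ *		if (rc)
+ *			return rc;
+ *
+ *		return msm_apm_get_supply(apm) == MSM_APM_SUPPLY_MX ? 0 : -EIO;
+ *	}
+ */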
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int apm_supply_dbg_open(struct inode *inode, struct file *filep)
+{
+ filep->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t apm_supply_dbg_read(struct file *filep, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct msm_apm_ctrl_dev *ctrl_dev = filep->private_data;
+ char buf[10];
+ int len;
+
+ if (!ctrl_dev) {
+ pr_err("invalid apm ctrl handle\n");
+ return -ENODEV;
+ }
+
+ if (ctrl_dev->supply == MSM_APM_SUPPLY_APCC)
+ len = snprintf(buf, sizeof(buf), "APCC\n");
+ else if (ctrl_dev->supply == MSM_APM_SUPPLY_MX)
+ len = snprintf(buf, sizeof(buf), "MX\n");
+ else
+ len = snprintf(buf, sizeof(buf), "ERR\n");
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations apm_supply_fops = {
+ .open = apm_supply_dbg_open,
+ .read = apm_supply_dbg_read,
+};
+
+static void apm_debugfs_base_init(void)
+{
+ apm_debugfs_base = debugfs_create_dir("msm-apm", NULL);
+
+ if (IS_ERR_OR_NULL(apm_debugfs_base))
+ pr_err("msm-apm debugfs base directory creation failed\n");
+}
+
+static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ struct dentry *temp;
+
+ if (IS_ERR_OR_NULL(apm_debugfs_base)) {
+ pr_err("Base directory missing, cannot create apm debugfs nodes\n");
+ return;
+ }
+
+ ctrl_dev->debugfs = debugfs_create_dir(dev_name(ctrl_dev->dev),
+ apm_debugfs_base);
+ if (IS_ERR_OR_NULL(ctrl_dev->debugfs)) {
+ pr_err("%s debugfs directory creation failed\n",
+ dev_name(ctrl_dev->dev));
+ return;
+ }
+
+ temp = debugfs_create_file("supply", S_IRUGO, ctrl_dev->debugfs,
+ ctrl_dev, &apm_supply_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("supply mode creation failed\n");
+ return;
+ }
+}
+
+static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ if (!IS_ERR_OR_NULL(ctrl_dev->debugfs))
+ debugfs_remove_recursive(ctrl_dev->debugfs);
+}
+
+static void apm_debugfs_base_remove(void)
+{
+ debugfs_remove_recursive(apm_debugfs_base);
+}
+#else
+
+static void apm_debugfs_base_init(void)
+{}
+
+static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev)
+{}
+
+static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev)
+{}
+
+static void apm_debugfs_base_remove(void)
+{}
+
+#endif
+
+static struct of_device_id msm_apm_match_table[] = {
+ {
+ .compatible = "qcom,msm-apm",
+ .data = &msm_id[MSM8996_ID]
+ },
+ {
+ .compatible = "qcom,msmtitanium-apm",
+ .data = &msm_id[MSMTITANIUM_ID]
+ },
+ {}
+};
+
+static int msm_apm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct msm_apm_ctrl_dev *ctrl;
+ const struct of_device_id *match;
+ int ret = 0;
+
+ dev_dbg(dev, "probing MSM Array Power Mux driver\n");
+
+ if (!dev->of_node) {
+ dev_err(dev, "Device tree node is missing\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(msm_apm_match_table, dev);
+ if (!match)
+ return -ENODEV;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl) {
+ dev_err(dev, "MSM APM controller memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&ctrl->list);
+ spin_lock_init(&ctrl->lock);
+ ctrl->dev = dev;
+ ctrl->msm_id = *(int *)match->data;
+ platform_set_drvdata(pdev, ctrl);
+
+ switch (ctrl->msm_id) {
+ case MSM8996_ID:
+ ret = msm_apm_ctrl_devm_ioremap(pdev, ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to add APM controller device\n");
+ return ret;
+ }
+ break;
+ case MSMTITANIUM_ID:
+ ret = msmtitanium_apm_ctrl_init(pdev, ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to initialize APM controller device: ret=%d\n",
+ ret);
+ return ret;
+ }
+ break;
+ default:
+ dev_err(dev, "unable to add APM controller device for msm_id:%d\n",
+ ctrl->msm_id);
+ return -ENODEV;
+ }
+
+ apm_debugfs_init(ctrl);
+ mutex_lock(&apm_ctrl_list_mutex);
+ list_add_tail(&ctrl->list, &apm_ctrl_list);
+ mutex_unlock(&apm_ctrl_list_mutex);
+
+ dev_dbg(dev, "MSM Array Power Mux driver probe successful");
+
+ return ret;
+}
+
+static int msm_apm_remove(struct platform_device *pdev)
+{
+ struct msm_apm_ctrl_dev *ctrl_dev;
+
+ ctrl_dev = platform_get_drvdata(pdev);
+ if (ctrl_dev) {
+ mutex_lock(&apm_ctrl_list_mutex);
+ list_del(&ctrl_dev->list);
+ mutex_unlock(&apm_ctrl_list_mutex);
+ apm_debugfs_deinit(ctrl_dev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver msm_apm_driver = {
+ .driver = {
+ .name = MSM_APM_DRIVER_NAME,
+ .of_match_table = msm_apm_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = msm_apm_probe,
+ .remove = msm_apm_remove,
+};
+
+static int __init msm_apm_init(void)
+{
+ apm_debugfs_base_init();
+ return platform_driver_register(&msm_apm_driver);
+}
+
+static void __exit msm_apm_exit(void)
+{
+ platform_driver_unregister(&msm_apm_driver);
+ apm_debugfs_base_remove();
+}
+
+arch_initcall(msm_apm_init);
+module_exit(msm_apm_exit);
+
+MODULE_DESCRIPTION("MSM Array Power Mux driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/qpnp-fg.c b/drivers/power/qpnp-fg.c
new file mode 100644
index 000000000000..a9cffbc59eaf
--- /dev/null
+++ b/drivers/power/qpnp-fg.c
@@ -0,0 +1,6721 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "FG: %s: " fmt, __func__
+
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/rtc.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/spmi.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/ktime.h>
+#include <linux/power_supply.h>
+#include <linux/of_batterydata.h>
+#include <linux/string_helpers.h>
+#include <linux/alarmtimer.h>
+#include <linux/qpnp/qpnp-revid.h>
+
+/* Register offsets */
+
+/* Interrupt offsets */
+#define INT_RT_STS(base) (base + 0x10)
+#define INT_EN_CLR(base) (base + 0x16)
+
+/* SPMI Register offsets */
+#define SOC_MONOTONIC_SOC 0x09
+#define SOC_BOOT_MOD 0x50
+#define SOC_RESTART 0x51
+
+#define REG_OFFSET_PERP_SUBTYPE 0x05
+
+/* RAM register offsets */
+#define RAM_OFFSET 0x400
+
+/* Bit/Mask definitions */
+#define FULL_PERCENT 0xFF
+#define MAX_TRIES_SOC 5
+#define MA_MV_BIT_RES 39
+#define MSB_SIGN BIT(7)
+#define IBAT_VBAT_MASK 0x7F
+#define NO_OTP_PROF_RELOAD BIT(6)
+#define REDO_FIRST_ESTIMATE BIT(3)
+#define RESTART_GO BIT(0)
+#define THERM_DELAY_MASK 0xE0
+
+/* SUBTYPE definitions */
+#define FG_SOC 0x9
+#define FG_BATT 0xA
+#define FG_ADC 0xB
+#define FG_MEMIF 0xC
+
+#define QPNP_FG_DEV_NAME "qcom,qpnp-fg"
+#define MEM_IF_TIMEOUT_MS 5000
+#define BUCKET_COUNT 8
+#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
+
+#define BCL_MA_TO_ADC(_current, _adc_val) { \
+ _adc_val = (u8)((_current) * 100 / 976); \
+}
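+
+/*
+ * Illustrative expansion (editorial note): BCL_MA_TO_ADC() converts a
+ * current threshold in mA into the 8-bit BCL ADC code at roughly
+ * 9.76 mA per LSB. For example:
+ *
+ *	u8 code;
+ *	BCL_MA_TO_ADC(1000, code);	(code = (u8)(1000 * 100 / 976) = 102)
+ */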
+
+/* Debug Flag Definitions */
+enum {
+ FG_SPMI_DEBUG_WRITES = BIT(0), /* Show SPMI writes */
+ FG_SPMI_DEBUG_READS = BIT(1), /* Show SPMI reads */
+ FG_IRQS = BIT(2), /* Show interrupts */
+ FG_MEM_DEBUG_WRITES = BIT(3), /* Show SRAM writes */
+ FG_MEM_DEBUG_READS = BIT(4), /* Show SRAM reads */
+ FG_POWER_SUPPLY = BIT(5), /* Show POWER_SUPPLY */
+ FG_STATUS = BIT(6), /* Show FG status changes */
+ FG_AGING = BIT(7), /* Show FG aging algorithm */
+};
+
+/* PMIC REVISIONS */
+#define REVID_RESERVED 0
+#define REVID_VARIANT 1
+#define REVID_ANA_MAJOR 2
+#define REVID_DIG_MAJOR 3
+
+enum dig_major {
+ DIG_REV_1 = 0x1,
+ DIG_REV_2 = 0x2,
+ DIG_REV_3 = 0x3,
+};
+
+enum pmic_subtype {
+ PMI8994 = 10,
+ PMI8950 = 17,
+ PMI8996 = 19,
+ PMI8937 = 55,
+};
+
+enum wa_flags {
+ IADC_GAIN_COMP_WA = BIT(0),
+ USE_CC_SOC_REG = BIT(1),
+ PULSE_REQUEST_WA = BIT(2),
+ BCL_HI_POWER_FOR_CHGLED_WA = BIT(3)
+};
+
+enum current_sense_type {
+ INTERNAL_CURRENT_SENSE,
+ EXTERNAL_CURRENT_SENSE,
+};
+
+struct fg_mem_setting {
+ u16 address;
+ u8 offset;
+ int value;
+};
+
+struct fg_mem_data {
+ u16 address;
+ u8 offset;
+ unsigned int len;
+ int value;
+};
+
+struct fg_learning_data {
+ int64_t cc_uah;
+ int64_t learned_cc_uah;
+ int init_cc_pc_val;
+ bool active;
+ bool feedback_on;
+ struct mutex learning_lock;
+ ktime_t time_stamp;
+ /* configuration properties */
+ int max_start_soc;
+ int max_increment;
+ int max_decrement;
+ int min_temp;
+ int max_temp;
+ int vbat_est_thr_uv;
+};
+
+struct fg_rslow_data {
+ u8 rslow_cfg;
+ u8 rslow_thr;
+ u8 rs_to_rslow[2];
+ u8 rslow_comp[4];
+ uint32_t chg_rs_to_rslow;
+ uint32_t chg_rslow_comp_c1;
+ uint32_t chg_rslow_comp_c2;
+ uint32_t chg_rslow_comp_thr;
+ bool active;
+ struct mutex lock;
+};
+
+struct fg_cyc_ctr_data {
+ bool en;
+ bool started[BUCKET_COUNT];
+ u16 count[BUCKET_COUNT];
+ u8 last_soc[BUCKET_COUNT];
+ int id;
+ struct mutex lock;
+};
+
+struct fg_iadc_comp_data {
+ u8 dfl_gain_reg[2];
+ bool gain_active;
+ int64_t dfl_gain;
+};
+
+struct fg_cc_soc_data {
+ int init_sys_soc;
+ int init_cc_soc;
+ int full_capacity;
+ int delta_soc;
+};
+
+/* FG_MEMIF setting index */
+enum fg_mem_setting_index {
+ FG_MEM_SOFT_COLD = 0,
+ FG_MEM_SOFT_HOT,
+ FG_MEM_HARD_COLD,
+ FG_MEM_HARD_HOT,
+ FG_MEM_RESUME_SOC,
+ FG_MEM_BCL_LM_THRESHOLD,
+ FG_MEM_BCL_MH_THRESHOLD,
+ FG_MEM_TERM_CURRENT,
+ FG_MEM_CHG_TERM_CURRENT,
+ FG_MEM_IRQ_VOLT_EMPTY,
+ FG_MEM_CUTOFF_VOLTAGE,
+ FG_MEM_VBAT_EST_DIFF,
+ FG_MEM_DELTA_SOC,
+ FG_MEM_BATT_LOW,
+ FG_MEM_THERM_DELAY,
+ FG_MEM_SETTING_MAX,
+};
+
+/* FG_MEMIF data index */
+enum fg_mem_data_index {
+ FG_DATA_BATT_TEMP = 0,
+ FG_DATA_OCV,
+ FG_DATA_VOLTAGE,
+ FG_DATA_CURRENT,
+ FG_DATA_BATT_ESR,
+ FG_DATA_BATT_ESR_COUNT,
+ FG_DATA_BATT_SOC,
+ FG_DATA_CC_CHARGE,
+ FG_DATA_VINT_ERR,
+ FG_DATA_CPRED_VOLTAGE,
+ /* values below this are read only once per profile reload */
+ FG_DATA_BATT_ID,
+ FG_DATA_BATT_ID_INFO,
+ FG_DATA_MAX,
+};
+
+#define SETTING(_idx, _address, _offset, _value) \
+ [FG_MEM_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_setting settings[FG_MEM_SETTING_MAX] = {
+ /* ID Address, Offset, Value*/
+ SETTING(SOFT_COLD, 0x454, 0, 100),
+ SETTING(SOFT_HOT, 0x454, 1, 400),
+ SETTING(HARD_COLD, 0x454, 2, 50),
+ SETTING(HARD_HOT, 0x454, 3, 450),
+ SETTING(RESUME_SOC, 0x45C, 1, 0),
+ SETTING(BCL_LM_THRESHOLD, 0x47C, 2, 50),
+ SETTING(BCL_MH_THRESHOLD, 0x47C, 3, 752),
+ SETTING(TERM_CURRENT, 0x40C, 2, 250),
+ SETTING(CHG_TERM_CURRENT, 0x4F8, 2, 250),
+ SETTING(IRQ_VOLT_EMPTY, 0x458, 3, 3100),
+ SETTING(CUTOFF_VOLTAGE, 0x40C, 0, 3200),
+ SETTING(VBAT_EST_DIFF, 0x000, 0, 30),
+ SETTING(DELTA_SOC, 0x450, 3, 1),
+ SETTING(BATT_LOW, 0x458, 0, 4200),
+ SETTING(THERM_DELAY, 0x4AC, 3, 0),
+};
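+
+/*
+ * Illustrative expansion (editorial note): the SETTING() macro uses
+ * designated initializers, so an entry such as
+ *
+ *	SETTING(SOFT_COLD, 0x454, 0, 100)
+ *
+ * expands to
+ *
+ *	[FG_MEM_SOFT_COLD] = { .address = 0x454, .offset = 0, .value = 100, }
+ */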
+
+#define DATA(_idx, _address, _offset, _length, _value) \
+ [FG_DATA_##_idx] = { \
+ .address = _address, \
+ .offset = _offset, \
+ .len = _length, \
+ .value = _value, \
+ } \
+
+static struct fg_mem_data fg_data[FG_DATA_MAX] = {
+ /* ID Address, Offset, Length, Value*/
+ DATA(BATT_TEMP, 0x550, 2, 2, -EINVAL),
+ DATA(OCV, 0x588, 3, 2, -EINVAL),
+ DATA(VOLTAGE, 0x5CC, 1, 2, -EINVAL),
+ DATA(CURRENT, 0x5CC, 3, 2, -EINVAL),
+ DATA(BATT_ESR, 0x554, 2, 2, -EINVAL),
+ DATA(BATT_ESR_COUNT, 0x558, 2, 2, -EINVAL),
+ DATA(BATT_SOC, 0x56C, 1, 3, -EINVAL),
+ DATA(CC_CHARGE, 0x570, 0, 4, -EINVAL),
+ DATA(VINT_ERR, 0x560, 0, 4, -EINVAL),
+ DATA(CPRED_VOLTAGE, 0x540, 0, 2, -EINVAL),
+ DATA(BATT_ID, 0x594, 1, 1, -EINVAL),
+ DATA(BATT_ID_INFO, 0x594, 3, 1, -EINVAL),
+};
+
+static int fg_debug_mask;
+module_param_named(
+ debug_mask, fg_debug_mask, int, S_IRUSR | S_IWUSR
+);
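+
+/*
+ * Illustrative note (editorial): debug_mask is a bitmask over the FG_*
+ * debug flag enum above, so enabling SRAM write and read tracing means
+ * setting BIT(3) | BIT(4) = 0x18. The sysfs path below assumes the module
+ * is named qpnp_fg and is shown only as an example:
+ *
+ *	echo 0x18 > /sys/module/qpnp_fg/parameters/debug_mask
+ */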
+
+static int fg_sense_type = -EINVAL;
+static int fg_restart;
+
+static int fg_est_dump;
+module_param_named(
+ first_est_dump, fg_est_dump, int, S_IRUSR | S_IWUSR
+);
+
+static char *fg_batt_type;
+module_param_named(
+ battery_type, fg_batt_type, charp, S_IRUSR | S_IWUSR
+);
+
+static int fg_sram_update_period_ms = 30000;
+module_param_named(
+ sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR
+);
+
+struct fg_irq {
+ int irq;
+ unsigned long disabled;
+};
+
+enum fg_soc_irq {
+ HIGH_SOC,
+ LOW_SOC,
+ FULL_SOC,
+ EMPTY_SOC,
+ DELTA_SOC,
+ FIRST_EST_DONE,
+ SW_FALLBK_OCV,
+ SW_FALLBK_NEW_BATT,
+ FG_SOC_IRQ_COUNT,
+};
+
+enum fg_batt_irq {
+ JEITA_SOFT_COLD,
+ JEITA_SOFT_HOT,
+ VBATT_LOW,
+ BATT_IDENTIFIED,
+ BATT_ID_REQ,
+ BATTERY_UNKNOWN,
+ BATT_MISSING,
+ BATT_MATCH,
+ FG_BATT_IRQ_COUNT,
+};
+
+enum fg_mem_if_irq {
+ FG_MEM_AVAIL,
+ TA_RCVRY_SUG,
+ FG_MEM_IF_IRQ_COUNT,
+};
+
+enum fg_batt_aging_mode {
+ FG_AGING_NONE,
+ FG_AGING_ESR,
+ FG_AGING_CC,
+};
+
+enum register_type {
+ MEM_INTF_CFG,
+ MEM_INTF_CTL,
+ MEM_INTF_ADDR_LSB,
+ MEM_INTF_RD_DATA0,
+ MEM_INTF_WR_DATA0,
+ MAX_ADDRESS,
+};
+
+struct register_offset {
+ u16 address[MAX_ADDRESS];
+};
+
+static struct register_offset offset[] = {
+ [0] = {
+ /* CFG CTL LSB RD0 WD0 */
+ .address = {0x40, 0x41, 0x42, 0x4C, 0x48},
+ },
+ [1] = {
+ /* CFG CTL LSB RD0 WD0 */
+ .address = {0x50, 0x51, 0x61, 0x67, 0x63},
+ },
+};
+
+#define MEM_INTF_CFG(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_CFG])
+#define MEM_INTF_CTL(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_CTL])
+#define MEM_INTF_ADDR_LSB(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_ADDR_LSB])
+#define MEM_INTF_RD_DATA0(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_RD_DATA0])
+#define MEM_INTF_WR_DATA0(chip) \
+ ((chip)->mem_base + (chip)->offset[MEM_INTF_WR_DATA0])
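+
+/*
+ * Illustrative note (editorial): with the first offset map (offset[0]) and
+ * a hypothetical mem_base of 0x4400, MEM_INTF_CFG(chip) evaluates to
+ * 0x4400 + 0x40 = 0x4440, whereas the second map (offset[1]) would give
+ * 0x4400 + 0x50 = 0x4450.
+ */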
+
+struct fg_wakeup_source {
+ struct wakeup_source source;
+ unsigned long enabled;
+};
+
+static void fg_stay_awake(struct fg_wakeup_source *source)
+{
+ if (!__test_and_set_bit(0, &source->enabled)) {
+ __pm_stay_awake(&source->source);
+ pr_debug("enabled source %s\n", source->source.name);
+ }
+}
+
+static void fg_relax(struct fg_wakeup_source *source)
+{
+ if (__test_and_clear_bit(0, &source->enabled)) {
+ __pm_relax(&source->source);
+ pr_debug("disabled source %s\n", source->source.name);
+ }
+}
+
+#define THERMAL_COEFF_N_BYTES 6
+struct fg_chip {
+ struct device *dev;
+ struct spmi_device *spmi;
+ u8 pmic_subtype;
+ u8 pmic_revision[4];
+ u8 revision[4];
+ u16 soc_base;
+ u16 batt_base;
+ u16 mem_base;
+ u16 vbat_adc_addr;
+ u16 ibat_adc_addr;
+ u16 tp_rev_addr;
+ u32 wa_flag;
+ atomic_t memif_user_cnt;
+ struct fg_irq soc_irq[FG_SOC_IRQ_COUNT];
+ struct fg_irq batt_irq[FG_BATT_IRQ_COUNT];
+ struct fg_irq mem_irq[FG_MEM_IF_IRQ_COUNT];
+ struct completion sram_access_granted;
+ struct completion sram_access_revoked;
+ struct completion batt_id_avail;
+ struct completion first_soc_done;
+ struct power_supply bms_psy;
+ struct mutex rw_lock;
+ struct mutex sysfs_restart_lock;
+ struct delayed_work batt_profile_init;
+ struct work_struct dump_sram;
+ struct work_struct status_change_work;
+ struct work_struct cycle_count_work;
+ struct work_struct battery_age_work;
+ struct work_struct update_esr_work;
+ struct work_struct set_resume_soc_work;
+ struct work_struct rslow_comp_work;
+ struct work_struct sysfs_restart_work;
+ struct work_struct init_work;
+ struct work_struct charge_full_work;
+ struct work_struct gain_comp_work;
+ struct work_struct bcl_hi_power_work;
+ struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
+ struct fg_wakeup_source memif_wakeup_source;
+ struct fg_wakeup_source profile_wakeup_source;
+ struct fg_wakeup_source empty_check_wakeup_source;
+ struct fg_wakeup_source resume_soc_wakeup_source;
+ struct fg_wakeup_source gain_comp_wakeup_source;
+ struct fg_wakeup_source capacity_learning_wakeup_source;
+ bool first_profile_loaded;
+ struct fg_wakeup_source update_temp_wakeup_source;
+ struct fg_wakeup_source update_sram_wakeup_source;
+ bool fg_restarting;
+ bool profile_loaded;
+ bool use_otp_profile;
+ bool battery_missing;
+ bool power_supply_registered;
+ bool sw_rbias_ctrl;
+ bool use_thermal_coefficients;
+ bool esr_strict_filter;
+ bool soc_empty;
+ bool charge_done;
+ bool resume_soc_lowered;
+ bool vbat_low_irq_enabled;
+ bool charge_full;
+ bool hold_soc_while_full;
+ bool input_present;
+ bool otg_present;
+ bool safety_timer_expired;
+ bool bad_batt_detection_en;
+ bool bcl_lpm_disabled;
+ struct delayed_work update_jeita_setting;
+ struct delayed_work update_sram_data;
+ struct delayed_work update_temp_work;
+ struct delayed_work check_empty_work;
+ char *batt_profile;
+ u8 thermal_coefficients[THERMAL_COEFF_N_BYTES];
+ u32 cc_cv_threshold_mv;
+ unsigned int batt_profile_len;
+ unsigned int batt_max_voltage_uv;
+ const char *batt_type;
+ const char *batt_psy_name;
+ unsigned long last_sram_update_time;
+ unsigned long last_temp_update_time;
+ int64_t ocv_coeffs[12];
+ int64_t cutoff_voltage;
+ int evaluation_current;
+ int ocv_junction_p1p2;
+ int ocv_junction_p2p3;
+ int nom_cap_uah;
+ int actual_cap_uah;
+ int status;
+ int prev_status;
+ int health;
+ enum fg_batt_aging_mode batt_aging_mode;
+ /* capacity learning */
+ struct fg_learning_data learning_data;
+ struct alarm fg_cap_learning_alarm;
+ struct work_struct fg_cap_learning_work;
+ struct fg_cc_soc_data sw_cc_soc_data;
+ /* rslow compensation */
+ struct fg_rslow_data rslow_comp;
+ /* cycle counter */
+ struct fg_cyc_ctr_data cyc_ctr;
+ /* iadc compensation */
+ struct fg_iadc_comp_data iadc_comp_data;
+ /* interleaved memory access */
+ u16 *offset;
+ bool ima_supported;
+ bool init_done;
+ bool jeita_hysteresis_support;
+ bool batt_hot;
+ bool batt_cold;
+ int cold_hysteresis;
+ int hot_hysteresis;
+};
+
+/* FG_MEMIF DEBUGFS structures */
+#define ADDR_LEN 4 /* 3 byte address + 1 space character */
+#define CHARS_PER_ITEM 3 /* Format is 'XX ' */
+#define ITEMS_PER_LINE 4 /* 4 data items per line */
+#define MAX_LINE_LENGTH (ADDR_LEN + (ITEMS_PER_LINE * CHARS_PER_ITEM) + 1)
+#define MAX_REG_PER_TRANSACTION (8)
+
+static const char *DFS_ROOT_NAME = "fg_memif";
+static const mode_t DFS_MODE = S_IRUSR | S_IWUSR;
+static const char *default_batt_type = "Unknown Battery";
+static const char *loading_batt_type = "Loading Battery Data";
+static const char *missing_batt_type = "Disconnected Battery";
+
+/* Log buffer */
+struct fg_log_buffer {
+ size_t rpos; /* Current 'read' position in buffer */
+ size_t wpos; /* Current 'write' position in buffer */
+ size_t len; /* Length of the buffer */
+ char data[0]; /* Log buffer */
+};
+
+/* transaction parameters */
+struct fg_trans {
+ u32 cnt; /* Number of bytes to read */
+ u16 addr; /* 12-bit address in SRAM */
+ u32 offset; /* Offset of last read data + byte offset */
+ struct fg_chip *chip;
+ struct fg_log_buffer *log; /* log buffer */
+ u8 *data; /* fg data that is read */
+};
+
+struct fg_dbgfs {
+ u32 cnt;
+ u32 addr;
+ struct fg_chip *chip;
+ struct dentry *root;
+ struct mutex lock;
+ struct debugfs_blob_wrapper help_msg;
+};
+
+static struct fg_dbgfs dbgfs_data = {
+ .lock = __MUTEX_INITIALIZER(dbgfs_data.lock),
+ .help_msg = {
+ .data =
+"FG Debug-FS support\n"
+"\n"
+"Hierarchy schema:\n"
+"/sys/kernel/debug/fg_memif\n"
+" /help -- Static help text\n"
+" /address -- Starting register address for reads or writes\n"
+" /count -- Number of registers to read (only used for reads)\n"
+" /data -- Initiates the SRAM read (formatted output)\n"
+"\n",
+ },
+};
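+
+/*
+ * Illustrative use of the debugfs nodes described in the help text above
+ * (editorial sketch; the address, count and value formats are assumptions):
+ *
+ *	echo 0x550 > /sys/kernel/debug/fg_memif/address
+ *	echo 4 > /sys/kernel/debug/fg_memif/count
+ *	cat /sys/kernel/debug/fg_memif/data
+ */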
+
+static const struct of_device_id fg_match_table[] = {
+ { .compatible = QPNP_FG_DEV_NAME, },
+ {}
+};
+
+static char *fg_supplicants[] = {
+ "battery",
+ "bcl",
+ "fg_adc"
+};
+
+#define DEBUG_PRINT_BUFFER_SIZE 64
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < buf_len; i++) {
+ pos += scnprintf(str + pos, str_len - pos, "%02X", buf[i]);
+ if (i < buf_len - 1)
+ pos += scnprintf(str + pos, str_len - pos, " ");
+ }
+}
+
+static int fg_write(struct fg_chip *chip, u8 *val, u16 addr, int len)
+{
+ int rc = 0;
+ struct spmi_device *spmi = chip->spmi;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if ((addr & 0xff00) == 0) {
+ pr_err("addr cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ addr, spmi->sid, rc);
+ return -EINVAL;
+ }
+
+ rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, addr, val, len);
+ if (rc) {
+ pr_err("write failed addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, spmi->sid, rc);
+ return rc;
+ }
+
+ if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_WRITES)) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
+ pr_info("write(0x%04X), sid=%d, len=%d; %s\n",
+ addr, spmi->sid, len, str);
+ }
+
+ return rc;
+}
+
+static int fg_read(struct fg_chip *chip, u8 *val, u16 addr, int len)
+{
+ int rc = 0;
+ struct spmi_device *spmi = chip->spmi;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if ((addr & 0xff00) == 0) {
+ pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ addr, spmi->sid, rc);
+ return -EINVAL;
+ }
+
+ rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, addr, val, len);
+ if (rc) {
+ pr_err("SPMI read failed base=0x%02x sid=0x%02x rc=%d\n", addr,
+ spmi->sid, rc);
+ return rc;
+ }
+
+ if (!rc && (fg_debug_mask & FG_SPMI_DEBUG_READS)) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, len);
+ pr_info("read(0x%04x), sid=%d, len=%d; %s\n",
+ addr, spmi->sid, len, str);
+ }
+
+ return rc;
+}
+
+static int fg_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, int len)
+{
+ int rc;
+ u8 reg;
+
+ rc = fg_read(chip, &reg, addr, len);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+ pr_debug("addr = 0x%x read 0x%x\n", addr, reg);
+
+ reg &= ~mask;
+ reg |= val & mask;
+
+ pr_debug("Writing 0x%x\n", reg);
+
+ rc = fg_write(chip, &reg, addr, len);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define RIF_MEM_ACCESS_REQ BIT(7)
+static int fg_check_rif_mem_access(struct fg_chip *chip, bool *status)
+{
+ int rc;
+ u8 mem_if_sts;
+
+ rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
+ if (rc) {
+ pr_err("failed to read rif_mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ *status = mem_if_sts & RIF_MEM_ACCESS_REQ;
+ return 0;
+}
+
+static bool fg_check_sram_access(struct fg_chip *chip)
+{
+ int rc;
+ u8 mem_if_sts;
+ bool rif_mem_sts = false;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return false;
+ }
+
+ if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0)
+ return false;
+
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ return false;
+
+ return rif_mem_sts;
+}
+
+static inline int fg_assert_sram_access(struct fg_chip *chip)
+{
+ int rc;
+ u8 mem_if_sts;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((mem_if_sts & BIT(FG_MEM_AVAIL)) == 0) {
+ pr_err("mem_avail not high: %02x\n", mem_if_sts);
+ return -EINVAL;
+ }
+
+ rc = fg_read(chip, &mem_if_sts, MEM_INTF_CFG(chip), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return rc;
+ }
+
+ if ((mem_if_sts & RIF_MEM_ACCESS_REQ) == 0) {
+ pr_err("mem_avail not high: %02x\n", mem_if_sts);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define INTF_CTL_BURST BIT(7)
+#define INTF_CTL_WR_EN BIT(6)
+static int fg_config_access(struct fg_chip *chip, bool write,
+ bool burst)
+{
+ int rc;
+ u8 intf_ctl = 0;
+
+ intf_ctl = (write ? INTF_CTL_WR_EN : 0) | (burst ? INTF_CTL_BURST : 0);
+
+ rc = fg_write(chip, &intf_ctl, MEM_INTF_CTL(chip), 1);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ return -EIO;
+ }
+
+ return rc;
+}
+
+static int fg_req_and_wait_access(struct fg_chip *chip, int timeout)
+{
+ int rc = 0, ret = 0;
+ bool tried_again = false;
+
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ RIF_MEM_ACCESS_REQ, RIF_MEM_ACCESS_REQ, 1);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ return -EIO;
+ }
+ fg_stay_awake(&chip->memif_wakeup_source);
+ }
+
+wait:
+ /* Wait for MEM_AVAIL IRQ. */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_granted,
+ msecs_to_jiffies(timeout));
+ /* If we were interrupted wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("transaction timed out rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int fg_release_access(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ RIF_MEM_ACCESS_REQ, 0, 1);
+ fg_relax(&chip->memif_wakeup_source);
+ reinit_completion(&chip->sram_access_granted);
+
+ return rc;
+}
+
+static void fg_release_access_if_necessary(struct fg_chip *chip)
+{
+ mutex_lock(&chip->rw_lock);
+ if (atomic_sub_return(1, &chip->memif_user_cnt) <= 0) {
+ fg_release_access(chip);
+ }
+ mutex_unlock(&chip->rw_lock);
+}
+
+/*
+ * fg_mem_lock prevents the fuel gauge driver from releasing SRAM access
+ * until the lock itself is released.
+ *
+ * An equal number of calls must be made to fg_mem_release before the
+ * driver releases SRAM access.
+ */
+static void fg_mem_lock(struct fg_chip *chip)
+{
+ mutex_lock(&chip->rw_lock);
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+}
+
+static void fg_mem_release(struct fg_chip *chip)
+{
+ fg_release_access_if_necessary(chip);
+}
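+
+/*
+ * Illustrative pairing (editorial sketch): a caller that needs several SRAM
+ * transactions without dropping the access grant in between brackets them
+ * with fg_mem_lock()/fg_mem_release(). The example assumes the fg_mem_read()
+ * wrapper defined later in this file and uses placeholder addresses:
+ *
+ *	fg_mem_lock(chip);
+ *	fg_mem_read(chip, buf_a, 0x454, 4, 0, 0);
+ *	fg_mem_read(chip, buf_b, 0x458, 4, 0, 0);
+ *	fg_mem_release(chip);
+ */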
+
+static int fg_set_ram_addr(struct fg_chip *chip, u16 *address)
+{
+ int rc;
+
+ rc = fg_write(chip, (u8 *) address,
+ chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], 2);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n",
+ chip->mem_base + chip->offset[MEM_INTF_ADDR_LSB], rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define BUF_LEN 4
+static int fg_sub_mem_read(struct fg_chip *chip, u8 *val, u16 address, int len,
+ int offset)
+{
+ int rc, total_len;
+ u8 *rd_data = val;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ rc = fg_config_access(chip, 0, (len > 4));
+ if (rc)
+ return rc;
+
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ return rc;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("length %d addr=%02X\n", len, address);
+
+ total_len = len;
+ while (len > 0) {
+ if (!offset) {
+ rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip),
+ min(len, BUF_LEN));
+ } else {
+ rc = fg_read(chip, rd_data,
+ MEM_INTF_RD_DATA0(chip) + offset,
+ min(len, BUF_LEN - offset));
+
+ /* manually set address to allow continuous reads */
+ address += BUF_LEN;
+
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ return rc;
+ }
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ MEM_INTF_RD_DATA0(chip) + offset, rc);
+ return rc;
+ }
+ rd_data += (BUF_LEN - offset);
+ len -= (BUF_LEN - offset);
+ offset = 0;
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+ pr_info("data: %s\n", str);
+ }
+ return rc;
+}
+
+static int fg_conventional_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ int rc = 0, user_cnt = 0, orig_address = address;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("user_cnt %d\n", user_cnt);
+ mutex_lock(&chip->rw_lock);
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
+ if (rc)
+ goto out;
+ }
+
+ rc = fg_sub_mem_read(chip, val, address, len, offset);
+
+out:
+ user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("user_cnt %d\n", user_cnt);
+
+ fg_assert_sram_access(chip);
+
+ if (!keep_access && (user_cnt == 0) && !rc) {
+ rc = fg_release_access(chip);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ rc = -EIO;
+ }
+ }
+
+ mutex_unlock(&chip->rw_lock);
+ return rc;
+}
+
+static int fg_conventional_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ int rc = 0, user_cnt = 0, sublen;
+ bool access_configured = false;
+ u8 *wr_data = val, word[4];
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if (address < RAM_OFFSET)
+ return -EINVAL;
+
+ if (offset > 3)
+ return -EINVAL;
+
+ address = ((address + offset) / 4) * 4;
+ offset = (address + offset) % 4;
+
+ user_cnt = atomic_add_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("user_cnt %d\n", user_cnt);
+ mutex_lock(&chip->rw_lock);
+ if (!fg_check_sram_access(chip)) {
+ rc = fg_req_and_wait_access(chip, MEM_IF_TIMEOUT_MS);
+ if (rc)
+ goto out;
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES) {
+ pr_info("length %d addr=%02X offset=%d\n",
+ len, address, offset);
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, wr_data, len);
+ pr_info("writing: %s\n", str);
+ }
+
+ while (len > 0) {
+ if (offset != 0) {
+ sublen = min(4 - offset, len);
+ rc = fg_sub_mem_read(chip, word, address, 4, 0);
+ if (rc)
+ goto out;
+ memcpy(word + offset, wr_data, sublen);
+ /* configure access as burst if more to write */
+ rc = fg_config_access(chip, 1, (len - sublen) > 0);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ offset = 0;
+ access_configured = true;
+ } else if (len >= 4) {
+ if (!access_configured) {
+ rc = fg_config_access(chip, 1, len > 4);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ access_configured = true;
+ }
+ sublen = 4;
+ memcpy(word, wr_data, 4);
+ } else if (len > 0 && len < 4) {
+ sublen = len;
+ rc = fg_sub_mem_read(chip, word, address, 4, 0);
+ if (rc)
+ goto out;
+ memcpy(word, wr_data, sublen);
+ rc = fg_config_access(chip, 1, 0);
+ if (rc)
+ goto out;
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc)
+ goto out;
+ access_configured = true;
+ } else {
+ pr_err("Invalid length: %d\n", len);
+ break;
+ }
+ rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip), 4);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03x, rc=%d\n",
+ MEM_INTF_WR_DATA0(chip), rc);
+ goto out;
+ }
+ len -= sublen;
+ wr_data += sublen;
+ address += 4;
+ }
+
+out:
+ user_cnt = atomic_sub_return(1, &chip->memif_user_cnt);
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("user_cnt %d\n", user_cnt);
+
+ fg_assert_sram_access(chip);
+
+ if (!keep_access && (user_cnt == 0) && !rc) {
+ rc = fg_release_access(chip);
+ if (rc) {
+ pr_err("failed to set mem access bit\n");
+ rc = -EIO;
+ }
+ }
+
+ mutex_unlock(&chip->rw_lock);
+ return rc;
+}
+
+#define MEM_INTF_IMA_CFG 0x52
+#define MEM_INTF_IMA_OPR_STS 0x54
+#define MEM_INTF_IMA_ERR_STS 0x5F
+#define MEM_INTF_IMA_EXP_STS 0x55
+#define MEM_INTF_IMA_HW_STS 0x56
+#define MEM_INTF_IMA_BYTE_EN 0x60
+#define IMA_ADDR_STBL_ERR BIT(7)
+#define IMA_WR_ACS_ERR BIT(6)
+#define IMA_RD_ACS_ERR BIT(5)
+#define IMA_IACS_CLR BIT(2)
+#define IMA_IACS_RDY BIT(1)
+static int fg_check_ima_exception(struct fg_chip *chip)
+{
+ int rc = 0, ret = 0;
+ u8 err_sts, exp_sts = 0, hw_sts = 0;
+
+ rc = fg_read(chip, &err_sts,
+ chip->mem_base + MEM_INTF_IMA_ERR_STS, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ return rc;
+ }
+
+ if (err_sts & (IMA_ADDR_STBL_ERR | IMA_WR_ACS_ERR | IMA_RD_ACS_ERR)) {
+ u8 temp;
+
+ fg_read(chip, &exp_sts,
+ chip->mem_base + MEM_INTF_IMA_EXP_STS, 1);
+ fg_read(chip, &hw_sts,
+ chip->mem_base + MEM_INTF_IMA_HW_STS, 1);
+ pr_err("IMA access failed ima_err_sts=%x ima_exp_sts=%x ima_hw_sts=%x\n",
+ err_sts, exp_sts, hw_sts);
+ rc = err_sts;
+
+ /* clear the error */
+ ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, IMA_IACS_CLR, 1);
+ temp = 0x4;
+ ret |= fg_write(chip, &temp, MEM_INTF_ADDR_LSB(chip) + 1, 1);
+ temp = 0x0;
+ ret |= fg_write(chip, &temp, MEM_INTF_WR_DATA0(chip) + 3, 1);
+ ret |= fg_read(chip, &temp, MEM_INTF_RD_DATA0(chip) + 3, 1);
+ ret |= fg_masked_write(chip, chip->mem_base + MEM_INTF_IMA_CFG,
+ IMA_IACS_CLR, 0, 1);
+ if (!ret)
+ return -EAGAIN;
+ else
+ pr_err("Error clearing IMA exception ret=%d\n", ret);
+ }
+
+ return rc;
+}
+
+static int fg_check_iacs_ready(struct fg_chip *chip)
+{
+ int rc = 0, timeout = 250;
+ u8 ima_opr_sts = 0;
+
+ /*
+ * Additional delay to make sure IACS ready bit is set after
+ * Read/Write operation.
+ */
+
+ usleep_range(30, 35);
+ while (1) {
+ rc = fg_read(chip, &ima_opr_sts,
+ chip->mem_base + MEM_INTF_IMA_OPR_STS, 1);
+ if (!rc && (ima_opr_sts & IMA_IACS_RDY)) {
+ break;
+ } else {
+ if (!(--timeout) || rc)
+ break;
+ /* delay for iacs_ready to be asserted */
+ usleep_range(5000, 7000);
+ }
+ }
+
+ if (!timeout || rc) {
+ pr_err("IACS_RDY not set\n");
+ /* perform IACS_CLR sequence */
+ fg_check_ima_exception(chip);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#define IACS_SLCT BIT(5)
+static int __fg_interleaved_mem_write(struct fg_chip *chip, u8 *val,
+ u16 address, int offset, int len)
+{
+ int rc = 0, i;
+ u8 *word = val, byte_enable = 0, num_bytes = 0;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_WRITES)
+ pr_info("length %d addr=%02X offset=%d\n",
+ len, address, offset);
+
+ while (len > 0) {
+ num_bytes = (offset + len) > BUF_LEN ?
+ (BUF_LEN - offset) : len;
+ /* write to byte_enable */
+ for (i = offset; i < (offset + num_bytes); i++)
+ byte_enable |= BIT(i);
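+		/*
+		 * Illustrative example (assuming BUF_LEN is 4, i.e. one
+		 * 32-bit WR_DATA word): offset = 1 and len = 2 gives
+		 * num_bytes = 2 and byte_enable = BIT(1) | BIT(2) = 0x06,
+		 * so only WR_DATA1 and WR_DATA2 are committed to SRAM.
+		 */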
+
+ rc = fg_write(chip, &byte_enable,
+ chip->mem_base + MEM_INTF_IMA_BYTE_EN, 1);
+ if (rc) {
+ pr_err("Unable to write to byte_en_reg rc=%d\n",
+ rc);
+ return rc;
+ }
+ /* write data */
+ rc = fg_write(chip, word, MEM_INTF_WR_DATA0(chip) + offset,
+ num_bytes);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03x, rc=%d\n",
+ MEM_INTF_WR_DATA0(chip) + offset, rc);
+ return rc;
+ }
+ /*
+ * The last-byte WR_DATA3 starts the write transaction.
+ * Write a dummy value to WR_DATA3 if it does not have
+ * valid data. This dummy data is not written to the
+ * SRAM as byte_en for WR_DATA3 is not set.
+ */
+ if (!(byte_enable & BIT(3))) {
+ u8 dummy_byte = 0x0;
+ rc = fg_write(chip, &dummy_byte,
+ MEM_INTF_WR_DATA0(chip) + 3, 1);
+ if (rc) {
+ pr_err("Unable to write dummy-data to WR_DATA3 rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip);
+ if (rc) {
+			pr_err("IMA transaction failed rc=%d\n", rc);
+ return rc;
+ }
+
+ word += num_bytes;
+ len -= num_bytes;
+ offset = byte_enable = 0;
+ }
+
+ return rc;
+}
+
+static int __fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int offset, int len)
+{
+ int rc = 0, total_len;
+ u8 *rd_data = val, num_bytes;
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("length %d addr=%02X\n", len, address);
+
+ total_len = len;
+ while (len > 0) {
+ num_bytes = (offset + len) > BUF_LEN ? (BUF_LEN - offset) : len;
+ rc = fg_read(chip, rd_data, MEM_INTF_RD_DATA0(chip) + offset,
+ num_bytes);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ MEM_INTF_RD_DATA0(chip) + offset, rc);
+ return rc;
+ }
+
+ rd_data += num_bytes;
+ len -= num_bytes;
+ offset = 0;
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* check for error condition */
+ rc = fg_check_ima_exception(chip);
+ if (rc) {
+			pr_err("IMA transaction failed rc=%d\n", rc);
+ return rc;
+ }
+
+ if (len && (len + offset) < BUF_LEN) {
+ /* move to single mode */
+ u8 intr_ctl = 0;
+
+ rc = fg_write(chip, &intr_ctl, MEM_INTF_CTL(chip), 1);
+ if (rc) {
+ pr_err("failed to move to single mode rc=%d\n",
+ rc);
+ return -EIO;
+ }
+ }
+ }
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS) {
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, val, total_len);
+ pr_info("data: %s\n", str);
+ }
+
+ return rc;
+}
+
+#define IMA_REQ_ACCESS (IACS_SLCT | RIF_MEM_ACCESS_REQ)
+static int fg_interleaved_mem_config(struct fg_chip *chip, u8 *val,
+ u16 address, int len, int offset, int op)
+{
+ int rc = 0;
+ bool rif_mem_sts = true;
+ int time_count = 0;
+
+ while (1) {
+ rc = fg_check_rif_mem_access(chip, &rif_mem_sts);
+ if (rc)
+ return rc;
+
+ if (!rif_mem_sts)
+ break;
+
+ if (fg_debug_mask & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("RIF_MEM_ACCESS_REQ is not clear yet for IMA_%s\n",
+ op ? "write" : "read");
+
+ /*
+ * Try this no more than 4 times. If RIF_MEM_ACCESS_REQ is not
+ * clear, then return an error instead of waiting for it again.
+ */
+ if (time_count > 4) {
+			pr_err("Timed out after ~20 ms polling RIF_MEM_ACCESS_REQ\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Wait for 4ms before reading RIF_MEM_ACCESS_REQ again */
+ usleep_range(4000, 4100);
+ time_count++;
+ }
+
+ /* configure for IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ IMA_REQ_ACCESS, IMA_REQ_ACCESS, 1);
+ if (rc) {
+ pr_err("failed to set mem access bit rc = %d\n", rc);
+ return rc;
+ }
+
+ /* configure for the read/write single/burst mode */
+ rc = fg_config_access(chip, op, (offset + len) > 4);
+ if (rc) {
+		pr_err("failed to configure memory access rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc) {
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+ return rc;
+ }
+
+ /* write addresses to the register */
+ rc = fg_set_ram_addr(chip, &address);
+ if (rc) {
+ pr_err("failed to set SRAM address rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_check_iacs_ready(chip);
+ if (rc)
+ pr_debug("IACS_RDY failed rc=%d\n", rc);
+
+ return rc;
+}
+
+#define MEM_INTF_FG_BEAT_COUNT 0x57
+#define BEAT_COUNT_MASK 0x0F
+#define RETRY_COUNT 3
+static int fg_interleaved_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset)
+{
+ int rc = 0, orig_address = address;
+ u8 start_beat_count, end_beat_count, count = 0;
+ bool retry = false;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ fg_stay_awake(&chip->memif_wakeup_source);
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ if (address < RAM_OFFSET) {
+ /*
+		 * OTP memory reads need a conventional memory access; do a
+		 * conventional read when the address is below RAM_OFFSET.
+ */
+ rc = fg_conventional_mem_read(chip, val, address, len, offset,
+ 0);
+ if (rc)
+ pr_err("Failed to read OTP memory %d\n", rc);
+ goto exit;
+ }
+
+ mutex_lock(&chip->rw_lock);
+
+retry:
+ rc = fg_interleaved_mem_config(chip, val, address, offset, len, 0);
+ if (rc) {
+ pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ goto out;
+ }
+
+ /* read the start beat count */
+ rc = fg_read(chip, &start_beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto out;
+ }
+
+ /* read data */
+ rc = __fg_interleaved_mem_read(chip, val, address, offset, len);
+ if (rc) {
+ if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+ count++;
+ pr_err("IMA access failed retry_count = %d\n", count);
+ goto retry;
+ } else {
+ pr_err("failed to read SRAM address rc = %d\n", rc);
+ goto out;
+ }
+ }
+
+ /* read the end beat count */
+ rc = fg_read(chip, &end_beat_count,
+ chip->mem_base + MEM_INTF_FG_BEAT_COUNT, 1);
+ if (rc) {
+ pr_err("failed to read beat count rc=%d\n", rc);
+ goto out;
+ }
+
+ start_beat_count &= BEAT_COUNT_MASK;
+ end_beat_count &= BEAT_COUNT_MASK;
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("Start beat_count = %x End beat_count = %x\n",
+ start_beat_count, end_beat_count);
+ if (start_beat_count != end_beat_count) {
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+			pr_info("Beat counts do not match - retrying transaction\n");
+ retry = true;
+ }
+out:
+ /* Release IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (rc)
+ pr_err("failed to reset IMA access bit rc = %d\n", rc);
+
+ if (retry) {
+ retry = false;
+ goto retry;
+ }
+ mutex_unlock(&chip->rw_lock);
+
+exit:
+ fg_relax(&chip->memif_wakeup_source);
+ return rc;
+}
+
+static int fg_interleaved_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset)
+{
+ int rc = 0, orig_address = address;
+ u8 count = 0;
+
+ if (address < RAM_OFFSET)
+ return -EINVAL;
+
+ if (offset > 3) {
+ pr_err("offset too large %d\n", offset);
+ return -EINVAL;
+ }
+
+ fg_stay_awake(&chip->memif_wakeup_source);
+ address = ((orig_address + offset) / 4) * 4;
+ offset = (orig_address + offset) % 4;
+
+ mutex_lock(&chip->rw_lock);
+
+retry:
+ rc = fg_interleaved_mem_config(chip, val, address, offset, len, 1);
+ if (rc) {
+		pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ goto out;
+ }
+
+ /* write data */
+ rc = __fg_interleaved_mem_write(chip, val, address, offset, len);
+ if (rc) {
+ if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
+ count++;
+ pr_err("IMA access failed retry_count = %d\n", count);
+ goto retry;
+ } else {
+ pr_err("failed to write SRAM address rc = %d\n", rc);
+ goto out;
+ }
+ }
+
+out:
+ /* Release IMA access */
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), IMA_REQ_ACCESS, 0, 1);
+ if (rc)
+ pr_err("failed to reset IMA access bit rc = %d\n", rc);
+
+ mutex_unlock(&chip->rw_lock);
+ fg_relax(&chip->memif_wakeup_source);
+ return rc;
+}
+
+static int fg_mem_read(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ if (chip->ima_supported)
+ return fg_interleaved_mem_read(chip, val, address,
+ len, offset);
+ else
+ return fg_conventional_mem_read(chip, val, address,
+ len, offset, keep_access);
+}
+
+static int fg_mem_write(struct fg_chip *chip, u8 *val, u16 address,
+ int len, int offset, bool keep_access)
+{
+ if (chip->ima_supported)
+ return fg_interleaved_mem_write(chip, val, address,
+ len, offset);
+ else
+ return fg_conventional_mem_write(chip, val, address,
+ len, offset, keep_access);
+}
+
+static int fg_mem_masked_write(struct fg_chip *chip, u16 addr,
+ u8 mask, u8 val, u8 offset)
+{
+ int rc = 0;
+ u8 reg[4];
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+
+ rc = fg_mem_read(chip, reg, addr, 4, 0, 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ reg[offset] &= ~mask;
+ reg[offset] |= val & mask;
+
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, reg, 4);
+ pr_debug("Writing %s address %03x, offset %d\n", str, addr, offset);
+
+ rc = fg_mem_write(chip, reg, addr, 4, 0, 0);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int soc_to_setpoint(int soc)
+{
+ return DIV_ROUND_CLOSEST(soc * 255, 100);
+}
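+
+/*
+ * Illustrative example: soc_to_setpoint(90) = DIV_ROUND_CLOSEST(90 * 255, 100)
+ * = 230 (0xE6), i.e. the percentage is scaled onto the 8-bit 0..255 range.
+ */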
+
+static void batt_to_setpoint_adc(int vbatt_mv, u8 *data)
+{
+ int val;
+	/* Battery voltage is an offset from 0 V and the LSB is 5/2^15 V. */
+ val = DIV_ROUND_CLOSEST(vbatt_mv * 32768, 5000);
+ data[0] = val & 0xFF;
+ data[1] = val >> 8;
+ return;
+}
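+
+/*
+ * Illustrative example: batt_to_setpoint_adc(3400, data) computes
+ * DIV_ROUND_CLOSEST(3400 * 32768, 5000) = 22282 = 0x570A and stores
+ * data[0] = 0x0A, data[1] = 0x57 (little endian, ~0.1526 mV per LSB).
+ */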
+
+static u8 batt_to_setpoint_8b(int vbatt_mv)
+{
+ int val;
+ /* Battery voltage is an offset from 2.5 V and LSB is 5/2^9. */
+ val = (vbatt_mv - 2500) * 512 / 1000;
+ return DIV_ROUND_CLOSEST(val, 5);
+}
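+
+/*
+ * Illustrative example: batt_to_setpoint_8b(3600) computes
+ * (3600 - 2500) * 512 / 1000 = 563, then DIV_ROUND_CLOSEST(563, 5) = 113
+ * (0x71), with an LSB of 5/2^9 V (~9.77 mV) above the 2.5 V offset.
+ */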
+
+static u8 therm_delay_to_setpoint(u32 delay_us)
+{
+ u8 val;
+
+ if (delay_us < 2560)
+ val = 0;
+ else if (delay_us > 163840)
+ val = 7;
+ else
+ val = ilog2(delay_us / 10) - 7;
+ return val << 5;
+}
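+
+/*
+ * Illustrative example: therm_delay_to_setpoint(10240) computes
+ * ilog2(10240 / 10) - 7 = 10 - 7 = 3 and returns 3 << 5 = 0x60;
+ * delays below 2560 us map to 0 and delays above 163840 us saturate at 7 << 5.
+ */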
+
+static int get_current_time(unsigned long *now_tm_sec)
+{
+ struct rtc_time tm;
+ struct rtc_device *rtc;
+ int rc;
+
+ rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (rtc == NULL) {
+ pr_err("%s: unable to open rtc device (%s)\n",
+ __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ return -EINVAL;
+ }
+
+ rc = rtc_read_time(rtc, &tm);
+ if (rc) {
+ pr_err("Error reading rtc device (%s) : %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+
+ rc = rtc_valid_tm(&tm);
+ if (rc) {
+ pr_err("Invalid RTC time (%s): %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+ rtc_tm_to_time(&tm, now_tm_sec);
+
+close_time:
+ rtc_class_close(rtc);
+ return rc;
+}
+
+#define BATTERY_SOC_REG 0x56C
+#define BATTERY_SOC_OFFSET 1
+#define FULL_PERCENT_3B 0xFFFFFF
+static int get_battery_soc_raw(struct fg_chip *chip)
+{
+ int rc;
+ u8 buffer[3];
+
+ rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
+ if (rc) {
+ pr_err("Unable to read battery soc: %d\n", rc);
+ return 0;
+ }
+ return (int)(buffer[2] << 16 | buffer[1] << 8 | buffer[0]);
+}
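+
+/*
+ * The raw battery SOC is a 24-bit fraction of FULL_PERCENT_3B (0xFFFFFF);
+ * e.g. a reading of 0x800000 corresponds to 0x800000 * 100 / 0xFFFFFF = 50%.
+ */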
+
+#define COUNTER_IMPTR_REG 0X558
+#define COUNTER_PULSE_REG 0X55C
+#define SOC_FULL_REG 0x564
+#define COUNTER_IMPTR_OFFSET 2
+#define COUNTER_PULSE_OFFSET 0
+#define SOC_FULL_OFFSET 3
+#define ESR_PULSE_RECONFIG_SOC 0xFFF971
+static int fg_configure_soc(struct fg_chip *chip)
+{
+ u32 batt_soc;
+ u8 cntr[2] = {0, 0};
+ int rc = 0;
+
+ mutex_lock(&chip->rw_lock);
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+
+ /* Read Battery SOC */
+ batt_soc = get_battery_soc_raw(chip);
+
+ if (batt_soc > ESR_PULSE_RECONFIG_SOC) {
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("Configuring soc registers batt_soc: %x\n",
+ batt_soc);
+ batt_soc = ESR_PULSE_RECONFIG_SOC;
+ rc = fg_mem_write(chip, (u8 *)&batt_soc, BATTERY_SOC_REG, 3,
+ BATTERY_SOC_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write BATT_SOC rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, (u8 *)&batt_soc, SOC_FULL_REG, 3,
+ SOC_FULL_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_IMPTR_REG, 2,
+ COUNTER_IMPTR_OFFSET, 1);
+ if (rc) {
+ pr_err("failed to write COUNTER_IMPTR rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = fg_mem_write(chip, cntr, COUNTER_PULSE_REG, 2,
+ COUNTER_PULSE_OFFSET, 0);
+ if (rc)
+			pr_err("failed to write COUNTER_PULSE rc=%d\n", rc);
+ }
+out:
+ fg_release_access_if_necessary(chip);
+ return rc;
+}
+
+#define SOC_EMPTY BIT(3)
+static bool fg_is_batt_empty(struct fg_chip *chip)
+{
+ u8 fg_soc_sts;
+ int rc;
+
+ rc = fg_read(chip, &fg_soc_sts,
+ INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ return false;
+ }
+
+ return (fg_soc_sts & SOC_EMPTY) != 0;
+}
+
+static int get_monotonic_soc_raw(struct fg_chip *chip)
+{
+ u8 cap[2];
+ int rc, tries = 0;
+
+ while (tries < MAX_TRIES_SOC) {
+ rc = fg_read(chip, cap,
+ chip->soc_base + SOC_MONOTONIC_SOC, 2);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03x, rc=%d\n",
+ chip->soc_base + SOC_MONOTONIC_SOC, rc);
+ return rc;
+ }
+
+ if (cap[0] == cap[1])
+ break;
+
+ tries++;
+ }
+
+ if (tries == MAX_TRIES_SOC) {
+ pr_err("shadow registers do not match\n");
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info_ratelimited("raw: 0x%02x\n", cap[0]);
+ return cap[0];
+}
+
+#define EMPTY_CAPACITY 0
+#define DEFAULT_CAPACITY 50
+#define MISSING_CAPACITY 100
+#define FULL_CAPACITY 100
+#define FULL_SOC_RAW 0xFF
+static int get_prop_capacity(struct fg_chip *chip)
+{
+ int msoc;
+
+ if (chip->battery_missing)
+ return MISSING_CAPACITY;
+ if (!chip->profile_loaded && !chip->use_otp_profile)
+ return DEFAULT_CAPACITY;
+ if (chip->charge_full)
+ return FULL_CAPACITY;
+ if (chip->soc_empty) {
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info_ratelimited("capacity: %d, EMPTY\n",
+ EMPTY_CAPACITY);
+ return EMPTY_CAPACITY;
+ }
+ msoc = get_monotonic_soc_raw(chip);
+ if (msoc == 0)
+ return EMPTY_CAPACITY;
+ else if (msoc == FULL_SOC_RAW)
+ return FULL_CAPACITY;
+ return DIV_ROUND_CLOSEST((msoc - 1) * (FULL_CAPACITY - 2),
+ FULL_SOC_RAW - 2) + 1;
+}
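+
+/*
+ * Illustrative mapping: raw monotonic SOC 0x00 and 0xFF report 0% and 100%
+ * directly; in between, msoc = 0x80 (128) maps to
+ * DIV_ROUND_CLOSEST((128 - 1) * 98, 253) + 1 = 49 + 1 = 50%, so the raw
+ * 1..254 range is spread across 1..99%.
+ */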
+
+#define HIGH_BIAS 3
+#define MED_BIAS BIT(1)
+#define LOW_BIAS BIT(0)
+static u8 bias_ua[] = {
+ [HIGH_BIAS] = 150,
+ [MED_BIAS] = 15,
+ [LOW_BIAS] = 5,
+};
+
+static int64_t get_batt_id(unsigned int battery_id_uv, u8 bid_info)
+{
+ u64 battery_id_ohm;
+
+ if ((bid_info & 0x3) == 0) {
+ pr_err("can't determine battery id 0x%02x\n", bid_info);
+ return -EINVAL;
+ }
+
+ battery_id_ohm = div_u64(battery_id_uv, bias_ua[bid_info & 0x3]);
+
+ return battery_id_ohm;
+}
+
+#define DEFAULT_TEMP_DEGC 250
+static int get_sram_prop_now(struct fg_chip *chip, unsigned int type)
+{
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d value %d\n",
+ fg_data[type].address, fg_data[type].offset,
+ fg_data[type].value);
+
+ if (type == FG_DATA_BATT_ID)
+ return get_batt_id(fg_data[type].value,
+ fg_data[FG_DATA_BATT_ID_INFO].value);
+
+ return fg_data[type].value;
+}
+
+#define MIN_TEMP_DEGC -300
+#define MAX_TEMP_DEGC 970
+static int get_prop_jeita_temp(struct fg_chip *chip, unsigned int type)
+{
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d\n", settings[type].address,
+ settings[type].offset);
+
+ return settings[type].value;
+}
+
+static int set_prop_jeita_temp(struct fg_chip *chip,
+ unsigned int type, int decidegc)
+{
+ int rc = 0;
+
+ if (fg_debug_mask & FG_POWER_SUPPLY)
+ pr_info("addr 0x%02X, offset %d temp%d\n",
+ settings[type].address,
+ settings[type].offset, decidegc);
+
+ settings[type].value = decidegc;
+
+ cancel_delayed_work_sync(
+ &chip->update_jeita_setting);
+ schedule_delayed_work(
+ &chip->update_jeita_setting, 0);
+
+ return rc;
+}
+
+#define EXTERNAL_SENSE_SELECT 0x4AC
+#define EXTERNAL_SENSE_OFFSET 0x2
+#define EXTERNAL_SENSE_BIT BIT(2)
+static int set_prop_sense_type(struct fg_chip *chip, int ext_sense_type)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ EXTERNAL_SENSE_BIT,
+ ext_sense_type ? EXTERNAL_SENSE_BIT : 0,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define EXPONENT_MASK 0xF800
+#define MANTISSA_MASK 0x3FF
+#define SIGN BIT(10)
+#define EXPONENT_SHIFT 11
+#define MICRO_UNIT 1000000ULL
+static int64_t float_decode(u16 reg)
+{
+ int64_t final_val, exponent_val, mantissa_val;
+ int exponent, mantissa, n;
+ bool sign;
+
+ exponent = (reg & EXPONENT_MASK) >> EXPONENT_SHIFT;
+ mantissa = (reg & MANTISSA_MASK);
+ sign = !!(reg & SIGN);
+
+ pr_debug("exponent=%d mantissa=%d sign=%d\n", exponent, mantissa, sign);
+
+ mantissa_val = mantissa * MICRO_UNIT;
+
+ n = exponent - 15;
+ if (n < 0)
+ exponent_val = MICRO_UNIT >> -n;
+ else
+ exponent_val = MICRO_UNIT << n;
+
+ n = n - 10;
+ if (n < 0)
+ mantissa_val >>= -n;
+ else
+ mantissa_val <<= n;
+
+ final_val = exponent_val + mantissa_val;
+
+ if (sign)
+ final_val *= -1;
+
+ return final_val;
+}
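+
+/*
+ * Illustrative decode: reg = 0x7A00 has exponent = 15, sign = 0 and
+ * mantissa = 0x200 (512), so float_decode() returns
+ * MICRO_UNIT * 2^(15 - 15) * (1 + 512/1024) = 1500000 micro-units.
+ */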
+
+#define MIN_HALFFLOAT_EXP_N -15
+#define MAX_HALFFLOAT_EXP_N 16
+static int log2_floor(int64_t uval)
+{
+ int n = 0;
+ int64_t i = MICRO_UNIT;
+
+ if (uval > i) {
+ while (uval > i && n > MIN_HALFFLOAT_EXP_N) {
+ i <<= 1;
+ n += 1;
+ }
+ if (uval < i)
+ n -= 1;
+ } else if (uval < i) {
+ while (uval < i && n < MAX_HALFFLOAT_EXP_N) {
+ i >>= 1;
+ n -= 1;
+ }
+ }
+
+ return n;
+}
+
+static int64_t exp2_int(int64_t n)
+{
+ int p = n - 1;
+
+ if (p > 0)
+ return (2 * MICRO_UNIT) << p;
+ else
+ return (2 * MICRO_UNIT) >> abs(p);
+}
+
+static u16 float_encode(int64_t uval)
+{
+ int sign = 0, n, exp, mantissa;
+ u16 half = 0;
+
+ if (uval < 0) {
+ sign = 1;
+ uval = abs(uval);
+ }
+ n = log2_floor(uval);
+ exp = n + 15;
+ mantissa = div_s64(div_s64((uval - exp2_int(n)) * exp2_int(10 - n),
+ MICRO_UNIT) + MICRO_UNIT / 2, MICRO_UNIT);
+
+ half = (mantissa & MANTISSA_MASK) | ((sign << 10) & SIGN)
+ | ((exp << 11) & EXPONENT_MASK);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("uval = %lld, m = 0x%02x, sign = 0x%02x, exp = 0x%02x, half = 0x%04x\n",
+ uval, mantissa, sign, exp, half);
+ return half;
+}
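+
+/*
+ * Illustrative encode (round trip of the decode example above):
+ * float_encode(1500000) finds n = 0 (exp = 15), mantissa =
+ * (1500000 - 1000000) * 1024 / 1000000 = 512, and returns 0x7A00.
+ */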
+
+#define BATT_IDED BIT(3)
+static int fg_is_batt_id_valid(struct fg_chip *chip)
+{
+ u8 fg_batt_sts;
+ int rc;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return rc;
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("fg batt sts 0x%x\n", fg_batt_sts);
+
+ return (fg_batt_sts & BATT_IDED) ? 1 : 0;
+}
+
+static int64_t twos_compliment_extend(int64_t val, int nbytes)
+{
+ int i;
+ int64_t mask;
+
+ mask = 0x80LL << ((nbytes - 1) * 8);
+ if (val & mask) {
+ for (i = 8; i > nbytes; i--) {
+ mask = 0xFFLL << ((i - 1) * 8);
+ val |= mask;
+ }
+ }
+
+ return val;
+}
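+
+/*
+ * Illustrative sign extension: twos_compliment_extend(0xFFFFFF, 3) fills the
+ * upper five bytes with 0xFF and returns -1, while
+ * twos_compliment_extend(0x800000, 3) returns -8388608 (-2^23).
+ */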
+
+#define LSB_24B_NUMRTR 596046
+#define LSB_24B_DENMTR 1000000
+#define LSB_16B_NUMRTR 152587
+#define LSB_16B_DENMTR 1000
+#define LSB_8B 9800
+#define TEMP_LSB_16B 625
+#define DECIKELVIN 2730
+#define SRAM_PERIOD_NO_ID_UPDATE_MS 100
+#define FULL_PERCENT_28BIT 0xFFFFFFF
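+/*
+ * Illustrative conversions: a 16-bit voltage reading of 26214 scales to
+ * 26214 * LSB_16B_NUMRTR / LSB_16B_DENMTR = 3999915 uV (about 4.0 V, LSB =
+ * 5 V / 2^15 ~= 152.587 uV), and a 16-bit temperature reading of 4768 gives
+ * 4768 * TEMP_LSB_16B / 1000 - DECIKELVIN = 2980 - 2730 = 250, i.e. 25.0 degC.
+ */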
+static void update_sram_data(struct fg_chip *chip, int *resched_ms)
+{
+ int i, j, rc = 0;
+ u8 reg[4];
+ int64_t temp;
+ int battid_valid = fg_is_batt_id_valid(chip);
+
+ fg_stay_awake(&chip->update_sram_wakeup_source);
+ if (chip->fg_restarting)
+ goto resched;
+
+ fg_mem_lock(chip);
+ for (i = 1; i < FG_DATA_MAX; i++) {
+ if (chip->profile_loaded && i >= FG_DATA_BATT_ID)
+ continue;
+ rc = fg_mem_read(chip, reg, fg_data[i].address,
+ fg_data[i].len, fg_data[i].offset, 0);
+ if (rc) {
+ pr_err("Failed to update sram data\n");
+ break;
+ }
+
+ temp = 0;
+ for (j = 0; j < fg_data[i].len; j++)
+ temp |= reg[j] << (8 * j);
+
+ switch (i) {
+ case FG_DATA_OCV:
+ case FG_DATA_VOLTAGE:
+ case FG_DATA_CPRED_VOLTAGE:
+ fg_data[i].value = div_u64(
+ (u64)(u16)temp * LSB_16B_NUMRTR,
+ LSB_16B_DENMTR);
+ break;
+ case FG_DATA_CURRENT:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div_s64(
+ (s64)temp * LSB_16B_NUMRTR,
+ LSB_16B_DENMTR);
+ break;
+ case FG_DATA_BATT_ESR:
+ fg_data[i].value = float_decode((u16) temp);
+ break;
+ case FG_DATA_BATT_ESR_COUNT:
+ fg_data[i].value = (u16)temp;
+ break;
+ case FG_DATA_BATT_ID:
+ if (battid_valid)
+ fg_data[i].value = reg[0] * LSB_8B;
+ break;
+ case FG_DATA_BATT_ID_INFO:
+ if (battid_valid)
+ fg_data[i].value = reg[0];
+ break;
+ case FG_DATA_BATT_SOC:
+ fg_data[i].value = div64_s64((temp * 10000),
+ FULL_PERCENT_3B);
+ break;
+ case FG_DATA_CC_CHARGE:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div64_s64(
+ temp * (int64_t)chip->nom_cap_uah,
+ FULL_PERCENT_28BIT);
+ break;
+ case FG_DATA_VINT_ERR:
+ temp = twos_compliment_extend(temp, fg_data[i].len);
+ fg_data[i].value = div64_s64(temp * chip->nom_cap_uah,
+ FULL_PERCENT_3B);
+ break;
+ };
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("%d %lld %d\n", i, temp, fg_data[i].value);
+ }
+ fg_mem_release(chip);
+
+ if (!rc)
+ get_current_time(&chip->last_sram_update_time);
+
+resched:
+ if (battid_valid) {
+ complete_all(&chip->batt_id_avail);
+ *resched_ms = fg_sram_update_period_ms;
+ } else {
+ *resched_ms = SRAM_PERIOD_NO_ID_UPDATE_MS;
+ }
+ fg_relax(&chip->update_sram_wakeup_source);
+}
+
+#define SRAM_TIMEOUT_MS 3000
+static void update_sram_data_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_sram_data.work);
+	/* default reschedule interval in case the wait below times out */
+	int resched_ms = fg_sram_update_period_ms, ret;
+ bool tried_again = false;
+
+wait:
+ /* Wait for MEMIF access revoked */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_revoked,
+ msecs_to_jiffies(SRAM_TIMEOUT_MS));
+
+	/* If we were interrupted, wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ pr_err("transaction timed out ret=%d\n", ret);
+ goto out;
+ }
+ update_sram_data(chip, &resched_ms);
+
+out:
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(resched_ms));
+}
+
+#define BATT_TEMP_OFFSET 3
+#define BATT_TEMP_CNTRL_MASK 0x17
+#define DISABLE_THERM_BIT BIT(0)
+#define TEMP_SENSE_ALWAYS_BIT BIT(1)
+#define TEMP_SENSE_CHARGE_BIT BIT(2)
+#define FORCE_RBIAS_ON_BIT BIT(4)
+#define BATT_TEMP_OFF DISABLE_THERM_BIT
+#define BATT_TEMP_ON (FORCE_RBIAS_ON_BIT | TEMP_SENSE_ALWAYS_BIT | \
+ TEMP_SENSE_CHARGE_BIT)
+#define TEMP_PERIOD_UPDATE_MS 10000
+#define TEMP_PERIOD_TIMEOUT_MS 3000
+static void update_temp_data(struct work_struct *work)
+{
+ s16 temp;
+ u8 reg[2];
+ bool tried_again = false;
+ int rc, ret, timeout = TEMP_PERIOD_TIMEOUT_MS;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_temp_work.work);
+
+ if (chip->fg_restarting)
+ goto resched;
+
+ fg_stay_awake(&chip->update_temp_wakeup_source);
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ BATT_TEMP_ON,
+ BATT_TEMP_OFFSET);
+ if (rc) {
+ pr_err("failed to write BATT_TEMP_ON rc=%d\n", rc);
+ goto out;
+ }
+
+wait:
+ /* Wait for MEMIF access revoked */
+ ret = wait_for_completion_interruptible_timeout(
+ &chip->sram_access_revoked,
+ msecs_to_jiffies(timeout));
+
+		/* If we were interrupted, wait again one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("transaction timed out ret=%d\n", ret);
+ goto out;
+ }
+ }
+
+ /* Read FG_DATA_BATT_TEMP now */
+ rc = fg_mem_read(chip, reg, fg_data[0].address,
+ fg_data[0].len, fg_data[0].offset,
+ chip->sw_rbias_ctrl ? 1 : 0);
+ if (rc) {
+ pr_err("Failed to update temp data\n");
+ goto out;
+ }
+
+ temp = reg[0] | (reg[1] << 8);
+ fg_data[0].value = (temp * TEMP_LSB_16B / 1000)
+ - DECIKELVIN;
+
+ if (fg_debug_mask & FG_MEM_DEBUG_READS)
+ pr_info("BATT_TEMP %d %d\n", temp, fg_data[0].value);
+
+ get_current_time(&chip->last_temp_update_time);
+
+out:
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ BATT_TEMP_OFF,
+ BATT_TEMP_OFFSET);
+ if (rc)
+ pr_err("failed to write BATT_TEMP_OFF rc=%d\n", rc);
+ }
+ fg_relax(&chip->update_temp_wakeup_source);
+
+resched:
+ schedule_delayed_work(
+ &chip->update_temp_work,
+ msecs_to_jiffies(TEMP_PERIOD_UPDATE_MS));
+}
+
+static void update_jeita_setting(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_jeita_setting.work);
+ u8 reg[4];
+ int i, rc;
+
+ for (i = 0; i < 4; i++)
+ reg[i] = (settings[FG_MEM_SOFT_COLD + i].value / 10) + 30;
+
+ rc = fg_mem_write(chip, reg, settings[FG_MEM_SOFT_COLD].address,
+ 4, settings[FG_MEM_SOFT_COLD].offset, 0);
+ if (rc)
+ pr_err("failed to update JEITA setting rc=%d\n", rc);
+}
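+
+/*
+ * Illustrative encoding: each JEITA threshold is stored as
+ * (value_in_decidegC / 10) + 30, so a soft-cold setting of -100 (-10.0 degC)
+ * is written as 20 and a soft-hot setting of 450 (45.0 degC) as 75.
+ */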
+
+static int fg_set_resume_soc(struct fg_chip *chip, u8 threshold)
+{
+ u16 address;
+ int offset, rc;
+
+ address = settings[FG_MEM_RESUME_SOC].address;
+ offset = settings[FG_MEM_RESUME_SOC].offset;
+
+ rc = fg_mem_masked_write(chip, address, 0xFF, threshold, offset);
+
+ if (rc)
+ pr_err("write failed rc=%d\n", rc);
+ else
+ pr_debug("setting resume-soc to %x\n", threshold);
+
+ return rc;
+}
+
+#define VBATT_LOW_STS_BIT BIT(2)
+static int fg_get_vbatt_status(struct fg_chip *chip, bool *vbatt_low_sts)
+{
+ int rc = 0;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (!rc)
+ *vbatt_low_sts = !!(fg_batt_sts & VBATT_LOW_STS_BIT);
+ return rc;
+}
+
+#define BATT_CYCLE_NUMBER_REG 0x5E8
+#define BATT_CYCLE_OFFSET 0
+static void restore_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, i, address;
+ u8 data[2];
+
+ fg_mem_lock(chip);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ address = BATT_CYCLE_NUMBER_REG + i * 2;
+ rc = fg_mem_read(chip, (u8 *)&data, address, 2,
+ BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to read BATT_CYCLE_NUMBER[%d] rc: %d\n",
+ i, rc);
+ else
+ chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
+ }
+ fg_mem_release(chip);
+}
+
+static void clear_cycle_counter(struct fg_chip *chip)
+{
+ int rc = 0, len, i;
+
+ if (!chip->cyc_ctr.en)
+ return;
+
+ len = sizeof(chip->cyc_ctr.count);
+ memset(chip->cyc_ctr.count, 0, len);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ chip->cyc_ctr.started[i] = false;
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ rc = fg_mem_write(chip, (u8 *)&chip->cyc_ctr.count,
+ BATT_CYCLE_NUMBER_REG, len,
+ BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write BATT_CYCLE_NUMBER rc=%d\n", rc);
+}
+
+static int fg_inc_store_cycle_ctr(struct fg_chip *chip, int bucket)
+{
+ int rc = 0, address;
+ u16 cyc_count;
+ u8 data[2];
+
+ if (bucket < 0 || (bucket > BUCKET_COUNT - 1))
+ return 0;
+
+ cyc_count = chip->cyc_ctr.count[bucket];
+ cyc_count++;
+ data[0] = cyc_count & 0xFF;
+ data[1] = cyc_count >> 8;
+
+ address = BATT_CYCLE_NUMBER_REG + bucket * 2;
+
+ rc = fg_mem_write(chip, data, address, 2, BATT_CYCLE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write BATT_CYCLE_NUMBER[%d] rc=%d\n",
+ bucket, rc);
+ else
+ chip->cyc_ctr.count[bucket] = cyc_count;
+ return rc;
+}
+
+static void update_cycle_count(struct work_struct *work)
+{
+ int rc = 0, bucket, i;
+ u8 reg[3], batt_soc;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ cycle_count_work);
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ rc = fg_mem_read(chip, reg, BATTERY_SOC_REG, 3,
+ BATTERY_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read battery soc rc: %d\n", rc);
+ goto out;
+ }
+ batt_soc = reg[2];
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ /* Find out which bucket the SOC falls in */
+ bucket = batt_soc / BUCKET_SOC_PCT;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt_soc: %x bucket: %d\n", reg[2], bucket);
+
+ /*
+ * If we've started counting for the previous bucket,
+ * then store the counter for that bucket if the
+ * counter for current bucket is getting started.
+ */
+ if (bucket > 0 && chip->cyc_ctr.started[bucket - 1] &&
+ !chip->cyc_ctr.started[bucket]) {
+ rc = fg_inc_store_cycle_ctr(chip, bucket - 1);
+ if (rc) {
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ goto out;
+ } else {
+ chip->cyc_ctr.started[bucket - 1] = false;
+ chip->cyc_ctr.last_soc[bucket - 1] = 0;
+ }
+ }
+ if (!chip->cyc_ctr.started[bucket]) {
+ chip->cyc_ctr.started[bucket] = true;
+ chip->cyc_ctr.last_soc[bucket] = batt_soc;
+ }
+ } else {
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ if (chip->cyc_ctr.started[i] &&
+ batt_soc > chip->cyc_ctr.last_soc[i]) {
+ rc = fg_inc_store_cycle_ctr(chip, i);
+ if (rc)
+ pr_err("Error in storing cycle_ctr rc: %d\n",
+ rc);
+ chip->cyc_ctr.last_soc[i] = 0;
+ }
+ chip->cyc_ctr.started[i] = false;
+ }
+ }
+out:
+ mutex_unlock(&chip->cyc_ctr.lock);
+}
+
+static int fg_get_cycle_count(struct fg_chip *chip)
+{
+ int count;
+
+ if (!chip->cyc_ctr.en)
+ return 0;
+
+ if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
+ return -EINVAL;
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
+ mutex_unlock(&chip->cyc_ctr.lock);
+ return count;
+}
+
+static void half_float_to_buffer(int64_t uval, u8 *buffer)
+{
+ u16 raw;
+
+ raw = float_encode(uval);
+ buffer[0] = (u8)(raw & 0xFF);
+ buffer[1] = (u8)((raw >> 8) & 0xFF);
+}
+
+static int64_t half_float(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ return float_decode(val);
+}
+
+static int voltage_2b(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ /* the range of voltage 2b is [-5V, 5V], so it will fit in an int */
+ return (int)div_u64(((u64)val) * LSB_16B_NUMRTR, LSB_16B_DENMTR);
+}
+
+static int bcap_uah_2b(u8 *buffer)
+{
+ u16 val;
+
+ val = buffer[1] << 8 | buffer[0];
+ return ((int)val) * 1000;
+}
+
+static int lookup_ocv_for_soc(struct fg_chip *chip, int soc)
+{
+ int64_t *coeffs;
+
+ if (soc > chip->ocv_junction_p1p2 * 10)
+ coeffs = chip->ocv_coeffs;
+ else if (soc > chip->ocv_junction_p2p3 * 10)
+ coeffs = chip->ocv_coeffs + 4;
+ else
+ coeffs = chip->ocv_coeffs + 8;
+ /* the range of ocv will fit in a 32 bit int */
+ return (int)(coeffs[0]
+ + div_s64(coeffs[1] * soc, 1000LL)
+ + div_s64(coeffs[2] * soc * soc, 1000000LL)
+ + div_s64(coeffs[3] * soc * soc * soc, 1000000000LL));
+}
+
+static int lookup_soc_for_ocv(struct fg_chip *chip, int ocv)
+{
+ int64_t val;
+ int soc = -EINVAL;
+ /*
+ * binary search variables representing the valid start and end
+ * percentages to search
+ */
+ int start = 0, end = 1000, mid;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("target_ocv = %d\n", ocv);
+ /* do a binary search for the closest soc to match the ocv */
+ while (end - start > 1) {
+ mid = (start + end) / 2;
+ val = lookup_ocv_for_soc(chip, mid);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("start = %d, mid = %d, end = %d, ocv = %lld\n",
+ start, mid, end, val);
+ if (ocv < val) {
+ end = mid;
+ } else if (ocv > val) {
+ start = mid;
+ } else {
+ soc = mid;
+ break;
+ }
+ }
+ /*
+ * if the exact soc was not found and there are two or less values
+ * remaining, just compare them and see which one is closest to the ocv
+ */
+ if (soc == -EINVAL) {
+ if (abs(ocv - lookup_ocv_for_soc(chip, start))
+ > abs(ocv - lookup_ocv_for_soc(chip, end)))
+ soc = end;
+ else
+ soc = start;
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("closest = %d, target_ocv = %d, ocv_found = %d\n",
+ soc, ocv, lookup_ocv_for_soc(chip, soc));
+ return soc;
+}
+
+#define ESR_ACTUAL_REG 0x554
+#define BATTERY_ESR_REG 0x4F4
+#define TEMP_RS_TO_RSLOW_REG 0x514
+static int estimate_battery_age(struct fg_chip *chip, int *actual_capacity)
+{
+ int64_t ocv_cutoff_new, ocv_cutoff_aged, temp_rs_to_rslow;
+ int64_t esr_actual, battery_esr, val;
+	int soc_cutoff_aged, soc_cutoff_new, rc = 0;
+ int battery_soc, unusable_soc, batt_temp;
+ u8 buffer[3];
+
+ if (chip->batt_aging_mode != FG_AGING_ESR)
+ return 0;
+
+ if (chip->nom_cap_uah == 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("ocv coefficients not loaded, aborting\n");
+ return 0;
+ }
+ fg_mem_lock(chip);
+
+ batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ if (batt_temp < 150 || batt_temp > 400) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Battery temp (%d) out of range, aborting\n",
+ (int)batt_temp);
+ rc = 0;
+ goto done;
+ }
+
+ battery_soc = get_battery_soc_raw(chip) * 100 / FULL_PERCENT_3B;
+ if (rc) {
+ goto error_done;
+ } else if (battery_soc < 25 || battery_soc > 75) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Battery SoC (%d) out of range, aborting\n",
+ (int)battery_soc);
+ rc = 0;
+ goto done;
+ }
+
+ rc = fg_mem_read(chip, buffer, ESR_ACTUAL_REG, 2, 2, 0);
+ esr_actual = half_float(buffer);
+ rc |= fg_mem_read(chip, buffer, BATTERY_ESR_REG, 2, 2, 0);
+ battery_esr = half_float(buffer);
+
+ if (rc) {
+ goto error_done;
+ } else if (esr_actual < battery_esr) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("Batt ESR lower than ESR actual, aborting\n");
+ rc = 0;
+ goto done;
+ }
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2, 0, 0);
+ temp_rs_to_rslow = half_float(buffer);
+
+ if (rc)
+ goto error_done;
+
+ fg_mem_release(chip);
+
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("batt_soc = %d, cutoff_voltage = %lld, eval current = %d\n",
+ battery_soc, chip->cutoff_voltage,
+ chip->evaluation_current);
+ pr_info("temp_rs_to_rslow = %lld, batt_esr = %lld, esr_actual = %lld\n",
+ temp_rs_to_rslow, battery_esr, esr_actual);
+ }
+
+ /* calculate soc_cutoff_new */
+ val = (1000000LL + temp_rs_to_rslow) * battery_esr;
+ do_div(val, 1000000);
+ ocv_cutoff_new = div64_s64(chip->evaluation_current * val, 1000)
+ + chip->cutoff_voltage;
+
+ /* calculate soc_cutoff_aged */
+ val = (1000000LL + temp_rs_to_rslow) * esr_actual;
+ do_div(val, 1000000);
+ ocv_cutoff_aged = div64_s64(chip->evaluation_current * val, 1000)
+ + chip->cutoff_voltage;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("ocv_cutoff_new = %lld, ocv_cutoff_aged = %lld\n",
+ ocv_cutoff_new, ocv_cutoff_aged);
+
+ soc_cutoff_new = lookup_soc_for_ocv(chip, ocv_cutoff_new);
+ soc_cutoff_aged = lookup_soc_for_ocv(chip, ocv_cutoff_aged);
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("aged soc = %d, new soc = %d\n",
+ soc_cutoff_aged, soc_cutoff_new);
+ unusable_soc = soc_cutoff_aged - soc_cutoff_new;
+
+ *actual_capacity = div64_s64(((int64_t)chip->nom_cap_uah)
+ * (1000 - unusable_soc), 1000);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("nom cap = %d, actual cap = %d\n",
+ chip->nom_cap_uah, *actual_capacity);
+
+ return rc;
+
+error_done:
+ pr_err("some register reads failed: %d\n", rc);
+done:
+ fg_mem_release(chip);
+ return rc;
+}
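+
+/*
+ * Illustrative result (assuming nom_cap_uah = 3000000): the soc lookups work
+ * on a 0..1000 scale (0.1% steps), so unusable_soc = soc_cutoff_aged -
+ * soc_cutoff_new = 20 (i.e. 2%) yields
+ * actual_capacity = 3000000 * (1000 - 20) / 1000 = 2940000 uAh.
+ */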
+
+static void battery_age_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ battery_age_work);
+
+ estimate_battery_age(chip, &chip->actual_cap_uah);
+}
+
+static enum power_supply_property fg_power_props[] = {
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_RAW,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_COOL_TEMP,
+ POWER_SUPPLY_PROP_WARM_TEMP,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ POWER_SUPPLY_PROP_RESISTANCE_ID,
+ POWER_SUPPLY_PROP_BATTERY_TYPE,
+ POWER_SUPPLY_PROP_UPDATE_NOW,
+ POWER_SUPPLY_PROP_ESR_COUNT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+ POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_HI_POWER,
+};
+
+static int fg_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct fg_chip *chip = container_of(psy, struct fg_chip, bms_psy);
+ bool vbatt_low_sts;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_BATTERY_TYPE:
+ if (chip->battery_missing)
+ val->strval = missing_batt_type;
+ else if (chip->fg_restarting)
+ val->strval = loading_batt_type;
+ else
+ val->strval = chip->batt_type;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_SOC);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_ERROR:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VINT_ERR);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CURRENT);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_VOLTAGE);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ val->intval = get_sram_prop_now(chip, FG_DATA_OCV);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = chip->batt_max_voltage_uv;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+ break;
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_COLD);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ val->intval = get_prop_jeita_temp(chip, FG_MEM_SOFT_HOT);
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR);
+ break;
+ case POWER_SUPPLY_PROP_ESR_COUNT:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ESR_COUNT);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ val->intval = fg_get_cycle_count(chip);
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ val->intval = chip->cyc_ctr.id;
+ break;
+ case POWER_SUPPLY_PROP_RESISTANCE_ID:
+ val->intval = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ if (!fg_get_vbatt_status(chip, &vbatt_low_sts))
+ val->intval = (int)vbatt_low_sts;
+ else
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+ val->intval = chip->nom_cap_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ val->intval = chip->learning_data.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ val->intval = chip->learning_data.cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ val->intval = get_sram_prop_now(chip, FG_DATA_CC_CHARGE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int correction_times[] = {
+ 1470,
+ 2940,
+ 4410,
+ 5880,
+ 7350,
+ 8820,
+ 10290,
+ 11760,
+ 13230,
+ 14700,
+ 16170,
+ 17640,
+ 19110,
+ 20580,
+ 22050,
+ 23520,
+ 24990,
+ 26460,
+ 27930,
+ 29400,
+ 30870,
+ 32340,
+ 33810,
+ 35280,
+ 36750,
+ 38220,
+ 39690,
+ 41160,
+ 42630,
+ 44100,
+ 45570,
+ 47040,
+};
+
+static int correction_factors[] = {
+ 1000000,
+ 1007874,
+ 1015789,
+ 1023745,
+ 1031742,
+ 1039780,
+ 1047859,
+ 1055979,
+ 1064140,
+ 1072342,
+ 1080584,
+ 1088868,
+ 1097193,
+ 1105558,
+ 1113964,
+ 1122411,
+ 1130899,
+ 1139427,
+ 1147996,
+ 1156606,
+ 1165256,
+ 1173947,
+ 1182678,
+ 1191450,
+ 1200263,
+ 1209115,
+ 1218008,
+ 1226942,
+ 1235915,
+ 1244929,
+ 1253983,
+ 1263076,
+};
+
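+/*
+ * The correction_times entries are multiples of 1470 ms (apparently one entry
+ * per fuel gauge accumulation cycle, per the "32 cycles" message below), and
+ * correction_factors holds the matching multiplier scaled by 10^6; e.g. a
+ * delta of ~2.9 s selects index 1 and a factor of 1.007874.
+ */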
+#define FG_CONVERSION_FACTOR (64198531LL)
+static int iavg_3b_to_uah(u8 *buffer, int delta_ms)
+{
+ int64_t val, i_filtered;
+ int i, correction_factor;
+
+ for (i = 0; i < ARRAY_SIZE(correction_times); i++) {
+ if (correction_times[i] > delta_ms)
+ break;
+ }
+ if (i >= ARRAY_SIZE(correction_times)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fuel gauge took more than 32 cycles\n");
+ i = ARRAY_SIZE(correction_times) - 1;
+ }
+ correction_factor = correction_factors[i];
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("delta_ms = %d, cycles = %d, correction = %d\n",
+ delta_ms, i, correction_factor);
+ val = buffer[2] << 16 | buffer[1] << 8 | buffer[0];
+ /* convert val from signed 24b to signed 64b */
+ i_filtered = (val << 40) >> 40;
+ val = i_filtered * correction_factor;
+ val = div64_s64(val + FG_CONVERSION_FACTOR / 2, FG_CONVERSION_FACTOR);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("i_filtered = 0x%llx/%lld, cc_uah = %lld\n",
+ i_filtered, i_filtered, val);
+
+ return val;
+}
+
+static bool fg_is_temperature_ok_for_learning(struct fg_chip *chip)
+{
+ int batt_temp = get_sram_prop_now(chip, FG_DATA_BATT_TEMP);
+
+ if (batt_temp > chip->learning_data.max_temp
+ || batt_temp < chip->learning_data.min_temp) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("temp (%d) out of range [%d, %d], aborting\n",
+ batt_temp,
+ chip->learning_data.min_temp,
+ chip->learning_data.max_temp);
+ return false;
+ }
+ return true;
+}
+
+static void fg_cap_learning_stop(struct fg_chip *chip)
+{
+ chip->learning_data.cc_uah = 0;
+ chip->learning_data.active = false;
+}
+
+#define I_FILTERED_REG 0x584
+static void fg_cap_learning_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ fg_cap_learning_work);
+ u8 i_filtered[3], data[3];
+ int rc, cc_uah, delta_ms;
+ ktime_t now_kt, delta_kt;
+
+ mutex_lock(&chip->learning_data.learning_lock);
+ if (!chip->learning_data.active)
+ goto fail;
+ if (!fg_is_temperature_ok_for_learning(chip)) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ mutex_unlock(&chip->learning_data.learning_lock);
+ fg_relax(&chip->capacity_learning_wakeup_source);
+ return;
+ }
+
+ fg_mem_lock(chip);
+
+ rc = fg_mem_read(chip, i_filtered, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to read i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ goto fail;
+ }
+ memset(data, 0, 3);
+ rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to clear i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ goto fail;
+ }
+ fg_mem_release(chip);
+
+ now_kt = ktime_get_boottime();
+ delta_kt = ktime_sub(now_kt, chip->learning_data.time_stamp);
+ chip->learning_data.time_stamp = now_kt;
+
+ delta_ms = (int)div64_s64(ktime_to_ns(delta_kt), 1000000);
+
+ cc_uah = iavg_3b_to_uah(i_filtered, delta_ms);
+ chip->learning_data.cc_uah -= cc_uah;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("total_cc_uah = %lld\n", chip->learning_data.cc_uah);
+
+fail:
+ mutex_unlock(&chip->learning_data.learning_lock);
+ return;
+}
+
+#define CC_SOC_BASE_REG 0x5BC
+#define CC_SOC_OFFSET 3
+#define CC_SOC_MAGNITUDE_MASK 0x1FFFFFFF
+#define CC_SOC_NEGATIVE_BIT BIT(29)
+static int fg_get_cc_soc(struct fg_chip *chip, int *cc_soc)
+{
+ int rc;
+ u8 reg[4];
+ unsigned int temp, magnitude;
+
+ rc = fg_mem_read(chip, reg, CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read CC_SOC_REG rc=%d\n", rc);
+ return rc;
+ }
+
+ temp = reg[3] << 24 | reg[2] << 16 | reg[1] << 8 | reg[0];
+ magnitude = temp & CC_SOC_MAGNITUDE_MASK;
+ if (temp & CC_SOC_NEGATIVE_BIT)
+ *cc_soc = -1 * (~magnitude + 1);
+ else
+ *cc_soc = magnitude;
+
+ return 0;
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+ int cc_pc_val, rc = -EINVAL;
+ unsigned int cc_soc_delta_pc;
+ int64_t delta_cc_uah;
+
+ if (!chip->learning_data.active)
+ goto fail;
+
+ if (!fg_is_temperature_ok_for_learning(chip)) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ rc = fg_get_cc_soc(chip, &cc_pc_val);
+ if (rc) {
+ pr_err("failed to get CC_SOC, stopping capacity learning\n");
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ cc_soc_delta_pc = DIV_ROUND_CLOSEST(
+ abs(cc_pc_val - chip->learning_data.init_cc_pc_val)
+ * 100, FULL_PERCENT_28BIT);
+
+ delta_cc_uah = div64_s64(
+ chip->learning_data.learned_cc_uah * cc_soc_delta_pc,
+ 100);
+ chip->learning_data.cc_uah = delta_cc_uah + chip->learning_data.cc_uah;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("current cc_soc=%d cc_soc_pc=%d total_cc_uah = %lld\n",
+ cc_pc_val, cc_soc_delta_pc,
+ chip->learning_data.cc_uah);
+
+ return 0;
+
+fail:
+ return rc;
+}
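+
+/*
+ * Illustrative example: with FULL_PERCENT_28BIT = 0xFFFFFFF, a CC_SOC delta
+ * of 13421772 (~5% of full scale) gives cc_soc_delta_pc =
+ * DIV_ROUND_CLOSEST(13421772 * 100, 0xFFFFFFF) = 5, and with
+ * learned_cc_uah = 3000000 (assumed here) the learned charge grows by
+ * 3000000 * 5 / 100 = 150000 uAh.
+ */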
+
+#define FG_CAP_LEARNING_INTERVAL_NS 30000000000
+static enum alarmtimer_restart fg_cap_learning_alarm_cb(struct alarm *alarm,
+ ktime_t now)
+{
+ struct fg_chip *chip = container_of(alarm, struct fg_chip,
+ fg_cap_learning_alarm);
+
+ if (chip->learning_data.active) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("alarm fired\n");
+ schedule_work(&chip->fg_cap_learning_work);
+ alarm_forward_now(alarm,
+ ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
+ return ALARMTIMER_RESTART;
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("alarm misfired\n");
+ return ALARMTIMER_NORESTART;
+}
+
+#define FG_AGING_STORAGE_REG 0x5E4
+#define ACTUAL_CAPACITY_REG 0x578
+#define MAH_TO_SOC_CONV_REG 0x4A0
+#define CC_SOC_COEFF_OFFSET 0
+#define ACTUAL_CAPACITY_OFFSET 2
+#define MAH_TO_SOC_CONV_CS_OFFSET 0
+static int fg_calc_and_store_cc_soc_coeff(struct fg_chip *chip, int16_t cc_mah)
+{
+ int rc;
+ int64_t cc_to_soc_coeff, mah_to_soc;
+ u8 data[2];
+
+ rc = fg_mem_write(chip, (u8 *)&cc_mah, ACTUAL_CAPACITY_REG, 2,
+ ACTUAL_CAPACITY_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to store actual capacity: %d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_read(chip, (u8 *)&data, MAH_TO_SOC_CONV_REG, 2,
+ MAH_TO_SOC_CONV_CS_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read mah_to_soc_conv_cs: %d\n", rc);
+ } else {
+ mah_to_soc = data[1] << 8 | data[0];
+ mah_to_soc *= MICRO_UNIT;
+ cc_to_soc_coeff = div64_s64(mah_to_soc, cc_mah);
+ half_float_to_buffer(cc_to_soc_coeff, data);
+ rc = fg_mem_write(chip, (u8 *)data,
+ ACTUAL_CAPACITY_REG, 2,
+ CC_SOC_COEFF_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to write cc_soc_coeff_offset: %d\n",
+ rc);
+ else if (fg_debug_mask & FG_AGING)
+ pr_info("new cc_soc_coeff %lld [%x %x] saved to sram\n",
+ cc_to_soc_coeff, data[0], data[1]);
+ }
+ return rc;
+}
+
+static void fg_cap_learning_load_data(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int64_t old_cap = chip->learning_data.learned_cc_uah;
+ int rc;
+
+ rc = fg_mem_read(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to load aged capacity: %d\n", rc);
+ } else {
+ chip->learning_data.learned_cc_uah = cc_mah * 1000;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("learned capacity %lld-> %lld/%x uah\n",
+ old_cap,
+ chip->learning_data.learned_cc_uah,
+ cc_mah);
+ }
+}
+
+static void fg_cap_learning_save_data(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int rc;
+
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
+
+ rc = fg_mem_write(chip, (u8 *)&cc_mah, FG_AGING_STORAGE_REG, 2, 0, 0);
+ if (rc)
+ pr_err("Failed to store aged capacity: %d\n", rc);
+ else if (fg_debug_mask & FG_AGING)
+ pr_info("learned capacity %lld uah (%d/0x%x uah) saved to sram\n",
+ chip->learning_data.learned_cc_uah,
+ cc_mah, cc_mah);
+
+ if (chip->learning_data.feedback_on) {
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in storing cc_soc_coeff, rc:%d\n", rc);
+ }
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+ int64_t max_inc_val, min_dec_val, old_cap;
+
+ max_inc_val = chip->learning_data.learned_cc_uah
+ * (1000 + chip->learning_data.max_increment);
+ do_div(max_inc_val, 1000);
+
+ min_dec_val = chip->learning_data.learned_cc_uah
+ * (1000 - chip->learning_data.max_decrement);
+ do_div(min_dec_val, 1000);
+
+ old_cap = chip->learning_data.learned_cc_uah;
+ if (chip->learning_data.cc_uah > max_inc_val)
+ chip->learning_data.learned_cc_uah = max_inc_val;
+ else if (chip->learning_data.cc_uah < min_dec_val)
+ chip->learning_data.learned_cc_uah = min_dec_val;
+ else
+ chip->learning_data.learned_cc_uah =
+ chip->learning_data.cc_uah;
+
+ fg_cap_learning_save_data(chip);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+ chip->learning_data.cc_uah,
+ old_cap, chip->learning_data.learned_cc_uah);
+}
+
+static int get_vbat_est_diff(struct fg_chip *chip)
+{
+ return abs(fg_data[FG_DATA_VOLTAGE].value
+ - fg_data[FG_DATA_CPRED_VOLTAGE].value);
+}
+
+#define CBITS_INPUT_FILTER_REG 0x4B4
+#define IBATTF_TAU_MASK 0x38
+#define IBATTF_TAU_99_S 0x30
+static int fg_cap_learning_check(struct fg_chip *chip)
+{
+ u8 data[4];
+ int rc = 0, battery_soc, cc_pc_val;
+ int vbat_est_diff, vbat_est_thr_uv;
+ unsigned int cc_pc_100 = FULL_PERCENT_28BIT;
+
+ mutex_lock(&chip->learning_data.learning_lock);
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING
+ && !chip->learning_data.active
+ && chip->batt_aging_mode == FG_AGING_CC) {
+ if (chip->learning_data.learned_cc_uah == 0) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("no capacity, aborting\n");
+ goto fail;
+ }
+
+ if (!fg_is_temperature_ok_for_learning(chip))
+ goto fail;
+
+ fg_mem_lock(chip);
+ if (!chip->learning_data.feedback_on) {
+ vbat_est_diff = get_vbat_est_diff(chip);
+ vbat_est_thr_uv = chip->learning_data.vbat_est_thr_uv;
+ if (vbat_est_diff >= vbat_est_thr_uv &&
+ vbat_est_thr_uv > 0) {
+ if (fg_debug_mask & FG_AGING)
+					pr_info("vbat_est_diff (%d) >= threshold (%d), stopping\n",
+ vbat_est_diff, vbat_est_thr_uv);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ }
+ battery_soc = get_battery_soc_raw(chip);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("checking battery soc (%d vs %d)\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.max_start_soc);
+ /* check if the battery is low enough to start soc learning */
+ if (battery_soc * 100 / FULL_PERCENT_3B
+ > chip->learning_data.max_start_soc) {
+ if (fg_debug_mask & FG_AGING)
+				pr_info("battery soc too high (%d > %d), aborting\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.max_start_soc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ /* set the coulomb counter to a percentage of the capacity */
+ chip->learning_data.cc_uah = div64_s64(
+ (chip->learning_data.learned_cc_uah * battery_soc),
+ FULL_PERCENT_3B);
+
+ /* Use CC_SOC_REG based capacity learning */
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ fg_mem_release(chip);
+ /* SW_CC_SOC based capacity learning */
+ if (fg_get_cc_soc(chip, &cc_pc_val)) {
+ pr_err("failed to get CC_SOC, stop capacity learning\n");
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ chip->learning_data.init_cc_pc_val = cc_pc_val;
+ chip->learning_data.active = true;
+ if (fg_debug_mask & FG_AGING)
+ pr_info("SW_CC_SOC based learning init_CC_SOC=%d\n",
+ chip->learning_data.init_cc_pc_val);
+ } else {
+ rc = fg_mem_masked_write(chip, CBITS_INPUT_FILTER_REG,
+ IBATTF_TAU_MASK, IBATTF_TAU_99_S, 0);
+ if (rc) {
+ pr_err("Failed to write IF IBAT Tau: %d\n",
+ rc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+
+ /* clear the i_filtered register */
+ memset(data, 0, 4);
+ rc = fg_mem_write(chip, data, I_FILTERED_REG, 3, 0, 0);
+ if (rc) {
+ pr_err("Failed to clear i_filtered: %d\n", rc);
+ fg_mem_release(chip);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ fg_mem_release(chip);
+ chip->learning_data.time_stamp = ktime_get_boottime();
+ chip->learning_data.active = true;
+
+ if (fg_debug_mask & FG_AGING)
+ pr_info("cap learning started, soc = %d cc_uah = %lld\n",
+ battery_soc * 100 / FULL_PERCENT_3B,
+ chip->learning_data.cc_uah);
+ rc = alarm_start_relative(&chip->fg_cap_learning_alarm,
+ ns_to_ktime(FG_CAP_LEARNING_INTERVAL_NS));
+ if (rc) {
+ pr_err("Failed to start alarm: %d\n", rc);
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ }
+ } else if ((chip->status != POWER_SUPPLY_STATUS_CHARGING)
+ && chip->learning_data.active) {
+ if (fg_debug_mask & FG_AGING)
+ pr_info("capacity learning stopped\n");
+ if (!(chip->wa_flag & USE_CC_SOC_REG))
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+
+ if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ rc = fg_cap_learning_process_full_data(chip);
+ if (rc) {
+ fg_cap_learning_stop(chip);
+ goto fail;
+ }
+ /* reset SW_CC_SOC register to 100% */
+ rc = fg_mem_write(chip, (u8 *)&cc_pc_100,
+ CC_SOC_BASE_REG, 4, CC_SOC_OFFSET, 0);
+ if (rc)
+ pr_err("Failed to reset CC_SOC_REG rc=%d\n",
+ rc);
+ }
+ fg_cap_learning_post_process(chip);
+ }
+
+ fg_cap_learning_stop(chip);
+ }
+
+fail:
+ mutex_unlock(&chip->learning_data.learning_lock);
+ return rc;
+}
+
+static bool is_usb_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (chip->dc_psy)
+ chip->dc_psy->get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT, &prop);
+ return prop.intval != 0;
+}
+
+static bool is_input_present(struct fg_chip *chip)
+{
+ return is_usb_present(chip) || is_dc_present(chip);
+}
+
+static bool is_otg_present(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_USB_OTG, &prop);
+ return prop.intval != 0;
+}
+
+static int set_prop_enable_charging(struct fg_chip *chip, bool enable)
+{
+ int rc = 0;
+ union power_supply_propval ret = {enable, };
+
+ if (!chip->batt_psy)
+ chip->batt_psy = power_supply_get_by_name("battery");
+
+ if (chip->batt_psy) {
+ rc = chip->batt_psy->set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ &ret);
+ if (rc)
+ pr_err("couldn't configure batt chg %d\n", rc);
+ }
+
+ return rc;
+}
+
+#define MAX_BATTERY_CC_SOC_CAPACITY 150
+static void status_change_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ status_change_work);
+ unsigned long current_time = 0;
+ int cc_soc, rc, capacity = get_prop_capacity(chip);
+
+ if (chip->status == POWER_SUPPLY_STATUS_FULL) {
+ if (capacity >= 99 && chip->hold_soc_while_full
+ && chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("holding soc at 100\n");
+ chip->charge_full = true;
+ } else if (fg_debug_mask & FG_STATUS) {
+ pr_info("terminated charging at %d/0x%02x\n",
+ capacity, get_monotonic_soc_raw(chip));
+ }
+ }
+ if (chip->status == POWER_SUPPLY_STATUS_FULL ||
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (!chip->vbat_low_irq_enabled) {
+ enable_irq(chip->batt_irq[VBATT_LOW].irq);
+ enable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = true;
+ }
+ if (!!(chip->wa_flag & PULSE_REQUEST_WA) && capacity == 100)
+ fg_configure_soc(chip);
+ } else if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ if (chip->vbat_low_irq_enabled) {
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ }
+ fg_cap_learning_check(chip);
+ schedule_work(&chip->update_esr_work);
+
+ if (chip->wa_flag & USE_CC_SOC_REG) {
+ if (fg_get_cc_soc(chip, &cc_soc)) {
+ pr_err("failed to get CC_SOC\n");
+ return;
+ }
+ }
+
+ if (chip->prev_status != chip->status && chip->last_sram_update_time) {
+ get_current_time(&current_time);
+ /*
+ * When charging status changes, update SRAM parameters if it
+ * was not updated before 5 seconds from now.
+ */
+ if (chip->last_sram_update_time + 5 < current_time) {
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(&chip->update_sram_data,
+ msecs_to_jiffies(0));
+ }
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+ if ((chip->wa_flag & USE_CC_SOC_REG) &&
+ chip->bad_batt_detection_en &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ chip->sw_cc_soc_data.init_sys_soc = capacity;
+ chip->sw_cc_soc_data.init_cc_soc = cc_soc;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info(" Init_sys_soc %d init_cc_soc %d\n",
+ chip->sw_cc_soc_data.init_sys_soc,
+ chip->sw_cc_soc_data.init_cc_soc);
+ }
+ }
+ if ((chip->wa_flag & USE_CC_SOC_REG) && chip->bad_batt_detection_en
+ && chip->safety_timer_expired) {
+ chip->sw_cc_soc_data.delta_soc =
+ DIV_ROUND_CLOSEST(abs(cc_soc -
+ chip->sw_cc_soc_data.init_cc_soc)
+ * 100, FULL_PERCENT_28BIT);
+ chip->sw_cc_soc_data.full_capacity =
+ chip->sw_cc_soc_data.delta_soc +
+ chip->sw_cc_soc_data.init_sys_soc;
+ pr_info("Init_sys_soc %d init_cc_soc %d cc_soc %d delta_soc %d full_capacity %d\n",
+ chip->sw_cc_soc_data.init_sys_soc,
+ chip->sw_cc_soc_data.init_cc_soc, cc_soc,
+ chip->sw_cc_soc_data.delta_soc,
+ chip->sw_cc_soc_data.full_capacity);
+ /*
+		 * If the SW CC_SOC based full capacity is greater than 150%,
+		 * the battery is likely damaged; otherwise, reset the safety
+		 * timer and restart charging.
+ */
+ if (chip->sw_cc_soc_data.full_capacity >
+ MAX_BATTERY_CC_SOC_CAPACITY) {
+ pr_info("Battery possibly damaged, do not restart charging\n");
+ } else {
+ pr_info("Reset safety-timer and restart charging\n");
+ rc = set_prop_enable_charging(chip, false);
+ if (rc) {
+ pr_err("failed to disable charging %d\n", rc);
+ return;
+ }
+
+ chip->safety_timer_expired = false;
+ msleep(200);
+
+ rc = set_prop_enable_charging(chip, true);
+ if (rc) {
+ pr_err("failed to enable charging %d\n", rc);
+ return;
+ }
+ }
+ }
+}
+
+/*
+ * Check for change in the status of input or OTG and schedule
+ * IADC gain compensation work.
+ */
+static void check_gain_compensation(struct fg_chip *chip)
+{
+ bool input_present = is_input_present(chip);
+ bool otg_present = is_otg_present(chip);
+
+ if ((chip->wa_flag & IADC_GAIN_COMP_WA)
+ && ((chip->input_present ^ input_present)
+ || (chip->otg_present ^ otg_present))) {
+ fg_stay_awake(&chip->gain_comp_wakeup_source);
+ chip->input_present = input_present;
+ chip->otg_present = otg_present;
+ cancel_work_sync(&chip->gain_comp_work);
+ schedule_work(&chip->gain_comp_work);
+ }
+}
+
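+/*
+ * JEITA hard-limit hysteresis (a sketch of the logic below, not a datasheet
+ * description): when the reported health crosses into OVERHEAT/COLD, the
+ * corresponding hard threshold is pulled in by hot_hysteresis/cold_hysteresis
+ * so the comparator does not chatter around the limit, and the original
+ * threshold is restored once the health recovers.
+ */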
+static void fg_hysteresis_config(struct fg_chip *chip)
+{
+ int hard_hot = 0, hard_cold = 0;
+
+ hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
+ hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
+ if (chip->health == POWER_SUPPLY_HEALTH_OVERHEAT && !chip->batt_hot) {
+ /* turn down the hard hot threshold */
+ chip->batt_hot = true;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot - chip->hot_hysteresis);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("hard hot hysteresis: old hot=%d, new hot=%d\n",
+ hard_hot, hard_hot - chip->hot_hysteresis);
+ } else if (chip->health == POWER_SUPPLY_HEALTH_COLD &&
+ !chip->batt_cold) {
+ /* turn up the hard cold threshold */
+ chip->batt_cold = true;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold + chip->cold_hysteresis);
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("hard cold hysteresis: old cold=%d, new cold=%d\n",
+				hard_cold, hard_cold + chip->cold_hysteresis);
+ } else if (chip->health != POWER_SUPPLY_HEALTH_OVERHEAT &&
+ chip->batt_hot) {
+ /* restore the hard hot threshold */
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot + chip->hot_hysteresis);
+ chip->batt_hot = !chip->batt_hot;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore hard hot threshold: old hot=%d, new hot=%d\n",
+ hard_hot,
+ hard_hot + chip->hot_hysteresis);
+ } else if (chip->health != POWER_SUPPLY_HEALTH_COLD &&
+ chip->batt_cold) {
+ /* restore the hard cold threshold */
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold - chip->cold_hysteresis);
+ chip->batt_cold = !chip->batt_cold;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restore hard cold threshold: old cold=%d, new cold=%d\n",
+ hard_cold,
+ hard_cold - chip->cold_hysteresis);
+ }
+}
+
+#define BATT_INFO_STS(base) (base + 0x09)
+#define JEITA_HARD_HOT_RT_STS BIT(6)
+#define JEITA_HARD_COLD_RT_STS BIT(5)
+static int fg_init_batt_temp_state(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 batt_info_sts;
+ int hard_hot = 0, hard_cold = 0;
+
+ /*
+	 * Read the batt_info_sts register to determine the battery's initial
+	 * status and configure the hysteresis accordingly.
+ */
+ rc = fg_read(chip, &batt_info_sts,
+ BATT_INFO_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("failed to read batt info sts, rc=%d\n", rc);
+ return rc;
+ }
+
+ hard_hot = get_prop_jeita_temp(chip, FG_MEM_HARD_HOT);
+ hard_cold = get_prop_jeita_temp(chip, FG_MEM_HARD_COLD);
+ chip->batt_hot =
+ (batt_info_sts & JEITA_HARD_HOT_RT_STS) ? true : false;
+ chip->batt_cold =
+ (batt_info_sts & JEITA_HARD_COLD_RT_STS) ? true : false;
+ if (chip->batt_hot || chip->batt_cold) {
+ if (chip->batt_hot) {
+ chip->health = POWER_SUPPLY_HEALTH_OVERHEAT;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_HOT,
+ hard_hot - chip->hot_hysteresis);
+ } else {
+ chip->health = POWER_SUPPLY_HEALTH_COLD;
+ set_prop_jeita_temp(chip, FG_MEM_HARD_COLD,
+ hard_cold + chip->cold_hysteresis);
+ }
+ }
+
+ return rc;
+}
+
+static int fg_power_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct fg_chip *chip = container_of(psy, struct fg_chip, bms_psy);
+ int rc = 0, unused;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_COLD, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ rc = set_prop_jeita_temp(chip, FG_MEM_SOFT_HOT, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_UPDATE_NOW:
+ if (val->intval)
+ update_sram_data(chip, &unused);
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ chip->prev_status = chip->status;
+ chip->status = val->intval;
+ schedule_work(&chip->status_change_work);
+ check_gain_compensation(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ chip->health = val->intval;
+ if (chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+
+ if (chip->jeita_hysteresis_support)
+ fg_hysteresis_config(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_DONE:
+ chip->charge_done = val->intval;
+ if (!chip->resume_soc_lowered) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+ break;
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ if ((val->intval > 0) && (val->intval <= BUCKET_COUNT)) {
+ chip->cyc_ctr.id = val->intval;
+ } else {
+ pr_err("rejecting invalid cycle_count_id = %d\n",
+ val->intval);
+ rc = -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED:
+ chip->safety_timer_expired = val->intval;
+ schedule_work(&chip->status_change_work);
+ break;
+ case POWER_SUPPLY_PROP_HI_POWER:
+ if (chip->wa_flag & BCL_HI_POWER_FOR_CHGLED_WA) {
+ chip->bcl_lpm_disabled = !!val->intval;
+ schedule_work(&chip->bcl_hi_power_work);
+ }
+ break;
+ default:
+ return -EINVAL;
+ };
+
+ return rc;
+};
+
+static int fg_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_COOL_TEMP:
+ case POWER_SUPPLY_PROP_WARM_TEMP:
+ case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define SRAM_DUMP_START 0x400
+#define SRAM_DUMP_LEN 0x200
+static void dump_sram(struct work_struct *work)
+{
+ int i, rc;
+ u8 *buffer, rt_sts;
+ char str[16];
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ dump_sram);
+
+ buffer = devm_kzalloc(chip->dev, SRAM_DUMP_LEN, GFP_KERNEL);
+ if (buffer == NULL) {
+ pr_err("Can't allocate buffer\n");
+ return;
+ }
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ else
+ pr_info("soc rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->batt_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ else
+ pr_info("batt rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_read(chip, &rt_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc)
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->mem_base), rc);
+ else
+ pr_info("memif rt_sts: 0x%x\n", rt_sts);
+
+ rc = fg_mem_read(chip, buffer, SRAM_DUMP_START, SRAM_DUMP_LEN, 0, 0);
+ if (rc) {
+ pr_err("dump failed: rc = %d\n", rc);
+ return;
+ }
+
+ for (i = 0; i < SRAM_DUMP_LEN; i += 4) {
+ str[0] = '\0';
+		fill_string(str, sizeof(str), buffer + i, 4);
+ pr_info("%03X %s\n", SRAM_DUMP_START + i, str);
+ }
+ devm_kfree(chip->dev, buffer);
+}
+
+#define MAXRSCHANGE_REG 0x434
+#define ESR_VALUE_OFFSET 1
+#define ESR_STRICT_VALUE 0x4120391F391F3019
+#define ESR_DEFAULT_VALUE 0x58CD4A6761C34A67
+static void update_esr_value(struct work_struct *work)
+{
+ union power_supply_propval prop = {0, };
+ u64 esr_value;
+ int rc = 0;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ update_esr_work);
+
+ if (!chip->batt_psy && chip->batt_psy_name)
+ chip->batt_psy = power_supply_get_by_name(chip->batt_psy_name);
+
+ if (chip->batt_psy)
+ chip->batt_psy->get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
+ else
+ return;
+
+ if (!chip->esr_strict_filter) {
+ if ((prop.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
+ (chip->status == POWER_SUPPLY_STATUS_FULL)) {
+ esr_value = ESR_STRICT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value,
+ MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write strict ESR value rc=%d\n",
+ rc);
+ else
+ chip->esr_strict_filter = true;
+ }
+ } else if ((prop.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER &&
+ chip->status == POWER_SUPPLY_STATUS_CHARGING) ||
+ (chip->status == POWER_SUPPLY_STATUS_DISCHARGING)) {
+ esr_value = ESR_DEFAULT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write default ESR value rc=%d\n", rc);
+ else
+ chip->esr_strict_filter = false;
+ }
+}
+
+#define TEMP_COUNTER_REG 0x580
+#define VBAT_FILTERED_OFFSET 1
+#define GAIN_REG 0x424
+#define GAIN_OFFSET 1
+#define K_VCOR_REG 0x484
+#define DEF_GAIN_OFFSET 2
+#define PICO_UNIT 0xE8D4A51000LL
+#define ATTO_UNIT 0xDE0B6B3A7640000LL
+#define VBAT_REF 3800000
+
+/*
+ * IADC Gain compensation steps:
+ * If Input/OTG absent:
+ * - read VBAT_FILTERED, KVCOR, GAIN
+ *	- calculate the gain compensation using the following formula:
+ * gain = (1 + gain) * (1 + kvcor * (vbat_filtered - 3800000)) - 1;
+ * else
+ * - reset to the default gain compensation
+ */
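+/*
+ * A rough reading of the fixed-point arithmetic used below (inferred from the
+ * constants, not from a datasheet): dfl_gain is kept in micro-units
+ * (1.0 == MICRO_UNIT) and kvcor in pico-units per uV, so
+ *	numerator = 1e6*(1 + gain) * 1e12*(1 + kvcor*dv) - 1e18
+ * and dividing by PICO_UNIT leaves the compensated gain in micro-units, which
+ * half_float_to_buffer() then packs back into the SRAM gain register.
+ */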
+static void iadc_gain_comp_work(struct work_struct *work)
+{
+ u8 reg[4];
+ int rc;
+ uint64_t vbat_filtered;
+ int64_t gain, kvcor, temp, numerator;
+ struct fg_chip *chip = container_of(work, struct fg_chip,
+ gain_comp_work);
+ bool input_present = is_input_present(chip);
+ bool otg_present = is_otg_present(chip);
+
+ if (!chip->init_done)
+ goto done;
+
+ if (!input_present && !otg_present) {
+ /* read VBAT_FILTERED */
+ rc = fg_mem_read(chip, reg, TEMP_COUNTER_REG, 3,
+ VBAT_FILTERED_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read VBAT: rc=%d\n", rc);
+ goto done;
+ }
+ temp = (reg[2] << 16) | (reg[1] << 8) | reg[0];
+ vbat_filtered = div_u64((u64)temp * LSB_24B_NUMRTR,
+ LSB_24B_DENMTR);
+
+ /* read K_VCOR */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, 0, 0);
+ if (rc) {
+			pr_err("Failed to read KVCOR rc=%d\n", rc);
+ goto done;
+ }
+ kvcor = half_float(reg);
+
+ /* calculate gain */
+ numerator = (MICRO_UNIT + chip->iadc_comp_data.dfl_gain)
+ * (PICO_UNIT + kvcor * (vbat_filtered - VBAT_REF))
+ - ATTO_UNIT;
+ gain = div64_s64(numerator, PICO_UNIT);
+
+ /* write back gain */
+ half_float_to_buffer(gain, reg);
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2, GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain reg rc=%d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IADC gain update [%x %x]\n", reg[1], reg[0]);
+ chip->iadc_comp_data.gain_active = true;
+ } else {
+ /* reset gain register */
+ rc = fg_mem_write(chip, chip->iadc_comp_data.dfl_gain_reg,
+ GAIN_REG, 2, GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write gain comp: %d\n", rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("IADC gain reset [%x %x]\n",
+ chip->iadc_comp_data.dfl_gain_reg[1],
+ chip->iadc_comp_data.dfl_gain_reg[0]);
+ chip->iadc_comp_data.gain_active = false;
+ }
+
+done:
+ fg_relax(&chip->gain_comp_wakeup_source);
+}
+
+#define BATT_MISSING_STS BIT(6)
+static bool is_battery_missing(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_batt_sts;
+
+ rc = fg_read(chip, &fg_batt_sts,
+ INT_RT_STS(chip->batt_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->batt_base), rc);
+ return false;
+ }
+
+ return (fg_batt_sts & BATT_MISSING_STS) ? true : false;
+}
+
+#define SOC_FIRST_EST_DONE BIT(5)
+static bool is_first_est_done(struct fg_chip *chip)
+{
+ int rc;
+ u8 fg_soc_sts;
+
+ rc = fg_read(chip, &fg_soc_sts,
+ INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ return false;
+ }
+
+ return (fg_soc_sts & SOC_FIRST_EST_DONE) ? true : false;
+}
+
+static irqreturn_t fg_vbatt_low_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ int rc;
+ bool vbatt_low_sts;
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("vbatt-low triggered\n");
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_get_vbatt_status(chip, &vbatt_low_sts);
+ if (rc) {
+ pr_err("error in reading vbatt_status, rc:%d\n", rc);
+ goto out;
+ }
+ if (!vbatt_low_sts && chip->vbat_low_irq_enabled) {
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("disabling vbatt_low irq\n");
+ disable_irq_wake(chip->batt_irq[VBATT_LOW].irq);
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ }
+ }
+ if (chip->power_supply_registered)
+ power_supply_changed(&chip->bms_psy);
+out:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_batt_missing_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ bool batt_missing = is_battery_missing(chip);
+
+ if (batt_missing) {
+ chip->battery_missing = true;
+ chip->profile_loaded = false;
+ chip->batt_type = default_batt_type;
+ mutex_lock(&chip->cyc_ctr.lock);
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("battery missing, clearing cycle counters\n");
+ clear_cycle_counter(chip);
+ mutex_unlock(&chip->cyc_ctr.lock);
+ } else {
+ if (!chip->use_otp_profile) {
+ reinit_completion(&chip->batt_id_avail);
+ reinit_completion(&chip->first_soc_done);
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+ cancel_delayed_work(&chip->update_sram_data);
+ schedule_delayed_work(
+ &chip->update_sram_data,
+ msecs_to_jiffies(0));
+ } else {
+ chip->battery_missing = false;
+ }
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("batt-missing triggered: %s\n",
+ batt_missing ? "missing" : "present");
+
+ if (chip->power_supply_registered)
+ power_supply_changed(&chip->bms_psy);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_mem_avail_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 mem_if_sts;
+ int rc;
+
+ rc = fg_read(chip, &mem_if_sts, INT_RT_STS(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to read mem status rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ if (fg_check_sram_access(chip)) {
+ if ((fg_debug_mask & FG_IRQS)
+ & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("sram access granted\n");
+ reinit_completion(&chip->sram_access_revoked);
+ complete_all(&chip->sram_access_granted);
+ } else {
+ if ((fg_debug_mask & FG_IRQS)
+ & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("sram access revoked\n");
+ complete_all(&chip->sram_access_revoked);
+ }
+
+ if (!rc && (fg_debug_mask & FG_IRQS)
+ & (FG_MEM_DEBUG_READS | FG_MEM_DEBUG_WRITES))
+ pr_info("mem_if sts 0x%02x\n", mem_if_sts);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 soc_rt_sts;
+ int rc;
+
+ rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered 0x%x\n", soc_rt_sts);
+
+ schedule_work(&chip->battery_age_work);
+
+ if (chip->power_supply_registered)
+ power_supply_changed(&chip->bms_psy);
+
+ if (chip->rslow_comp.chg_rs_to_rslow > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c2 > 0)
+ schedule_work(&chip->rslow_comp_work);
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+ schedule_work(&chip->update_esr_work);
+ if (chip->charge_full)
+ schedule_work(&chip->charge_full_work);
+ if (chip->wa_flag & IADC_GAIN_COMP_WA
+ && chip->iadc_comp_data.gain_active) {
+ fg_stay_awake(&chip->gain_comp_wakeup_source);
+ schedule_work(&chip->gain_comp_work);
+ }
+
+ if (chip->wa_flag & USE_CC_SOC_REG
+ && chip->learning_data.active) {
+ fg_stay_awake(&chip->capacity_learning_wakeup_source);
+ schedule_work(&chip->fg_cap_learning_work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define FG_EMPTY_DEBOUNCE_MS 1500
+static irqreturn_t fg_empty_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+ u8 soc_rt_sts;
+ int rc;
+
+ rc = fg_read(chip, &soc_rt_sts, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ goto done;
+ }
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered 0x%x\n", soc_rt_sts);
+ if (fg_is_batt_empty(chip)) {
+ fg_stay_awake(&chip->empty_check_wakeup_source);
+ schedule_delayed_work(&chip->check_empty_work,
+ msecs_to_jiffies(FG_EMPTY_DEBOUNCE_MS));
+ } else {
+ chip->soc_empty = false;
+ }
+
+done:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_first_soc_irq_handler(int irq, void *_chip)
+{
+ struct fg_chip *chip = _chip;
+
+ if (fg_debug_mask & FG_IRQS)
+ pr_info("triggered\n");
+
+ if (fg_est_dump)
+ schedule_work(&chip->dump_sram);
+
+ if (chip->power_supply_registered)
+ power_supply_changed(&chip->bms_psy);
+
+ complete_all(&chip->first_soc_done);
+
+ return IRQ_HANDLED;
+}
+
+static void fg_external_power_changed(struct power_supply *psy)
+{
+ struct fg_chip *chip = container_of(psy, struct fg_chip, bms_psy);
+
+ if (is_input_present(chip) && chip->rslow_comp.active &&
+ chip->rslow_comp.chg_rs_to_rslow > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c1 > 0 &&
+ chip->rslow_comp.chg_rslow_comp_c2 > 0)
+ schedule_work(&chip->rslow_comp_work);
+ if (!is_input_present(chip) && chip->resume_soc_lowered) {
+ fg_stay_awake(&chip->resume_soc_wakeup_source);
+ schedule_work(&chip->set_resume_soc_work);
+ }
+ if (!is_input_present(chip) && chip->charge_full)
+ schedule_work(&chip->charge_full_work);
+}
+
+static void set_resume_soc_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ set_resume_soc_work);
+ int rc, resume_soc_raw;
+
+ if (is_input_present(chip) && !chip->resume_soc_lowered) {
+ if (!chip->charge_done)
+ goto done;
+ resume_soc_raw = get_monotonic_soc_raw(chip)
+ - (0xFF - settings[FG_MEM_RESUME_SOC].value);
+ if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ goto done;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("resume soc lowered to 0x%02x\n",
+ resume_soc_raw);
+ }
+ } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
+ pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
+ }
+ chip->charge_done = false;
+ chip->resume_soc_lowered = true;
+ } else if (chip->resume_soc_lowered && (!is_input_present(chip)
+ || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+ resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
+ if (resume_soc_raw > 0 && resume_soc_raw < FULL_SOC_RAW) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ goto done;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("resume soc set to 0x%02x\n",
+ resume_soc_raw);
+ }
+ } else if (settings[FG_MEM_RESUME_SOC].value > 0) {
+ pr_err("bad resume soc 0x%02x\n", resume_soc_raw);
+ }
+ chip->resume_soc_lowered = false;
+ }
+done:
+ fg_relax(&chip->resume_soc_wakeup_source);
+}
+
+
+#define OCV_COEFFS_START_REG 0x4C0
+#define OCV_JUNCTION_REG 0x4D8
+#define NOM_CAP_REG 0x4F4
+#define CUTOFF_VOLTAGE_REG 0x40C
+#define RSLOW_CFG_REG 0x538
+#define RSLOW_CFG_OFFSET 2
+#define RSLOW_THRESH_REG 0x52C
+#define RSLOW_THRESH_OFFSET 0
+#define TEMP_RS_TO_RSLOW_OFFSET 2
+#define RSLOW_COMP_REG 0x528
+#define RSLOW_COMP_C1_OFFSET 0
+#define RSLOW_COMP_C2_OFFSET 2
+static int populate_system_data(struct fg_chip *chip)
+{
+ u8 buffer[24];
+ int rc, i;
+ int16_t cc_mah;
+
+ fg_mem_lock(chip);
+ rc = fg_mem_read(chip, buffer, OCV_COEFFS_START_REG, 24, 0, 0);
+ if (rc) {
+ pr_err("Failed to read ocv coefficients: %d\n", rc);
+ goto done;
+ }
+ for (i = 0; i < 12; i += 1)
+ chip->ocv_coeffs[i] = half_float(buffer + (i * 2));
+ if (fg_debug_mask & FG_AGING) {
+ pr_info("coeffs1 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[0], chip->ocv_coeffs[1],
+ chip->ocv_coeffs[2], chip->ocv_coeffs[3]);
+ pr_info("coeffs2 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[4], chip->ocv_coeffs[5],
+ chip->ocv_coeffs[6], chip->ocv_coeffs[7]);
+ pr_info("coeffs3 = %lld %lld %lld %lld\n",
+ chip->ocv_coeffs[8], chip->ocv_coeffs[9],
+ chip->ocv_coeffs[10], chip->ocv_coeffs[11]);
+ }
+ rc = fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 0, 0);
+ chip->ocv_junction_p1p2 = buffer[0] * 100 / 255;
+ rc |= fg_mem_read(chip, buffer, OCV_JUNCTION_REG, 1, 1, 0);
+ chip->ocv_junction_p2p3 = buffer[0] * 100 / 255;
+ if (rc) {
+ pr_err("Failed to read ocv junctions: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_read(chip, buffer, NOM_CAP_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to read nominal capacitance: %d\n", rc);
+ goto done;
+ }
+ chip->nom_cap_uah = bcap_uah_2b(buffer);
+ chip->actual_cap_uah = chip->nom_cap_uah;
+ if (chip->learning_data.learned_cc_uah == 0) {
+ chip->learning_data.learned_cc_uah = chip->nom_cap_uah;
+ fg_cap_learning_save_data(chip);
+ } else if (chip->learning_data.feedback_on) {
+ cc_mah = div64_s64(chip->learning_data.learned_cc_uah, 1000);
+ rc = fg_calc_and_store_cc_soc_coeff(chip, cc_mah);
+ if (rc)
+ pr_err("Error in restoring cc_soc_coeff, rc:%d\n", rc);
+ }
+ rc = fg_mem_read(chip, buffer, CUTOFF_VOLTAGE_REG, 2, 0, 0);
+ if (rc) {
+ pr_err("Failed to read cutoff voltage: %d\n", rc);
+ goto done;
+ }
+ chip->cutoff_voltage = voltage_2b(buffer);
+ if (fg_debug_mask & FG_AGING)
+ pr_info("cutoff_voltage = %lld, nom_cap_uah = %d p1p2 = %d, p2p3 = %d\n",
+ chip->cutoff_voltage, chip->nom_cap_uah,
+ chip->ocv_junction_p1p2,
+ chip->ocv_junction_p2p3);
+
+ rc = fg_mem_read(chip, buffer, RSLOW_CFG_REG, 1, RSLOW_CFG_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow cfg: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.rslow_cfg = buffer[0];
+ rc = fg_mem_read(chip, buffer, RSLOW_THRESH_REG, 1,
+ RSLOW_THRESH_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow thresh: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.rslow_thr = buffer[0];
+ rc = fg_mem_read(chip, buffer, TEMP_RS_TO_RSLOW_REG, 2,
+ RSLOW_THRESH_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rs to rslow: %d\n", rc);
+ goto done;
+ }
+ memcpy(chip->rslow_comp.rs_to_rslow, buffer, 2);
+ rc = fg_mem_read(chip, buffer, RSLOW_COMP_REG, 4,
+ RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to read rslow comp: %d\n", rc);
+ goto done;
+ }
+ memcpy(chip->rslow_comp.rslow_comp, buffer, 4);
+
+done:
+ fg_mem_release(chip);
+ return rc;
+}
+
+#define RSLOW_CFG_MASK (BIT(2) | BIT(3) | BIT(4) | BIT(5))
+#define RSLOW_CFG_ON_VAL (BIT(2) | BIT(3))
+#define RSLOW_THRESH_FULL_VAL 0xFF
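+/*
+ * Rslow charge compensation, as implemented below: while charging above the
+ * chg_rslow_comp_thr battery SOC, the charge-specific RSLOW config, threshold,
+ * rs_to_rslow and comp coefficients from the profile are written;
+ * fg_rslow_charge_comp_clear() restores the values cached by
+ * populate_system_data() once that condition no longer holds.
+ */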
+static int fg_rslow_charge_comp_set(struct fg_chip *chip)
+{
+ int rc;
+ u8 buffer[2];
+
+ mutex_lock(&chip->rslow_comp.lock);
+ fg_mem_lock(chip);
+
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_MASK, RSLOW_CFG_ON_VAL, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
+ 0xFF, RSLOW_THRESH_FULL_VAL, RSLOW_THRESH_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow thresh: %d\n", rc);
+ goto done;
+ }
+
+ half_float_to_buffer(chip->rslow_comp.chg_rs_to_rslow, buffer);
+ rc = fg_mem_write(chip, buffer,
+ TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs to rslow: %d\n", rc);
+ goto done;
+ }
+ half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c1, buffer);
+ rc = fg_mem_write(chip, buffer,
+ RSLOW_COMP_REG, 2, RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ half_float_to_buffer(chip->rslow_comp.chg_rslow_comp_c2, buffer);
+ rc = fg_mem_write(chip, buffer,
+ RSLOW_COMP_REG, 2, RSLOW_COMP_C2_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.active = true;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Activated rslow charge comp values\n");
+
+done:
+ fg_mem_release(chip);
+ mutex_unlock(&chip->rslow_comp.lock);
+ return rc;
+}
+
+#define RSLOW_CFG_ORIG_MASK (BIT(4) | BIT(5))
+static int fg_rslow_charge_comp_clear(struct fg_chip *chip)
+{
+ u8 reg;
+ int rc;
+
+ mutex_lock(&chip->rslow_comp.lock);
+ fg_mem_lock(chip);
+
+ reg = chip->rslow_comp.rslow_cfg & RSLOW_CFG_ORIG_MASK;
+ rc = fg_mem_masked_write(chip, RSLOW_CFG_REG,
+ RSLOW_CFG_MASK, reg, RSLOW_CFG_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow cfg: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_masked_write(chip, RSLOW_THRESH_REG,
+ 0xFF, chip->rslow_comp.rslow_thr, RSLOW_THRESH_OFFSET);
+ if (rc) {
+ pr_err("unable to write rslow thresh: %d\n", rc);
+ goto done;
+ }
+
+ rc = fg_mem_write(chip, chip->rslow_comp.rs_to_rslow,
+ TEMP_RS_TO_RSLOW_REG, 2, TEMP_RS_TO_RSLOW_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rs to rslow: %d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_write(chip, chip->rslow_comp.rslow_comp,
+ RSLOW_COMP_REG, 4, RSLOW_COMP_C1_OFFSET, 0);
+ if (rc) {
+ pr_err("unable to write rslow comp: %d\n", rc);
+ goto done;
+ }
+ chip->rslow_comp.active = false;
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Cleared rslow charge comp values\n");
+
+done:
+ fg_mem_release(chip);
+ mutex_unlock(&chip->rslow_comp.lock);
+ return rc;
+}
+
+static void rslow_comp_work(struct work_struct *work)
+{
+ int battery_soc_1b;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ rslow_comp_work);
+
+ battery_soc_1b = get_battery_soc_raw(chip) >> 16;
+ if (battery_soc_1b > chip->rslow_comp.chg_rslow_comp_thr
+ && chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ if (!chip->rslow_comp.active)
+ fg_rslow_charge_comp_set(chip);
+ } else {
+ if (chip->rslow_comp.active)
+ fg_rslow_charge_comp_clear(chip);
+ }
+}
+
+#define MICROUNITS_TO_ADC_RAW(units) \
+ div64_s64(units * LSB_16B_DENMTR, LSB_16B_NUMRTR)
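+/*
+ * MICROUNITS_TO_ADC_RAW above appears to divide a signed micro-unit quantity
+ * (uA or uV) by the 16-bit ADC LSB size expressed as
+ * LSB_16B_NUMRTR/LSB_16B_DENMTR micro-units per code; callers truncate the
+ * result to a little-endian s16 before writing it to SRAM.
+ */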
+static int update_chg_iterm(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_current_raw;
+ s64 current_ma = -settings[FG_MEM_CHG_TERM_CURRENT].value;
+
+ converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
+ data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_current_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
+ current_ma, converted_current_raw, data[0], data[1]);
+ return fg_mem_write(chip, data,
+ settings[FG_MEM_CHG_TERM_CURRENT].address,
+ 2, settings[FG_MEM_CHG_TERM_CURRENT].offset, 0);
+}
+
+#define CC_CV_SETPOINT_REG 0x4F8
+#define CC_CV_SETPOINT_OFFSET 0
+static void update_cc_cv_setpoint(struct fg_chip *chip)
+{
+ int rc;
+ u8 tmp[2];
+
+ if (!chip->cc_cv_threshold_mv)
+ return;
+ batt_to_setpoint_adc(chip->cc_cv_threshold_mv, tmp);
+ rc = fg_mem_write(chip, tmp, CC_CV_SETPOINT_REG, 2,
+ CC_CV_SETPOINT_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write CC_CV_VOLT rc=%d\n", rc);
+ return;
+ }
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Wrote %x %x to address %x for CC_CV setpoint\n",
+ tmp[0], tmp[1], CC_CV_SETPOINT_REG);
+}
+
+#define LOW_LATENCY BIT(6)
+#define BATT_PROFILE_OFFSET 0x4C0
+#define PROFILE_INTEGRITY_REG 0x53C
+#define PROFILE_INTEGRITY_BIT BIT(0)
+#define FIRST_EST_DONE_BIT BIT(5)
+#define MAX_TRIES_FIRST_EST 3
+#define FIRST_EST_WAIT_MS 2000
+#define PROFILE_LOAD_TIMEOUT_MS 5000
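+/*
+ * Sketch of the restart sequence implemented below (paraphrased from the
+ * code): optionally latch the battery temperature, clear NO_OTP_PROF_RELOAD
+ * and the restart bits, toggle LOW_LATENCY around one SRAM cycle, optionally
+ * rewrite the profile and its integrity bit, set NO_OTP_PROF_RELOAD, assert
+ * REDO_FIRST_ESTIMATE | RESTART_GO, wait for first_soc_done, and finally
+ * restore the saved temperature.
+ */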
+static int fg_do_restart(struct fg_chip *chip, bool write_profile)
+{
+ int rc;
+ u8 reg = 0;
+ u8 buf[2];
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("restarting fuel gauge...\n");
+
+ chip->fg_restarting = true;
+ /*
+	 * Save the battery temperature if SW rbias control is active so that
+	 * there is no window after the restart without a valid temperature
+	 * reading.
+ */
+ if (chip->sw_rbias_ctrl) {
+ rc = fg_mem_read(chip, buf,
+ fg_data[FG_DATA_BATT_TEMP].address,
+ fg_data[FG_DATA_BATT_TEMP].len,
+ fg_data[FG_DATA_BATT_TEMP].offset, 0);
+ if (rc) {
+ pr_err("failed to read batt temp rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ }
+ /*
+ * release the sram access and configure the correct settings
+ * before re-requesting access.
+ */
+ mutex_lock(&chip->rw_lock);
+ fg_release_access(chip);
+
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, 0, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto unlock_and_fail;
+ }
+
+ /* unset the restart bits so the fg doesn't continuously restart */
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, 0, 1);
+ if (rc) {
+ pr_err("failed to unset fg restart: %d\n", rc);
+ goto unlock_and_fail;
+ }
+
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip),
+ LOW_LATENCY, LOW_LATENCY, 1);
+ if (rc) {
+ pr_err("failed to set low latency access bit\n");
+ goto unlock_and_fail;
+ }
+ mutex_unlock(&chip->rw_lock);
+
+ /* read once to get a fg cycle in */
+ rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
+ if (rc) {
+ pr_err("failed to read profile integrity rc=%d\n", rc);
+ goto fail;
+ }
+
+ /*
+ * If this is not the first time a profile has been loaded, sleep for
+	 * 3 seconds to make sure the NO_OTP_RELOAD bit is cleared in memory.
+ */
+ if (chip->first_profile_loaded)
+ msleep(3000);
+
+ mutex_lock(&chip->rw_lock);
+ fg_release_access(chip);
+ rc = fg_masked_write(chip, MEM_INTF_CFG(chip), LOW_LATENCY, 0, 1);
+ if (rc) {
+ pr_err("failed to set low latency access bit\n");
+ goto unlock_and_fail;
+ }
+
+ atomic_add_return(1, &chip->memif_user_cnt);
+ mutex_unlock(&chip->rw_lock);
+
+ if (write_profile) {
+ /* write the battery profile */
+ rc = fg_mem_write(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
+ chip->batt_profile_len, 0, 1);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ /* write the integrity bits and release access */
+ rc = fg_mem_masked_write(chip, PROFILE_INTEGRITY_REG,
+ PROFILE_INTEGRITY_BIT,
+ PROFILE_INTEGRITY_BIT, 0);
+ if (rc) {
+ pr_err("failed to write profile rc=%d\n", rc);
+ goto sub_and_fail;
+ }
+ }
+
+ /* decrement the user count so that memory access can be released */
+ fg_release_access_if_necessary(chip);
+
+ /*
+ * make sure that the first estimate has completed
+ * in case of a hotswap
+ */
+ rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ if (rc <= 0) {
+ pr_err("transaction timed out rc=%d\n", rc);
+ rc = -ETIMEDOUT;
+ goto fail;
+ }
+
+ /*
+ * reinitialize the completion so that the driver knows when the restart
+ * finishes
+ */
+ reinit_completion(&chip->first_soc_done);
+
+ /*
+ * set the restart bits so that the next fg cycle will not reload
+ * the profile
+ */
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, NO_OTP_PROF_RELOAD, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto fail;
+ }
+
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, reg, 1);
+ if (rc) {
+ pr_err("failed to set fg restart: %d\n", rc);
+ goto fail;
+ }
+
+ /* wait for the first estimate to complete */
+ rc = wait_for_completion_interruptible_timeout(&chip->first_soc_done,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+ if (rc <= 0) {
+ pr_err("transaction timed out rc=%d\n", rc);
+ rc = -ETIMEDOUT;
+ goto fail;
+ }
+ rc = fg_read(chip, &reg, INT_RT_STS(chip->soc_base), 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n",
+ INT_RT_STS(chip->soc_base), rc);
+ goto fail;
+ }
+ if ((reg & FIRST_EST_DONE_BIT) == 0)
+ pr_err("Battery profile reloading failed, no first estimate\n");
+
+ rc = fg_masked_write(chip, chip->soc_base + SOC_BOOT_MOD,
+ NO_OTP_PROF_RELOAD, 0, 1);
+ if (rc) {
+ pr_err("failed to set no otp reload bit\n");
+ goto fail;
+ }
+ /* unset the restart bits so the fg doesn't continuously restart */
+ reg = REDO_FIRST_ESTIMATE | RESTART_GO;
+ rc = fg_masked_write(chip, chip->soc_base + SOC_RESTART,
+ reg, 0, 1);
+ if (rc) {
+ pr_err("failed to unset fg restart: %d\n", rc);
+ goto fail;
+ }
+
+ /* restore the battery temperature reading here */
+ if (chip->sw_rbias_ctrl) {
+ if (fg_debug_mask & FG_STATUS)
+			pr_info("reloaded 0x%02x%02x into batt temp\n",
+ buf[0], buf[1]);
+ rc = fg_mem_write(chip, buf,
+ fg_data[FG_DATA_BATT_TEMP].address,
+ fg_data[FG_DATA_BATT_TEMP].len,
+ fg_data[FG_DATA_BATT_TEMP].offset, 0);
+ if (rc) {
+ pr_err("failed to write batt temp rc=%d\n", rc);
+ goto fail;
+ }
+ }
+ chip->fg_restarting = false;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("done!\n");
+ return 0;
+
+unlock_and_fail:
+ mutex_unlock(&chip->rw_lock);
+ goto fail;
+sub_and_fail:
+ fg_release_access_if_necessary(chip);
+ goto fail;
+fail:
+ chip->fg_restarting = false;
+ return -EINVAL;
+}
+
+#define FG_PROFILE_LEN 128
+#define PROFILE_COMPARE_LEN 32
+#define THERMAL_COEFF_ADDR 0x444
+#define THERMAL_COEFF_OFFSET 0x2
+#define BATTERY_PSY_WAIT_MS 2000
+static int fg_batt_profile_init(struct fg_chip *chip)
+{
+ int rc = 0, ret;
+ int len;
+ struct device_node *node = chip->spmi->dev.of_node;
+ struct device_node *batt_node, *profile_node;
+ const char *data, *batt_type_str;
+ bool tried_again = false, vbat_in_range, profiles_same;
+ u8 reg = 0;
+
+wait:
+ fg_stay_awake(&chip->profile_wakeup_source);
+ ret = wait_for_completion_interruptible_timeout(&chip->batt_id_avail,
+ msecs_to_jiffies(PROFILE_LOAD_TIMEOUT_MS));
+	/* If we were interrupted, wait one more time. */
+ if (ret == -ERESTARTSYS && !tried_again) {
+ tried_again = true;
+ pr_debug("interrupted, waiting again\n");
+ goto wait;
+ } else if (ret <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("profile loading timed out rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ batt_node = of_find_node_by_name(node, "qcom,battery-data");
+ if (!batt_node) {
+ pr_warn("No available batterydata, using OTP defaults\n");
+ rc = 0;
+ goto no_profile;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("battery id = %d\n",
+ get_sram_prop_now(chip, FG_DATA_BATT_ID));
+ profile_node = of_batterydata_get_best_profile(batt_node, "bms",
+ fg_batt_type);
+ if (!profile_node) {
+ pr_err("couldn't find profile handle\n");
+ rc = -ENODATA;
+ goto no_profile;
+ }
+
+ /* read rslow compensation values if they're available */
+ rc = of_property_read_u32(profile_node, "qcom,chg-rs-to-rslow",
+ &chip->rslow_comp.chg_rs_to_rslow);
+ if (rc) {
+ chip->rslow_comp.chg_rs_to_rslow = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rs to rslow: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c1",
+ &chip->rslow_comp.chg_rslow_comp_c1);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_c1 = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp c1: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-c2",
+ &chip->rslow_comp.chg_rslow_comp_c2);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_c2 = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp c2: %d\n", rc);
+ }
+ rc = of_property_read_u32(profile_node, "qcom,chg-rslow-comp-thr",
+ &chip->rslow_comp.chg_rslow_comp_thr);
+ if (rc) {
+ chip->rslow_comp.chg_rslow_comp_thr = -EINVAL;
+ if (rc != -EINVAL)
+ pr_err("Could not read rslow comp thr: %d\n", rc);
+ }
+
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &chip->batt_max_voltage_uv);
+
+ if (rc)
+ pr_warn("couldn't find battery max voltage\n");
+
+ /*
+ * Only configure from profile if fg-cc-cv-threshold-mv is not
+ * defined in the charger device node.
+ */
+ if (!of_find_property(chip->spmi->dev.of_node,
+ "qcom,fg-cc-cv-threshold-mv", NULL)) {
+ of_property_read_u32(profile_node,
+ "qcom,fg-cc-cv-threshold-mv",
+ &chip->cc_cv_threshold_mv);
+ }
+
+ data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
+ if (!data) {
+ pr_err("no battery profile loaded\n");
+ rc = 0;
+ goto no_profile;
+ }
+
+ if (len != FG_PROFILE_LEN) {
+ pr_err("battery profile incorrect size: %d\n", len);
+ rc = -EINVAL;
+ goto no_profile;
+ }
+
+ rc = of_property_read_string(profile_node, "qcom,battery-type",
+ &batt_type_str);
+ if (rc) {
+ pr_err("Could not find battery data type: %d\n", rc);
+ rc = 0;
+ goto no_profile;
+ }
+
+ if (!chip->batt_profile)
+ chip->batt_profile = devm_kzalloc(chip->dev,
+ sizeof(char) * len, GFP_KERNEL);
+
+ if (!chip->batt_profile) {
+ pr_err("out of memory\n");
+ rc = -ENOMEM;
+ goto no_profile;
+ }
+
+ rc = fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 1);
+ if (rc) {
+ pr_err("failed to read profile integrity rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ rc = fg_mem_read(chip, chip->batt_profile, BATT_PROFILE_OFFSET,
+ len, 0, 1);
+ if (rc) {
+ pr_err("failed to read profile rc=%d\n", rc);
+ goto no_profile;
+ }
+
+ vbat_in_range = get_vbat_est_diff(chip)
+ < settings[FG_MEM_VBAT_EST_DIFF].value * 1000;
+ profiles_same = memcmp(chip->batt_profile, data,
+ PROFILE_COMPARE_LEN) == 0;
+ if (reg & PROFILE_INTEGRITY_BIT) {
+ fg_cap_learning_load_data(chip);
+ if (vbat_in_range && !fg_is_batt_empty(chip) && profiles_same) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery profiles same, using default\n");
+ if (fg_est_dump)
+ schedule_work(&chip->dump_sram);
+ goto done;
+ }
+ } else {
+ pr_info("Battery profile not same, clearing data\n");
+ clear_cycle_counter(chip);
+ chip->learning_data.learned_cc_uah = 0;
+ }
+ if (fg_est_dump)
+ dump_sram(&chip->dump_sram);
+ if ((fg_debug_mask & FG_STATUS) && !vbat_in_range)
+ pr_info("Vbat out of range: v_current_pred: %d, v:%d\n",
+ fg_data[FG_DATA_CPRED_VOLTAGE].value,
+ fg_data[FG_DATA_VOLTAGE].value);
+ if ((fg_debug_mask & FG_STATUS) && fg_is_batt_empty(chip))
+ pr_info("battery empty\n");
+ if ((fg_debug_mask & FG_STATUS) && !profiles_same)
+ pr_info("profiles differ\n");
+ if (fg_debug_mask & FG_STATUS) {
+ pr_info("Using new profile\n");
+ print_hex_dump(KERN_INFO, "FG: loaded profile: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ chip->batt_profile, len, false);
+ }
+ if (!chip->batt_psy && chip->batt_psy_name)
+ chip->batt_psy = power_supply_get_by_name(chip->batt_psy_name);
+
+ if (!chip->batt_psy) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("batt psy not registered\n");
+ goto reschedule;
+ }
+ if (chip->power_supply_registered)
+ power_supply_changed(&chip->bms_psy);
+
+ memcpy(chip->batt_profile, data, len);
+
+ chip->batt_profile_len = len;
+
+ if (fg_debug_mask & FG_STATUS)
+ print_hex_dump(KERN_INFO, "FG: new profile: ",
+ DUMP_PREFIX_NONE, 16, 1, chip->batt_profile,
+ chip->batt_profile_len, false);
+
+ rc = fg_do_restart(chip, true);
+ if (rc) {
+ pr_err("restart failed: %d\n", rc);
+ goto no_profile;
+ }
+
+ /*
+ * Only configure from profile if thermal-coefficients is not
+ * defined in the FG device node.
+ */
+ if (!of_find_property(chip->spmi->dev.of_node,
+ "qcom,thermal-coefficients", NULL)) {
+ data = of_get_property(profile_node,
+ "qcom,thermal-coefficients", &len);
+ if (data && len == THERMAL_COEFF_N_BYTES) {
+ memcpy(chip->thermal_coefficients, data, len);
+ rc = fg_mem_write(chip, chip->thermal_coefficients,
+ THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
+ THERMAL_COEFF_OFFSET, 0);
+ if (rc)
+ pr_err("spmi write failed addr:%03x, ret:%d\n",
+ THERMAL_COEFF_ADDR, rc);
+ else if (fg_debug_mask & FG_STATUS)
+ pr_info("Battery thermal coefficients changed\n");
+ }
+ }
+
+done:
+ if (fg_batt_type)
+ chip->batt_type = fg_batt_type;
+ else
+ chip->batt_type = batt_type_str;
+ chip->first_profile_loaded = true;
+ chip->profile_loaded = true;
+ chip->battery_missing = is_battery_missing(chip);
+ update_chg_iterm(chip);
+ update_cc_cv_setpoint(chip);
+ rc = populate_system_data(chip);
+ if (rc) {
+ pr_err("failed to read ocv properties=%d\n", rc);
+ return rc;
+ }
+ estimate_battery_age(chip, &chip->actual_cap_uah);
+ schedule_work(&chip->status_change_work);
+ if (chip->power_supply_registered)
+ power_supply_changed(&chip->bms_psy);
+ fg_relax(&chip->profile_wakeup_source);
+ pr_info("Battery SOC: %d, V: %duV\n", get_prop_capacity(chip),
+ fg_data[FG_DATA_VOLTAGE].value);
+ return rc;
+no_profile:
+ if (chip->power_supply_registered)
+ power_supply_changed(&chip->bms_psy);
+ fg_relax(&chip->profile_wakeup_source);
+ return rc;
+reschedule:
+ schedule_delayed_work(
+ &chip->batt_profile_init,
+ msecs_to_jiffies(BATTERY_PSY_WAIT_MS));
+ fg_relax(&chip->profile_wakeup_source);
+ return 0;
+}
+
+static void check_empty_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ check_empty_work.work);
+
+ if (fg_is_batt_empty(chip)) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("EMPTY SOC high\n");
+ chip->soc_empty = true;
+ if (chip->power_supply_registered)
+ power_supply_changed(&chip->bms_psy);
+ }
+ fg_relax(&chip->empty_check_wakeup_source);
+}
+
+static void batt_profile_init(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ batt_profile_init.work);
+
+ if (fg_batt_profile_init(chip))
+ pr_err("failed to initialize profile\n");
+}
+
+static void sysfs_restart_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ sysfs_restart_work);
+ int rc;
+
+ rc = fg_do_restart(chip, false);
+ if (rc)
+ pr_err("fg restart failed: %d\n", rc);
+ mutex_lock(&chip->sysfs_restart_lock);
+ fg_restart = 0;
+ mutex_unlock(&chip->sysfs_restart_lock);
+}
+
+#define SRAM_MONOTONIC_SOC_REG 0x574
+#define SRAM_MONOTONIC_SOC_OFFSET 2
+#define SRAM_RELEASE_TIMEOUT_MS 500
+static void charge_full_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ charge_full_work);
+ int rc;
+ u8 buffer[3];
+ int bsoc;
+ int resume_soc_raw = FULL_SOC_RAW - settings[FG_MEM_RESUME_SOC].value;
+ bool disable = false;
+ u8 reg;
+
+ if (chip->status != POWER_SUPPLY_STATUS_FULL) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("battery not full: %d\n", chip->status);
+ disable = true;
+ }
+
+ fg_mem_lock(chip);
+ rc = fg_mem_read(chip, buffer, BATTERY_SOC_REG, 3, 1, 0);
+ if (rc) {
+ pr_err("Unable to read battery soc: %d\n", rc);
+ goto out;
+ }
+ if (buffer[2] <= resume_soc_raw) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("bsoc = 0x%02x <= resume = 0x%02x\n",
+ buffer[2], resume_soc_raw);
+ disable = true;
+ }
+ if (!disable)
+ goto out;
+
+ rc = fg_mem_write(chip, buffer, SOC_FULL_REG, 3,
+ SOC_FULL_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+ /* force a full soc value into the monotonic in order to display 100 */
+ buffer[0] = 0xFF;
+ buffer[1] = 0xFF;
+ rc = fg_mem_write(chip, buffer, SRAM_MONOTONIC_SOC_REG, 2,
+ SRAM_MONOTONIC_SOC_OFFSET, 0);
+ if (rc) {
+ pr_err("failed to write SOC_FULL rc=%d\n", rc);
+ goto out;
+ }
+ if (fg_debug_mask & FG_STATUS) {
+ bsoc = buffer[0] | buffer[1] << 8 | buffer[2] << 16;
+ pr_info("wrote %06x into soc full\n", bsoc);
+ }
+ fg_mem_release(chip);
+ /*
+ * wait one cycle to make sure the soc is updated before clearing
+ * the soc mask bit
+ */
+ fg_mem_lock(chip);
+ fg_mem_read(chip, &reg, PROFILE_INTEGRITY_REG, 1, 0, 0);
+out:
+ fg_mem_release(chip);
+ if (disable)
+ chip->charge_full = false;
+}
+
+static void update_bcl_thresholds(struct fg_chip *chip)
+{
+ u8 data[4];
+ u8 mh_offset = 0, lm_offset = 0;
+ u16 address = 0;
+ int ret = 0;
+
+ address = settings[FG_MEM_BCL_MH_THRESHOLD].address;
+ mh_offset = settings[FG_MEM_BCL_MH_THRESHOLD].offset;
+ lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
+ ret = fg_mem_read(chip, data, address, 4, 0, 1);
+ if (ret)
+ pr_err("Error reading BCL LM & MH threshold rc:%d\n", ret);
+ else
+ pr_debug("Old BCL LM threshold:%x MH threshold:%x\n",
+ data[lm_offset], data[mh_offset]);
+ BCL_MA_TO_ADC(settings[FG_MEM_BCL_MH_THRESHOLD].value, data[mh_offset]);
+ BCL_MA_TO_ADC(settings[FG_MEM_BCL_LM_THRESHOLD].value, data[lm_offset]);
+
+ ret = fg_mem_write(chip, data, address, 4, 0, 0);
+ if (ret)
+ pr_err("spmi write failed. addr:%03x, ret:%d\n",
+ address, ret);
+ else
+ pr_debug("New BCL LM threshold:%x MH threshold:%x\n",
+ data[lm_offset], data[mh_offset]);
+}
+
+static int disable_bcl_lpm(struct fg_chip *chip)
+{
+ u8 data[4];
+ u8 lm_offset = 0;
+ u16 address = 0;
+ int rc = 0;
+
+ address = settings[FG_MEM_BCL_LM_THRESHOLD].address;
+ lm_offset = settings[FG_MEM_BCL_LM_THRESHOLD].offset;
+ rc = fg_mem_read(chip, data, address, 4, 0, 1);
+ if (rc) {
+ pr_err("Error reading BCL LM & MH threshold rc:%d\n", rc);
+ return rc;
+ }
+ pr_debug("Old BCL LM threshold:%x\n", data[lm_offset]);
+
+ /* Put BCL always above LPM */
+ BCL_MA_TO_ADC(0, data[lm_offset]);
+
+ rc = fg_mem_write(chip, data, address, 4, 0, 0);
+ if (rc)
+ pr_err("spmi write failed. addr:%03x, rc:%d\n",
+ address, rc);
+ else
+ pr_debug("New BCL LM threshold:%x\n", data[lm_offset]);
+
+ return rc;
+}
+
+static void bcl_hi_power_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ bcl_hi_power_work);
+ int rc;
+
+ if (chip->bcl_lpm_disabled) {
+ rc = disable_bcl_lpm(chip);
+ if (rc)
+ pr_err("failed to disable bcl low mode %d\n",
+ rc);
+ } else {
+ update_bcl_thresholds(chip);
+ }
+}
+
+#define VOLT_UV_TO_VOLTCMP8(volt_uv) \
+ ((volt_uv - 2500000) / 9766)
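+/*
+ * The comparator code appears to start at 2.5 V with a ~9.766 mV step
+ * (a 2.5 V span over 256 codes). Worked example: a 3400 mV empty threshold
+ * maps to (3400000 - 2500000) / 9766 = 92.
+ */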
+static int update_irq_volt_empty(struct fg_chip *chip)
+{
+ u8 data;
+ int volt_mv = settings[FG_MEM_IRQ_VOLT_EMPTY].value;
+
+ data = (u8)VOLT_UV_TO_VOLTCMP8(volt_mv * 1000);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("voltage = %d, converted_raw = %04x\n", volt_mv, data);
+ return fg_mem_write(chip, &data,
+ settings[FG_MEM_IRQ_VOLT_EMPTY].address, 1,
+ settings[FG_MEM_IRQ_VOLT_EMPTY].offset, 0);
+}
+
+static int update_cutoff_voltage(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_voltage_raw;
+ s64 voltage_mv = settings[FG_MEM_CUTOFF_VOLTAGE].value;
+
+ converted_voltage_raw = (s16)MICROUNITS_TO_ADC_RAW(voltage_mv * 1000);
+ data[0] = cpu_to_le16(converted_voltage_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_voltage_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("voltage = %lld, converted_raw = %04x, data = %02x %02x\n",
+ voltage_mv, converted_voltage_raw, data[0], data[1]);
+ return fg_mem_write(chip, data, settings[FG_MEM_CUTOFF_VOLTAGE].address,
+ 2, settings[FG_MEM_CUTOFF_VOLTAGE].offset, 0);
+}
+
+static int update_iterm(struct fg_chip *chip)
+{
+ u8 data[2];
+ u16 converted_current_raw;
+ s64 current_ma = -settings[FG_MEM_TERM_CURRENT].value;
+
+ converted_current_raw = (s16)MICROUNITS_TO_ADC_RAW(current_ma * 1000);
+ data[0] = cpu_to_le16(converted_current_raw) & 0xFF;
+ data[1] = cpu_to_le16(converted_current_raw) >> 8;
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("current = %lld, converted_raw = %04x, data = %02x %02x\n",
+ current_ma, converted_current_raw, data[0], data[1]);
+ return fg_mem_write(chip, data, settings[FG_MEM_TERM_CURRENT].address,
+ 2, settings[FG_MEM_TERM_CURRENT].offset, 0);
+}
+
+#define OF_READ_SETTING(type, qpnp_dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->spmi->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &settings[type].value); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ pr_err("Error reading " #qpnp_dt_property \
+			" property rc = %d\n", retval);		\
+} while (0)
+
+#define OF_READ_PROPERTY(store, qpnp_dt_property, retval, default_val) \
+do { \
+ if (retval) \
+ break; \
+ \
+ retval = of_property_read_u32(chip->spmi->dev.of_node, \
+ "qcom," qpnp_dt_property, \
+ &store); \
+ \
+ if (retval == -EINVAL) { \
+ retval = 0; \
+ store = default_val; \
+ } else if (retval) { \
+ pr_err("Error reading " #qpnp_dt_property \
+			" property rc = %d\n", retval);		\
+ } \
+} while (0)
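+/*
+ * Both macros above bail out early once retval is non-zero, so fg_of_init()
+ * below can thread a single rc through a long run of property reads and
+ * report only the first real failure; -EINVAL (property absent) is forgiven
+ * for optional settings and falls back to the existing/default value.
+ */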
+
+#define DEFAULT_EVALUATION_CURRENT_MA 1000
+static int fg_of_init(struct fg_chip *chip)
+{
+ int rc = 0, sense_type, len = 0;
+ const char *data;
+ struct device_node *node = chip->spmi->dev.of_node;
+ u32 temp[2] = {0};
+
+ OF_READ_SETTING(FG_MEM_SOFT_HOT, "warm-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_SOFT_COLD, "cool-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_HARD_HOT, "hot-bat-decidegc", rc, 1);
+ OF_READ_SETTING(FG_MEM_HARD_COLD, "cold-bat-decidegc", rc, 1);
+
+ if (of_find_property(node, "qcom,cold-hot-jeita-hysteresis", NULL)) {
+ int hard_hot = 0, soft_hot = 0, hard_cold = 0, soft_cold = 0;
+
+ rc = of_property_read_u32_array(node,
+ "qcom,cold-hot-jeita-hysteresis", temp, 2);
+ if (rc) {
+ pr_err("Error reading cold-hot-jeita-hysteresis rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chip->jeita_hysteresis_support = true;
+ chip->cold_hysteresis = temp[0];
+ chip->hot_hysteresis = temp[1];
+ hard_hot = settings[FG_MEM_HARD_HOT].value;
+ soft_hot = settings[FG_MEM_SOFT_HOT].value;
+ hard_cold = settings[FG_MEM_HARD_COLD].value;
+ soft_cold = settings[FG_MEM_SOFT_COLD].value;
+ if (((hard_hot - chip->hot_hysteresis) < soft_hot) ||
+ ((hard_cold + chip->cold_hysteresis) > soft_cold)) {
+ chip->jeita_hysteresis_support = false;
+			pr_err("invalid hysteresis: hot_hysteresis = %d cold_hysteresis = %d\n",
+ chip->hot_hysteresis, chip->cold_hysteresis);
+ } else {
+ pr_debug("cold_hysteresis = %d, hot_hysteresis = %d\n",
+ chip->cold_hysteresis, chip->hot_hysteresis);
+ }
+ }
+
+ OF_READ_SETTING(FG_MEM_BCL_LM_THRESHOLD, "bcl-lm-threshold-ma",
+ rc, 1);
+ OF_READ_SETTING(FG_MEM_BCL_MH_THRESHOLD, "bcl-mh-threshold-ma",
+ rc, 1);
+ OF_READ_SETTING(FG_MEM_TERM_CURRENT, "fg-iterm-ma", rc, 1);
+ OF_READ_SETTING(FG_MEM_CHG_TERM_CURRENT, "fg-chg-iterm-ma", rc, 1);
+ OF_READ_SETTING(FG_MEM_CUTOFF_VOLTAGE, "fg-cutoff-voltage-mv", rc, 1);
+ data = of_get_property(chip->spmi->dev.of_node,
+ "qcom,thermal-coefficients", &len);
+ if (data && len == THERMAL_COEFF_N_BYTES) {
+ memcpy(chip->thermal_coefficients, data, len);
+ chip->use_thermal_coefficients = true;
+ }
+ OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc", rc, 1);
+ settings[FG_MEM_RESUME_SOC].value =
+ DIV_ROUND_CLOSEST(settings[FG_MEM_RESUME_SOC].value
+ * FULL_SOC_RAW, FULL_CAPACITY);
+ OF_READ_SETTING(FG_MEM_RESUME_SOC, "resume-soc-raw", rc, 1);
+ OF_READ_SETTING(FG_MEM_IRQ_VOLT_EMPTY, "irq-volt-empty-mv", rc, 1);
+ OF_READ_SETTING(FG_MEM_VBAT_EST_DIFF, "vbat-estimate-diff-mv", rc, 1);
+ OF_READ_SETTING(FG_MEM_DELTA_SOC, "fg-delta-soc", rc, 1);
+ OF_READ_SETTING(FG_MEM_BATT_LOW, "fg-vbatt-low-threshold", rc, 1);
+ OF_READ_SETTING(FG_MEM_THERM_DELAY, "fg-therm-delay-us", rc, 1);
+ OF_READ_PROPERTY(chip->learning_data.max_increment,
+ "cl-max-increment-deciperc", rc, 5);
+ OF_READ_PROPERTY(chip->learning_data.max_decrement,
+ "cl-max-decrement-deciperc", rc, 100);
+ OF_READ_PROPERTY(chip->learning_data.max_temp,
+ "cl-max-temp-decidegc", rc, 450);
+ OF_READ_PROPERTY(chip->learning_data.min_temp,
+ "cl-min-temp-decidegc", rc, 150);
+ OF_READ_PROPERTY(chip->learning_data.max_start_soc,
+ "cl-max-start-capacity", rc, 15);
+ OF_READ_PROPERTY(chip->learning_data.vbat_est_thr_uv,
+ "cl-vbat-est-thr-uv", rc, 40000);
+ OF_READ_PROPERTY(chip->evaluation_current,
+ "aging-eval-current-ma", rc,
+ DEFAULT_EVALUATION_CURRENT_MA);
+ OF_READ_PROPERTY(chip->cc_cv_threshold_mv,
+ "fg-cc-cv-threshold-mv", rc, 0);
+ if (of_property_read_bool(chip->spmi->dev.of_node,
+ "qcom,capacity-learning-on"))
+ chip->batt_aging_mode = FG_AGING_CC;
+ else if (of_property_read_bool(chip->spmi->dev.of_node,
+ "qcom,capacity-estimation-on"))
+ chip->batt_aging_mode = FG_AGING_ESR;
+ else
+ chip->batt_aging_mode = FG_AGING_NONE;
+ if (chip->batt_aging_mode == FG_AGING_CC) {
+ chip->learning_data.feedback_on = of_property_read_bool(
+ chip->spmi->dev.of_node,
+ "qcom,capacity-learning-feedback");
+ }
+ if (fg_debug_mask & FG_AGING)
+ pr_info("battery aging mode: %d\n", chip->batt_aging_mode);
+
+ /* Get the use-otp-profile property */
+ chip->use_otp_profile = of_property_read_bool(
+ chip->spmi->dev.of_node,
+ "qcom,use-otp-profile");
+ chip->hold_soc_while_full = of_property_read_bool(
+ chip->spmi->dev.of_node,
+ "qcom,hold-soc-while-full");
+
+ sense_type = of_property_read_bool(chip->spmi->dev.of_node,
+ "qcom,ext-sense-type");
+ if (rc == 0) {
+ if (fg_sense_type < 0)
+ fg_sense_type = sense_type;
+
+ if (fg_debug_mask & FG_STATUS) {
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ pr_info("Using internal sense\n");
+ else if (fg_sense_type == EXTERNAL_CURRENT_SENSE)
+ pr_info("Using external sense\n");
+ else
+ pr_info("Using default sense\n");
+ }
+ } else {
+ rc = 0;
+ }
+
+ chip->bad_batt_detection_en = of_property_read_bool(node,
+ "qcom,bad-battery-detection-enable");
+
+ chip->sw_rbias_ctrl = of_property_read_bool(node,
+ "qcom,sw-rbias-control");
+
+ chip->cyc_ctr.en = of_property_read_bool(node,
+ "qcom,cycle-counter-en");
+ if (chip->cyc_ctr.en)
+ chip->cyc_ctr.id = 1;
+
+ return rc;
+}
+
+static int fg_init_irqs(struct fg_chip *chip)
+{
+ int rc = 0;
+ struct resource *resource;
+ struct spmi_resource *spmi_resource;
+ u8 subtype;
+ struct spmi_device *spmi = chip->spmi;
+
+ spmi_for_each_container_dev(spmi_resource, spmi) {
+ if (!spmi_resource) {
+ pr_err("fg: spmi resource absent\n");
+ return rc;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ pr_err("node %s IO resource absent!\n",
+ spmi->dev.of_node->full_name);
+ return rc;
+ }
+
+ if ((resource->start == chip->vbat_adc_addr) ||
+ (resource->start == chip->ibat_adc_addr) ||
+ (resource->start == chip->tp_rev_addr))
+ continue;
+
+ rc = fg_read(chip, &subtype,
+ resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case FG_SOC:
+ chip->soc_irq[FULL_SOC].irq = spmi_get_irq_byname(
+ chip->spmi, spmi_resource, "full-soc");
+ if (chip->soc_irq[FULL_SOC].irq < 0) {
+ pr_err("Unable to get full-soc irq\n");
+ return rc;
+ }
+ chip->soc_irq[EMPTY_SOC].irq = spmi_get_irq_byname(
+ chip->spmi, spmi_resource, "empty-soc");
+ if (chip->soc_irq[EMPTY_SOC].irq < 0) {
+				pr_err("Unable to get empty-soc irq\n");
+ return rc;
+ }
+ chip->soc_irq[DELTA_SOC].irq = spmi_get_irq_byname(
+ chip->spmi, spmi_resource, "delta-soc");
+ if (chip->soc_irq[DELTA_SOC].irq < 0) {
+ pr_err("Unable to get delta-soc irq\n");
+ return rc;
+ }
+ chip->soc_irq[FIRST_EST_DONE].irq = spmi_get_irq_byname(
+ chip->spmi, spmi_resource, "first-est-done");
+ if (chip->soc_irq[FIRST_EST_DONE].irq < 0) {
+ pr_err("Unable to get first-est-done irq\n");
+ return rc;
+ }
+
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[FULL_SOC].irq,
+ fg_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "full-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d full-soc: %d\n",
+ chip->soc_irq[FULL_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[EMPTY_SOC].irq,
+ fg_empty_soc_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ "empty-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d empty-soc: %d\n",
+ chip->soc_irq[EMPTY_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[DELTA_SOC].irq,
+ fg_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "delta-soc", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d delta-soc: %d\n",
+ chip->soc_irq[DELTA_SOC].irq, rc);
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->soc_irq[FIRST_EST_DONE].irq,
+ fg_first_soc_irq_handler, IRQF_TRIGGER_RISING,
+ "first-est-done", chip);
+ if (rc < 0) {
+				pr_err("Can't request %d first-est-done: %d\n",
+ chip->soc_irq[FIRST_EST_DONE].irq, rc);
+ return rc;
+ }
+
+ enable_irq_wake(chip->soc_irq[DELTA_SOC].irq);
+ enable_irq_wake(chip->soc_irq[FULL_SOC].irq);
+ enable_irq_wake(chip->soc_irq[EMPTY_SOC].irq);
+ break;
+ case FG_MEMIF:
+ chip->mem_irq[FG_MEM_AVAIL].irq = spmi_get_irq_byname(
+ chip->spmi, spmi_resource, "mem-avail");
+ if (chip->mem_irq[FG_MEM_AVAIL].irq < 0) {
+ pr_err("Unable to get mem-avail irq\n");
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->mem_irq[FG_MEM_AVAIL].irq,
+ fg_mem_avail_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "mem-avail", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d mem-avail: %d\n",
+ chip->mem_irq[FG_MEM_AVAIL].irq, rc);
+ return rc;
+ }
+ break;
+ case FG_BATT:
+ chip->batt_irq[BATT_MISSING].irq = spmi_get_irq_byname(
+ chip->spmi, spmi_resource,
+ "batt-missing");
+ if (chip->batt_irq[BATT_MISSING].irq < 0) {
+ pr_err("Unable to get batt-missing irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_threaded_irq(chip->dev,
+ chip->batt_irq[BATT_MISSING].irq,
+ NULL,
+ fg_batt_missing_irq_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "batt-missing", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d batt-missing: %d\n",
+ chip->batt_irq[BATT_MISSING].irq, rc);
+ return rc;
+ }
+ chip->batt_irq[VBATT_LOW].irq = spmi_get_irq_byname(
+ chip->spmi, spmi_resource,
+ "vbatt-low");
+ if (chip->batt_irq[VBATT_LOW].irq < 0) {
+ pr_err("Unable to get vbatt-low irq\n");
+ rc = -EINVAL;
+ return rc;
+ }
+ rc = devm_request_irq(chip->dev,
+ chip->batt_irq[VBATT_LOW].irq,
+ fg_vbatt_low_handler,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ "vbatt-low", chip);
+ if (rc < 0) {
+ pr_err("Can't request %d vbatt-low: %d\n",
+ chip->batt_irq[VBATT_LOW].irq, rc);
+ return rc;
+ }
+ disable_irq_nosync(chip->batt_irq[VBATT_LOW].irq);
+ chip->vbat_low_irq_enabled = false;
+ break;
+ case FG_ADC:
+ break;
+ default:
+			pr_err("unknown peripheral subtype %d\n", subtype);
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
+static void fg_cleanup(struct fg_chip *chip)
+{
+ cancel_delayed_work_sync(&chip->update_sram_data);
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ cancel_delayed_work_sync(&chip->update_jeita_setting);
+ cancel_delayed_work_sync(&chip->check_empty_work);
+ cancel_delayed_work_sync(&chip->batt_profile_init);
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ cancel_work_sync(&chip->rslow_comp_work);
+ cancel_work_sync(&chip->set_resume_soc_work);
+ cancel_work_sync(&chip->fg_cap_learning_work);
+ cancel_work_sync(&chip->dump_sram);
+ cancel_work_sync(&chip->status_change_work);
+ cancel_work_sync(&chip->cycle_count_work);
+ cancel_work_sync(&chip->update_esr_work);
+ cancel_work_sync(&chip->sysfs_restart_work);
+ cancel_work_sync(&chip->gain_comp_work);
+ cancel_work_sync(&chip->init_work);
+ cancel_work_sync(&chip->charge_full_work);
+ power_supply_unregister(&chip->bms_psy);
+ mutex_destroy(&chip->rslow_comp.lock);
+ mutex_destroy(&chip->rw_lock);
+ mutex_destroy(&chip->cyc_ctr.lock);
+ mutex_destroy(&chip->learning_data.learning_lock);
+ mutex_destroy(&chip->sysfs_restart_lock);
+ wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->empty_check_wakeup_source.source);
+ wakeup_source_trash(&chip->memif_wakeup_source.source);
+ wakeup_source_trash(&chip->profile_wakeup_source.source);
+ wakeup_source_trash(&chip->update_temp_wakeup_source.source);
+ wakeup_source_trash(&chip->update_sram_wakeup_source.source);
+ wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
+ wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
+}
+
+static int fg_remove(struct spmi_device *spmi)
+{
+ struct fg_chip *chip = dev_get_drvdata(&spmi->dev);
+
+ fg_cleanup(chip);
+ dev_set_drvdata(&spmi->dev, NULL);
+ return 0;
+}
+
+static int fg_memif_data_open(struct inode *inode, struct file *file)
+{
+ struct fg_log_buffer *log;
+ struct fg_trans *trans;
+ u8 *data_buf;
+
+ size_t logbufsize = SZ_4K;
+ size_t databufsize = SZ_4K;
+
+ if (!dbgfs_data.chip) {
+ pr_err("debugfs data not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Per file "transaction" data */
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+ if (!trans) {
+ pr_err("Unable to allocate memory for transaction data\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate log buffer */
+ log = kzalloc(logbufsize, GFP_KERNEL);
+
+ if (!log) {
+ kfree(trans);
+ pr_err("Unable to allocate memory for log buffer\n");
+ return -ENOMEM;
+ }
+
+ log->rpos = 0;
+ log->wpos = 0;
+ log->len = logbufsize - sizeof(*log);
+
+ /* Allocate data buffer */
+ data_buf = kzalloc(databufsize, GFP_KERNEL);
+
+ if (!data_buf) {
+ kfree(trans);
+ kfree(log);
+ pr_err("Unable to allocate memory for data buffer\n");
+ return -ENOMEM;
+ }
+
+ trans->log = log;
+ trans->data = data_buf;
+ trans->cnt = dbgfs_data.cnt;
+ trans->addr = dbgfs_data.addr;
+ trans->chip = dbgfs_data.chip;
+ trans->offset = trans->addr;
+
+ file->private_data = trans;
+ return 0;
+}
+
+static int fg_memif_dfs_close(struct inode *inode, struct file *file)
+{
+ struct fg_trans *trans = file->private_data;
+
+ if (trans && trans->log && trans->data) {
+ file->private_data = NULL;
+ kfree(trans->log);
+ kfree(trans->data);
+ kfree(trans);
+ }
+
+ return 0;
+}
+
+/**
+ * print_to_log: format a string and place into the log buffer
+ * @log: The log buffer to place the result into.
+ * @fmt: The format string to use.
+ * @...: The arguments for the format string.
+ *
+ * The return value is the number of characters written to the @log buffer,
+ * not including the trailing '\0'.
+ */
+static int print_to_log(struct fg_log_buffer *log, const char *fmt, ...)
+{
+ va_list args;
+ int cnt;
+ char *buf = &log->data[log->wpos];
+ size_t size = log->len - log->wpos;
+
+ va_start(args, fmt);
+ cnt = vscnprintf(buf, size, fmt, args);
+ va_end(args);
+
+ log->wpos += cnt;
+ return cnt;
+}
+
+/**
+ * write_next_line_to_log: Writes a single "line" of data into the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ * @offset: SRAM address offset to start reading from.
+ * @pcnt: Pointer to 'cnt' variable. Indicates the number of bytes to read.
+ *
+ * The 'offset' is a 12-bit SRAM address.
+ *
+ * On a successful read, @pcnt is decremented by the number of data
+ * bytes read from the SRAM. When it reaches 0, all requested bytes have
+ * been read.
+ */
+static int
+write_next_line_to_log(struct fg_trans *trans, int offset, size_t *pcnt)
+{
+ int i, j;
+ u8 data[ITEMS_PER_LINE];
+ struct fg_log_buffer *log = trans->log;
+
+ int cnt = 0;
+ int padding = offset % ITEMS_PER_LINE;
+ int items_to_read = min(ARRAY_SIZE(data) - padding, *pcnt);
+ int items_to_log = min(ITEMS_PER_LINE, padding + items_to_read);
+
+ /* Buffer needs enough space for an entire line */
+ if ((log->len - log->wpos) < MAX_LINE_LENGTH)
+ goto done;
+
+ memcpy(data, trans->data + (offset - trans->addr), items_to_read);
+
+ *pcnt -= items_to_read;
+
+ /* Each line starts with the aligned offset (12-bit address) */
+ cnt = print_to_log(log, "%3.3X ", offset & 0xfff);
+ if (cnt == 0)
+ goto done;
+
+ /* If the offset is unaligned, add padding to right justify items */
+ for (i = 0; i < padding; ++i) {
+ cnt = print_to_log(log, "-- ");
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* Log the data items */
+ for (j = 0; i < items_to_log; ++i, ++j) {
+ cnt = print_to_log(log, "%2.2X ", data[j]);
+ if (cnt == 0)
+ goto done;
+ }
+
+ /* If the last character was a space, then replace it with a newline */
+ if (log->wpos > 0 && log->data[log->wpos - 1] == ' ')
+ log->data[log->wpos - 1] = '\n';
+
+done:
+ return cnt;
+}
+
+/**
+ * get_log_data - reads data from SRAM and saves to the log buffer
+ * @trans: Pointer to SRAM transaction data.
+ *
+ * Returns the number of "items" read or SPMI error code for read failures.
+ */
+static int get_log_data(struct fg_trans *trans)
+{
+ int cnt, rc;
+ int last_cnt;
+ int items_read;
+ int total_items_read = 0;
+ u32 offset = trans->offset;
+ size_t item_cnt = trans->cnt;
+ struct fg_log_buffer *log = trans->log;
+
+ if (item_cnt == 0)
+ return 0;
+
+ if (item_cnt > SZ_4K) {
+ pr_err("Reading too many bytes\n");
+ return -EINVAL;
+ }
+
+ rc = fg_mem_read(trans->chip, trans->data,
+ trans->addr, trans->cnt, 0, 0);
+ if (rc) {
+ pr_err("dump failed: rc = %d\n", rc);
+ return rc;
+ }
+ /* Reset the log buffer 'pointers' */
+ log->wpos = log->rpos = 0;
+
+ /* Keep reading data until the log is full */
+ do {
+ last_cnt = item_cnt;
+ cnt = write_next_line_to_log(trans, offset, &item_cnt);
+ items_read = last_cnt - item_cnt;
+ offset += items_read;
+ total_items_read += items_read;
+ } while (cnt && item_cnt > 0);
+
+ /* Adjust the transaction offset and count */
+ trans->cnt = item_cnt;
+ trans->offset += total_items_read;
+
+ return total_items_read;
+}
+
+/**
+ * fg_memif_dfs_reg_read: reads value(s) from SRAM and fills the user's buffer
+ * with a byte array (coded as a string)
+ * @file: file pointer
+ * @buf: where to put the result
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes read, or negative error value
+ */
+static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct fg_trans *trans = file->private_data;
+ struct fg_log_buffer *log = trans->log;
+ size_t ret;
+ size_t len;
+
+ /* Is the log buffer empty? */
+ if (log->rpos >= log->wpos) {
+ if (get_log_data(trans) <= 0)
+ return 0;
+ }
+
+ len = min(count, log->wpos - log->rpos);
+
+ ret = copy_to_user(buf, &log->data[log->rpos], len);
+ if (ret == len) {
+ pr_err("error copying SRAM register values to user\n");
+ return -EFAULT;
+ }
+
+ /* 'ret' is the number of bytes not copied */
+ len -= ret;
+
+ *ppos += len;
+ log->rpos += len;
+ return len;
+}
+
+/**
+ * fg_memif_dfs_reg_write: write user's byte array (coded as string) to SRAM.
+ * @file: file pointer
+ * @buf: user data to be written.
+ * @count: maximum space available in @buf
+ * @ppos: starting position
+ * @return number of user bytes written, or negative error value
+ */
+static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int bytes_read;
+ int data;
+ int pos = 0;
+ int cnt = 0;
+ u8 *values;
+ size_t ret = 0;
+
+ struct fg_trans *trans = file->private_data;
+ u32 offset = trans->offset;
+
+ /* Make a copy of the user data */
+ char *kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ ret = copy_from_user(kbuf, buf, count);
+ if (ret == count) {
+ pr_err("failed to copy data from user\n");
+ ret = -EFAULT;
+ goto free_buf;
+ }
+
+ count -= ret;
+ *ppos += count;
+ kbuf[count] = '\0';
+
+ /* Overwrite the text buffer with the raw data */
+ values = kbuf;
+
+ /* Parse the data in the buffer. It should be a string of numbers */
+ while (sscanf(kbuf + pos, "%i%n", &data, &bytes_read) == 1) {
+ pos += bytes_read;
+ values[cnt++] = data & 0xff;
+ }
+
+ if (!cnt)
+ goto free_buf;
+
+ pr_info("address %x, count %d\n", offset, cnt);
+ /* Perform the write(s) */
+
+ ret = fg_mem_write(trans->chip, values, offset,
+ cnt, 0, 0);
+ if (ret) {
+ pr_err("SPMI write failed, err = %zu\n", ret);
+ } else {
+ ret = count;
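+ /* advance the saved SRAM offset by the bytes written, capped at 4 */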
+ trans->offset += cnt > 4 ? 4 : cnt;
+ }
+
+free_buf:
+ kfree(kbuf);
+ return ret;
+}
+
+static const struct file_operations fg_memif_dfs_reg_fops = {
+ .open = fg_memif_data_open,
+ .release = fg_memif_dfs_close,
+ .read = fg_memif_dfs_reg_read,
+ .write = fg_memif_dfs_reg_write,
+};
+
+/**
+ * fg_dfs_create_fs: create debugfs file system.
+ * @return pointer to root directory or NULL if failed to create fs
+ */
+static struct dentry *fg_dfs_create_fs(void)
+{
+ struct dentry *root, *file;
+
+ pr_debug("Creating FG_MEM debugfs file-system\n");
+ root = debugfs_create_dir(DFS_ROOT_NAME, NULL);
+ if (IS_ERR_OR_NULL(root)) {
+ pr_err("Error creating top level directory err:%ld\n",
+ (long)root);
+ if (PTR_ERR(root) == -ENODEV)
+ pr_err("debugfs is not enabled in the kernel\n");
+ return NULL;
+ }
+
+ dbgfs_data.help_msg.size = strlen(dbgfs_data.help_msg.data);
+
+ file = debugfs_create_blob("help", S_IRUGO, root, &dbgfs_data.help_msg);
+ if (!file) {
+ pr_err("error creating help entry\n");
+ goto err_remove_fs;
+ }
+ return root;
+
+err_remove_fs:
+ debugfs_remove_recursive(root);
+ return NULL;
+}
+
+/**
+ * fg_dfs_get_root: return a pointer to the FG debugfs root directory.
+ * @return a pointer to the existing root directory, or, if none exists yet,
+ * create one. The directory holds the files that configure the SRAM
+ * transaction, namely "address" and "count".
+ * Returns a valid pointer on success or NULL on failure.
+ */
+struct dentry *fg_dfs_get_root(void)
+{
+ if (dbgfs_data.root)
+ return dbgfs_data.root;
+
+ if (mutex_lock_interruptible(&dbgfs_data.lock) < 0)
+ return NULL;
+ /* critical section */
+ if (!dbgfs_data.root) { /* double checking idiom */
+ dbgfs_data.root = fg_dfs_create_fs();
+ }
+ mutex_unlock(&dbgfs_data.lock);
+ return dbgfs_data.root;
+}
+
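+/*
+ * Usage sketch (debugfs is typically mounted at /sys/kernel/debug): write the
+ * SRAM start address to "address" and the byte count to "count", then read
+ * "data" to dump that range, or write space-separated byte values to it to
+ * program the SRAM.
+ */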
+/*
+ * fg_dfs_create: adds the fg_mem_if debugfs entries
+ * @return zero on success
+ */
+int fg_dfs_create(struct fg_chip *chip)
+{
+ struct dentry *root;
+ struct dentry *file;
+
+ root = fg_dfs_get_root();
+ if (!root)
+ return -ENOENT;
+
+ dbgfs_data.chip = chip;
+
+ file = debugfs_create_u32("count", DFS_MODE, root, &(dbgfs_data.cnt));
+ if (!file) {
+ pr_err("error creating 'count' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_x32("address", DFS_MODE,
+ root, &(dbgfs_data.addr));
+ if (!file) {
+ pr_err("error creating 'address' entry\n");
+ goto err_remove_fs;
+ }
+
+ file = debugfs_create_file("data", DFS_MODE, root, &dbgfs_data,
+ &fg_memif_dfs_reg_fops);
+ if (!file) {
+ pr_err("error creating 'data' entry\n");
+ goto err_remove_fs;
+ }
+
+ return 0;
+
+err_remove_fs:
+ debugfs_remove_recursive(root);
+ return -ENOMEM;
+}
+
+#define EXTERNAL_SENSE_OFFSET_REG 0x41C
+#define EXT_OFFSET_TRIM_REG 0xF8
+#define SEC_ACCESS_REG 0xD0
+#define SEC_ACCESS_UNLOCK 0xA5
+#define BCL_TRIM_REV_FIXED 12
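+/*
+ * On parts with an older trim revision (< BCL_TRIM_REV_FIXED), copy the
+ * external-sense offset trim from SRAM into the SOC peripheral's trim
+ * register. The trim register is write-protected, so a secure-access unlock
+ * (writing 0xA5 to offset 0xD0) precedes the write.
+ */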
+static int bcl_trim_workaround(struct fg_chip *chip)
+{
+ u8 reg;
+ int rc;
+
+ if (chip->tp_rev_addr == 0)
+ return 0;
+
+ rc = fg_read(chip, &reg, chip->tp_rev_addr, 1);
+ if (rc) {
+ pr_err("Failed to read tp reg, rc = %d\n", rc);
+ return rc;
+ }
+ if (reg >= BCL_TRIM_REV_FIXED) {
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("workaround not applied, tp_rev = %d\n", reg);
+ return 0;
+ }
+
+ rc = fg_mem_read(chip, &reg, EXTERNAL_SENSE_OFFSET_REG, 1, 2, 0);
+ if (rc) {
+ pr_err("Failed to read ext sense offset trim, rc = %d\n", rc);
+ return rc;
+ }
+ rc = fg_masked_write(chip, chip->soc_base + SEC_ACCESS_REG,
+ SEC_ACCESS_UNLOCK, SEC_ACCESS_UNLOCK, 1);
+
+ rc |= fg_masked_write(chip, chip->soc_base + EXT_OFFSET_TRIM_REG,
+ 0xFF, reg, 1);
+ if (rc) {
+ pr_err("Failed to write ext sense offset trim, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define FG_ALG_SYSCTL_1 0x4B0
+#define SOC_CNFG 0x450
+#define SOC_DELTA_OFFSET 3
+#define DELTA_SOC_PERCENT 1
+#define I_TERM_QUAL_BIT BIT(1)
+#define PATCH_NEG_CURRENT_BIT BIT(3)
+#define KI_COEFF_PRED_FULL_ADDR 0x408
+#define KI_COEFF_PRED_FULL_4_0_MSB 0x88
+#define KI_COEFF_PRED_FULL_4_0_LSB 0x00
+#define TEMP_FRAC_SHIFT_REG 0x4A4
+#define FG_ADC_CONFIG_REG 0x4B8
+#define FG_BCL_CONFIG_OFFSET 0x3
+#define BCL_FORCED_HPM_IN_CHARGE BIT(2)
+static int fg_common_hw_init(struct fg_chip *chip)
+{
+ int rc;
+ int resume_soc_raw;
+
+ update_iterm(chip);
+ update_cutoff_voltage(chip);
+ update_irq_volt_empty(chip);
+ update_bcl_thresholds(chip);
+
+ resume_soc_raw = settings[FG_MEM_RESUME_SOC].value;
+ if (resume_soc_raw > 0) {
+ rc = fg_set_resume_soc(chip, resume_soc_raw);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG\n");
+ return rc;
+ }
+ } else {
+ pr_info("FG auto recharge threshold not specified in DT\n");
+ }
+
+ if (fg_sense_type >= 0) {
+ rc = set_prop_sense_type(chip, fg_sense_type);
+ if (rc) {
+ pr_err("failed to config sense type %d rc=%d\n",
+ fg_sense_type, rc);
+ return rc;
+ }
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_DELTA_SOC].address, 0xFF,
+ soc_to_setpoint(settings[FG_MEM_DELTA_SOC].value),
+ settings[FG_MEM_DELTA_SOC].offset);
+ if (rc) {
+ pr_err("failed to write delta soc rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_BATT_LOW].address, 0xFF,
+ batt_to_setpoint_8b(settings[FG_MEM_BATT_LOW].value),
+ settings[FG_MEM_BATT_LOW].offset);
+ if (rc) {
+ pr_err("failed to write Vbatt_low rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, settings[FG_MEM_THERM_DELAY].address,
+ THERM_DELAY_MASK,
+ therm_delay_to_setpoint(settings[FG_MEM_THERM_DELAY].value),
+ settings[FG_MEM_THERM_DELAY].offset);
+ if (rc) {
+ pr_err("failed to write therm_delay rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->use_thermal_coefficients) {
+ fg_mem_write(chip, chip->thermal_coefficients,
+ THERMAL_COEFF_ADDR, THERMAL_COEFF_N_BYTES,
+ THERMAL_COEFF_OFFSET, 0);
+ }
+
+ if (!chip->sw_rbias_ctrl) {
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ BATT_TEMP_CNTRL_MASK,
+ TEMP_SENSE_ALWAYS_BIT,
+ BATT_TEMP_OFFSET);
+ if (rc) {
+ pr_err("failed to write BATT_TEMP_OFFSET rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Read the cycle counter back from FG SRAM */
+ if (chip->cyc_ctr.en)
+ restore_cycle_counter(chip);
+
+ return 0;
+}
+
+static int fg_8994_hw_init(struct fg_chip *chip)
+{
+ int rc = 0;
+ u8 data[4];
+ u64 esr_value;
+
+ rc = fg_mem_masked_write(chip, EXTERNAL_SENSE_SELECT,
+ PATCH_NEG_CURRENT_BIT,
+ PATCH_NEG_CURRENT_BIT,
+ EXTERNAL_SENSE_OFFSET);
+ if (rc) {
+ pr_err("failed to write patch current bit rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = bcl_trim_workaround(chip);
+ if (rc) {
+ pr_err("failed to redo bcl trim rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_mem_masked_write(chip, FG_ALG_SYSCTL_1, I_TERM_QUAL_BIT, 0, 0);
+
+ data[0] = 0xA2;
+ data[1] = 0x12;
+
+ rc = fg_mem_write(chip, data, TEMP_FRAC_SHIFT_REG, 2, 2, 0);
+ if (rc) {
+ pr_err("failed to write temp ocv constants rc=%d\n", rc);
+ return rc;
+ }
+
+ data[0] = KI_COEFF_PRED_FULL_4_0_LSB;
+ data[1] = KI_COEFF_PRED_FULL_4_0_MSB;
+ fg_mem_write(chip, data, KI_COEFF_PRED_FULL_ADDR, 2, 2, 0);
+
+ esr_value = ESR_DEFAULT_VALUE;
+ rc = fg_mem_write(chip, (u8 *)&esr_value, MAXRSCHANGE_REG, 8,
+ ESR_VALUE_OFFSET, 0);
+ if (rc)
+ pr_err("failed to write default ESR value rc=%d\n", rc);
+ else
+ pr_debug("set default value to esr filter\n");
+
+ return 0;
+}
+
+#define FG_USBID_CONFIG_OFFSET 0x2
+#define DISABLE_USBID_DETECT_BIT BIT(0)
+static int fg_8996_hw_init(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable usbid conversions for PMi8996 V1.0 */
+ if (chip->pmic_revision[REVID_DIG_MAJOR] == 1
+ && chip->pmic_revision[REVID_ANA_MAJOR] == 0) {
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ DISABLE_USBID_DETECT_BIT,
+ 0, FG_USBID_CONFIG_OFFSET);
+ if (rc) {
+ pr_err("failed to enable usbid conversions: %d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int fg_8950_hw_init(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_mem_masked_write(chip, FG_ADC_CONFIG_REG,
+ BCL_FORCED_HPM_IN_CHARGE,
+ BCL_FORCED_HPM_IN_CHARGE,
+ FG_BCL_CONFIG_OFFSET);
+ if (rc)
+ pr_err("failed to force hpm in charge rc=%d\n", rc);
+
+ return rc;
+}
+
+static int fg_hw_init(struct fg_chip *chip)
+{
+ int rc = 0;
+
+ rc = fg_common_hw_init(chip);
+ if (rc) {
+ pr_err("Unable to initialize FG HW rc=%d\n", rc);
+ return rc;
+ }
+
+ /* add PMIC specific hw init */
+ switch (chip->pmic_subtype) {
+ case PMI8994:
+ rc = fg_8994_hw_init(chip);
+ chip->wa_flag |= PULSE_REQUEST_WA;
+ break;
+ case PMI8996:
+ rc = fg_8996_hw_init(chip);
+ /* Setup workaround flag based on PMIC type */
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ chip->wa_flag |= IADC_GAIN_COMP_WA;
+ if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ chip->wa_flag |= USE_CC_SOC_REG;
+
+ break;
+ case PMI8950:
+ case PMI8937:
+ rc = fg_8950_hw_init(chip);
+ /* Setup workaround flag based on PMIC type */
+ chip->wa_flag |= BCL_HI_POWER_FOR_CHGLED_WA;
+ if (fg_sense_type == INTERNAL_CURRENT_SENSE)
+ chip->wa_flag |= IADC_GAIN_COMP_WA;
+ if (chip->pmic_revision[REVID_DIG_MAJOR] > 1)
+ chip->wa_flag |= USE_CC_SOC_REG;
+
+ break;
+ }
+ if (rc)
+ pr_err("Unable to initialize PMIC specific FG HW rc=%d\n", rc);
+
+ pr_debug("wa_flag=0x%x\n", chip->wa_flag);
+
+ return rc;
+}
+
+#define DIG_MINOR 0x0
+#define DIG_MAJOR 0x1
+#define ANA_MINOR 0x2
+#define ANA_MAJOR 0x3
+#define IACS_INTR_SRC_SLCT BIT(3)
+static int fg_setup_memif_offset(struct fg_chip *chip)
+{
+ int rc;
+
+ rc = fg_read(chip, chip->revision, chip->mem_base + DIG_MINOR, 4);
+ if (rc) {
+ pr_err("Unable to read FG revision rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (chip->revision[DIG_MAJOR]) {
+ case DIG_REV_1:
+ case DIG_REV_2:
+ chip->offset = offset[0].address;
+ break;
+ case DIG_REV_3:
+ chip->offset = offset[1].address;
+ chip->ima_supported = true;
+ break;
+ default:
+ pr_err("Digital Major rev=%d not supported\n",
+ chip->revision[DIG_MAJOR]);
+ return -EINVAL;
+ }
+
+ if (chip->ima_supported) {
+ /*
+ * Change the FG_MEM_INT interrupt to track IACS_READY
+ * condition instead of end-of-transaction. This makes sure
+ * that the next transaction starts only after the hw is ready.
+ */
+ rc = fg_masked_write(chip,
+ chip->mem_base + MEM_INTF_IMA_CFG, IACS_INTR_SRC_SLCT,
+ IACS_INTR_SRC_SLCT, 1);
+ if (rc) {
+ pr_err("failed to configure interrupt source %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int fg_detect_pmic_type(struct fg_chip *chip)
+{
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+
+ revid_dev_node = of_parse_phandle(chip->spmi->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property - driver failed\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR(pmic_rev_id)) {
+ pr_err("Unable to get pmic_revid rc=%ld\n",
+ PTR_ERR(pmic_rev_id));
+ /*
+ * The revid peripheral must be registered; any failure
+ * here only indicates that the rev-id module has not
+ * probed yet.
+ */
+ return -EPROBE_DEFER;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PMI8994:
+ case PMI8950:
+ case PMI8937:
+ case PMI8996:
+ chip->pmic_subtype = pmic_rev_id->pmic_subtype;
+ chip->pmic_revision[REVID_RESERVED] = pmic_rev_id->rev1;
+ chip->pmic_revision[REVID_VARIANT] = pmic_rev_id->rev2;
+ chip->pmic_revision[REVID_ANA_MAJOR] = pmic_rev_id->rev3;
+ chip->pmic_revision[REVID_DIG_MAJOR] = pmic_rev_id->rev4;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported\n",
+ pmic_rev_id->pmic_subtype);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define INIT_JEITA_DELAY_MS 1000
+
+static void delayed_init_work(struct work_struct *work)
+{
+ u8 reg[2];
+ int rc;
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ init_work);
+
+ /* hold memory access until initialization finishes */
+ fg_mem_lock(chip);
+
+ rc = fg_hw_init(chip);
+ if (rc) {
+ pr_err("HW init failed rc = %d\n", rc);
+ fg_mem_release(chip);
+ fg_cleanup(chip);
+ return;
+ }
+ /* release memory access before update_sram_data is called */
+ fg_mem_release(chip);
+
+ schedule_delayed_work(
+ &chip->update_jeita_setting,
+ msecs_to_jiffies(INIT_JEITA_DELAY_MS));
+
+ if (chip->last_sram_update_time == 0)
+ update_sram_data_work(&chip->update_sram_data.work);
+
+ if (chip->last_temp_update_time == 0)
+ update_temp_data(&chip->update_temp_work.work);
+
+ if (!chip->use_otp_profile)
+ schedule_delayed_work(&chip->batt_profile_init, 0);
+
+ if (chip->wa_flag & IADC_GAIN_COMP_WA) {
+ /* read default gain config */
+ rc = fg_mem_read(chip, reg, K_VCOR_REG, 2, DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read default gain rc=%d\n", rc);
+ goto done;
+ }
+
+ if (reg[1] || reg[0]) {
+ /*
+ * Default gain register has valid value:
+ * - write to gain register.
+ */
+ rc = fg_mem_write(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write gain rc=%d\n", rc);
+ goto done;
+ }
+ } else {
+ /*
+ * Default gain register is invalid:
+ * - read gain register for default gain value
+ * - write to default gain register.
+ */
+ rc = fg_mem_read(chip, reg, GAIN_REG, 2,
+ GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to read gain rc=%d\n", rc);
+ goto done;
+ }
+ rc = fg_mem_write(chip, reg, K_VCOR_REG, 2,
+ DEF_GAIN_OFFSET, 0);
+ if (rc) {
+ pr_err("Failed to write default gain rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ chip->iadc_comp_data.dfl_gain_reg[0] = reg[0];
+ chip->iadc_comp_data.dfl_gain_reg[1] = reg[1];
+ chip->iadc_comp_data.dfl_gain = half_float(reg);
+ chip->input_present = is_input_present(chip);
+ chip->otg_present = is_otg_present(chip);
+ chip->init_done = true;
+
+ pr_debug("IADC gain initial config reg_val 0x%x%x gain %lld\n",
+ reg[1], reg[0], chip->iadc_comp_data.dfl_gain);
+ }
+
+ pr_debug("FG: HW_init success\n");
+
+ return;
+done:
+ fg_cleanup(chip);
+}
+
+static int fg_probe(struct spmi_device *spmi)
+{
+ struct device *dev = &(spmi->dev);
+ struct fg_chip *chip;
+ struct spmi_resource *spmi_resource;
+ struct resource *resource;
+ u8 subtype, reg;
+ int rc = 0;
+
+ if (!spmi) {
+ pr_err("no valid spmi pointer\n");
+ return -ENODEV;
+ }
+
+ if (!spmi->dev.of_node) {
+ pr_err("device node missing\n");
+ return -ENODEV;
+ }
+
+ chip = devm_kzalloc(dev, sizeof(struct fg_chip), GFP_KERNEL);
+ if (chip == NULL) {
+ pr_err("Can't allocate fg_chip\n");
+ return -ENOMEM;
+ }
+
+ chip->spmi = spmi;
+ chip->dev = &(spmi->dev);
+
+ wakeup_source_init(&chip->empty_check_wakeup_source.source,
+ "qpnp_fg_empty_check");
+ wakeup_source_init(&chip->memif_wakeup_source.source,
+ "qpnp_fg_memaccess");
+ wakeup_source_init(&chip->profile_wakeup_source.source,
+ "qpnp_fg_profile");
+ wakeup_source_init(&chip->update_temp_wakeup_source.source,
+ "qpnp_fg_update_temp");
+ wakeup_source_init(&chip->update_sram_wakeup_source.source,
+ "qpnp_fg_update_sram");
+ wakeup_source_init(&chip->resume_soc_wakeup_source.source,
+ "qpnp_fg_set_resume_soc");
+ wakeup_source_init(&chip->gain_comp_wakeup_source.source,
+ "qpnp_fg_gain_comp");
+ wakeup_source_init(&chip->capacity_learning_wakeup_source.source,
+ "qpnp_fg_cap_learning");
+ mutex_init(&chip->rw_lock);
+ mutex_init(&chip->cyc_ctr.lock);
+ mutex_init(&chip->learning_data.learning_lock);
+ mutex_init(&chip->rslow_comp.lock);
+ mutex_init(&chip->sysfs_restart_lock);
+ INIT_DELAYED_WORK(&chip->update_jeita_setting, update_jeita_setting);
+ INIT_DELAYED_WORK(&chip->update_sram_data, update_sram_data_work);
+ INIT_DELAYED_WORK(&chip->update_temp_work, update_temp_data);
+ INIT_DELAYED_WORK(&chip->check_empty_work, check_empty_work);
+ INIT_DELAYED_WORK(&chip->batt_profile_init, batt_profile_init);
+ INIT_WORK(&chip->rslow_comp_work, rslow_comp_work);
+ INIT_WORK(&chip->fg_cap_learning_work, fg_cap_learning_work);
+ INIT_WORK(&chip->dump_sram, dump_sram);
+ INIT_WORK(&chip->status_change_work, status_change_work);
+ INIT_WORK(&chip->cycle_count_work, update_cycle_count);
+ INIT_WORK(&chip->battery_age_work, battery_age_work);
+ INIT_WORK(&chip->update_esr_work, update_esr_value);
+ INIT_WORK(&chip->set_resume_soc_work, set_resume_soc_work);
+ INIT_WORK(&chip->sysfs_restart_work, sysfs_restart_work);
+ INIT_WORK(&chip->init_work, delayed_init_work);
+ INIT_WORK(&chip->charge_full_work, charge_full_work);
+ INIT_WORK(&chip->gain_comp_work, iadc_gain_comp_work);
+ INIT_WORK(&chip->bcl_hi_power_work, bcl_hi_power_work);
+ alarm_init(&chip->fg_cap_learning_alarm, ALARM_BOOTTIME,
+ fg_cap_learning_alarm_cb);
+ init_completion(&chip->sram_access_granted);
+ init_completion(&chip->sram_access_revoked);
+ complete_all(&chip->sram_access_revoked);
+ init_completion(&chip->batt_id_avail);
+ init_completion(&chip->first_soc_done);
+ dev_set_drvdata(&spmi->dev, chip);
+
+ spmi_for_each_container_dev(spmi_resource, spmi) {
+ if (!spmi_resource) {
+ pr_err("fg: spmi resource absent\n");
+ rc = -ENXIO;
+ goto of_init_fail;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ pr_err("node %s IO resource absent!\n",
+ spmi->dev.of_node->full_name);
+ rc = -ENXIO;
+ goto of_init_fail;
+ }
+
+ if (strcmp("qcom,fg-adc-vbat",
+ spmi_resource->of_node->name) == 0) {
+ chip->vbat_adc_addr = resource->start;
+ continue;
+ } else if (strcmp("qcom,fg-adc-ibat",
+ spmi_resource->of_node->name) == 0) {
+ chip->ibat_adc_addr = resource->start;
+ continue;
+ } else if (strcmp("qcom,revid-tp-rev",
+ spmi_resource->of_node->name) == 0) {
+ chip->tp_rev_addr = resource->start;
+ continue;
+ }
+
+ rc = fg_read(chip, &subtype,
+ resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ pr_err("Peripheral subtype read failed rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ switch (subtype) {
+ case FG_SOC:
+ chip->soc_base = resource->start;
+ break;
+ case FG_MEMIF:
+ chip->mem_base = resource->start;
+ break;
+ case FG_BATT:
+ chip->batt_base = resource->start;
+ break;
+ default:
+ pr_err("Invalid peripheral subtype=0x%x\n", subtype);
+ rc = -EINVAL;
+ }
+ }
+
+ rc = fg_detect_pmic_type(chip);
+ if (rc) {
+ pr_err("Unable to detect PMIC type rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_setup_memif_offset(chip);
+ if (rc) {
+ pr_err("Unable to setup mem_if offsets rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_of_init(chip);
+ if (rc) {
+ pr_err("failed to parse devicetree rc=%d\n", rc);
+ goto of_init_fail;
+ }
+
+ if (chip->jeita_hysteresis_support) {
+ rc = fg_init_batt_temp_state(chip);
+ if (rc) {
+ pr_err("failed to get battery status rc=%d\n", rc);
+ goto of_init_fail;
+ }
+ }
+
+ /* check if the first estimate is already finished at this time */
+ if (is_first_est_done(chip))
+ complete_all(&chip->first_soc_done);
+
+ reg = 0xFF;
+ rc = fg_write(chip, &reg, INT_EN_CLR(chip->mem_base), 1);
+ if (rc) {
+ pr_err("failed to clear interrupts %d\n", rc);
+ goto of_init_fail;
+ }
+
+ rc = fg_init_irqs(chip);
+ if (rc) {
+ pr_err("failed to request interrupts %d\n", rc);
+ goto cancel_work;
+ }
+
+ chip->batt_type = default_batt_type;
+
+ chip->bms_psy.name = "bms";
+ chip->bms_psy.type = POWER_SUPPLY_TYPE_BMS;
+ chip->bms_psy.properties = fg_power_props;
+ chip->bms_psy.num_properties = ARRAY_SIZE(fg_power_props);
+ chip->bms_psy.get_property = fg_power_get_property;
+ chip->bms_psy.set_property = fg_power_set_property;
+ chip->bms_psy.external_power_changed = fg_external_power_changed;
+ chip->bms_psy.supplied_to = fg_supplicants;
+ chip->bms_psy.num_supplicants = ARRAY_SIZE(fg_supplicants);
+ chip->bms_psy.property_is_writeable = fg_property_is_writeable;
+
+ rc = power_supply_register(chip->dev, &chip->bms_psy);
+ if (rc < 0) {
+ pr_err("bms psy failed to register rc = %d\n", rc);
+ goto of_init_fail;
+ }
+ chip->power_supply_registered = true;
+ /*
+ * Just initialize the batt_psy_name here. Power supply
+ * will be obtained later.
+ */
+ chip->batt_psy_name = "battery";
+
+ if (chip->mem_base) {
+ rc = fg_dfs_create(chip);
+ if (rc < 0) {
+ pr_err("failed to create debugfs rc = %d\n", rc);
+ goto power_supply_unregister;
+ }
+ }
+
+ schedule_work(&chip->init_work);
+
+ pr_info("FG Probe success - FG Revision DIG:%d.%d ANA:%d.%d PMIC subtype=%d\n",
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR],
+ chip->pmic_subtype);
+
+ return rc;
+
+power_supply_unregister:
+ power_supply_unregister(&chip->bms_psy);
+cancel_work:
+ cancel_delayed_work_sync(&chip->update_jeita_setting);
+ cancel_delayed_work_sync(&chip->update_sram_data);
+ cancel_delayed_work_sync(&chip->update_temp_work);
+ cancel_delayed_work_sync(&chip->check_empty_work);
+ cancel_delayed_work_sync(&chip->batt_profile_init);
+ alarm_try_to_cancel(&chip->fg_cap_learning_alarm);
+ cancel_work_sync(&chip->set_resume_soc_work);
+ cancel_work_sync(&chip->fg_cap_learning_work);
+ cancel_work_sync(&chip->dump_sram);
+ cancel_work_sync(&chip->status_change_work);
+ cancel_work_sync(&chip->cycle_count_work);
+ cancel_work_sync(&chip->update_esr_work);
+ cancel_work_sync(&chip->rslow_comp_work);
+ cancel_work_sync(&chip->sysfs_restart_work);
+ cancel_work_sync(&chip->gain_comp_work);
+ cancel_work_sync(&chip->init_work);
+ cancel_work_sync(&chip->charge_full_work);
+ cancel_work_sync(&chip->bcl_hi_power_work);
+of_init_fail:
+ mutex_destroy(&chip->rslow_comp.lock);
+ mutex_destroy(&chip->rw_lock);
+ mutex_destroy(&chip->cyc_ctr.lock);
+ mutex_destroy(&chip->learning_data.learning_lock);
+ mutex_destroy(&chip->sysfs_restart_lock);
+ wakeup_source_trash(&chip->resume_soc_wakeup_source.source);
+ wakeup_source_trash(&chip->empty_check_wakeup_source.source);
+ wakeup_source_trash(&chip->memif_wakeup_source.source);
+ wakeup_source_trash(&chip->profile_wakeup_source.source);
+ wakeup_source_trash(&chip->update_temp_wakeup_source.source);
+ wakeup_source_trash(&chip->update_sram_wakeup_source.source);
+ wakeup_source_trash(&chip->gain_comp_wakeup_source.source);
+ wakeup_source_trash(&chip->capacity_learning_wakeup_source.source);
+ return rc;
+}
+
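+/*
+ * Reschedule the periodic temperature and SRAM updates so that, after resume,
+ * they run when the original period would have elapsed rather than a full
+ * period later.
+ */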
+static void check_and_update_sram_data(struct fg_chip *chip)
+{
+ unsigned long current_time = 0, next_update_time, time_left;
+
+ get_current_time(&current_time);
+
+ next_update_time = chip->last_temp_update_time
+ + (TEMP_PERIOD_UPDATE_MS / 1000);
+
+ if (next_update_time > current_time)
+ time_left = next_update_time - current_time;
+ else
+ time_left = 0;
+
+ schedule_delayed_work(
+ &chip->update_temp_work, msecs_to_jiffies(time_left * 1000));
+
+ next_update_time = chip->last_sram_update_time
+ + (fg_sram_update_period_ms / 1000);
+
+ if (next_update_time > current_time)
+ time_left = next_update_time - current_time;
+ else
+ time_left = 0;
+
+ schedule_delayed_work(
+ &chip->update_sram_data, msecs_to_jiffies(time_left * 1000));
+}
+
+static int fg_suspend(struct device *dev)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+
+ if (!chip->sw_rbias_ctrl)
+ return 0;
+
+ cancel_delayed_work(&chip->update_temp_work);
+ cancel_delayed_work(&chip->update_sram_data);
+
+ return 0;
+}
+
+static int fg_resume(struct device *dev)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+
+ if (!chip->sw_rbias_ctrl)
+ return 0;
+
+ check_and_update_sram_data(chip);
+ return 0;
+}
+
+static const struct dev_pm_ops qpnp_fg_pm_ops = {
+ .suspend = fg_suspend,
+ .resume = fg_resume,
+};
+
+static int fg_sense_type_set(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+ int old_fg_sense_type = fg_sense_type;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_sense_type: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_sense_type != 0 && fg_sense_type != 1) {
+ pr_err("Bad value %d\n", fg_sense_type);
+ fg_sense_type = old_fg_sense_type;
+ return -EINVAL;
+ }
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fg_sense_type set to %d\n", fg_sense_type);
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = container_of(bms_psy, struct fg_chip, bms_psy);
+ rc = set_prop_sense_type(chip, fg_sense_type);
+ return rc;
+}
+
+static struct kernel_param_ops fg_sense_type_ops = {
+ .set = fg_sense_type_set,
+ .get = param_get_int,
+};
+
+module_param_cb(sense_type, &fg_sense_type_ops, &fg_sense_type, 0644);
+
+static int fg_restart_set(const char *val, const struct kernel_param *kp)
+{
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+ chip = container_of(bms_psy, struct fg_chip, bms_psy);
+
+ mutex_lock(&chip->sysfs_restart_lock);
+ if (fg_restart != 0) {
+ mutex_unlock(&chip->sysfs_restart_lock);
+ return 0;
+ }
+ fg_restart = 1;
+ mutex_unlock(&chip->sysfs_restart_lock);
+
+ if (fg_debug_mask & FG_STATUS)
+ pr_info("fuel gauge restart initiated from sysfs...\n");
+
+ schedule_work(&chip->sysfs_restart_work);
+ return 0;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+ .set = fg_restart_set,
+ .get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+
+static struct spmi_driver fg_driver = {
+ .driver = {
+ .name = QPNP_FG_DEV_NAME,
+ .of_match_table = fg_match_table,
+ .pm = &qpnp_fg_pm_ops,
+ },
+ .probe = fg_probe,
+ .remove = fg_remove,
+};
+
+static int __init fg_init(void)
+{
+ return spmi_driver_register(&fg_driver);
+}
+
+static void __exit fg_exit(void)
+{
+ return spmi_driver_unregister(&fg_driver);
+}
+
+module_init(fg_init);
+module_exit(fg_exit);
+
+MODULE_DESCRIPTION("QPNP Fuel Gauge Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_FG_DEV_NAME);
diff --git a/drivers/power/qpnp-smbcharger.c b/drivers/power/qpnp-smbcharger.c
new file mode 100644
index 000000000000..d9d90ae66eae
--- /dev/null
+++ b/drivers/power/qpnp-smbcharger.c
@@ -0,0 +1,8221 @@
+/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "SMBCHG: %s: " fmt, __func__
+
+#include <linux/spmi.h>
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/bitops.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/printk.h>
+#include <linux/ratelimit.h>
+#include <linux/debugfs.h>
+#include <linux/leds.h>
+#include <linux/rtc.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/batterydata-lib.h>
+#include <linux/of_batterydata.h>
+#include <linux/msm_bcl.h>
+#include <linux/ktime.h>
+#include "pmic-voter.h"
+
+/* Mask/Bit helpers */
+#define _SMB_MASK(BITS, POS) \
+ ((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+ _SMB_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+ (RIGHT_BIT_POS))
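+/* e.g. SMB_MASK(3, 0) expands to 0x0F and SMB_MASK(7, 4) to 0xF0 */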
+/* Config registers */
+struct smbchg_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+};
+
+struct parallel_usb_cfg {
+ struct power_supply *psy;
+ int min_current_thr_ma;
+ int min_9v_current_thr_ma;
+ int allowed_lowering_ma;
+ int current_max_ma;
+ bool avail;
+ struct mutex lock;
+ int initial_aicl_ma;
+ ktime_t last_disabled;
+ bool enabled_once;
+};
+
+struct ilim_entry {
+ int vmin_uv;
+ int vmax_uv;
+ int icl_pt_ma;
+ int icl_lv_ma;
+ int icl_hv_ma;
+};
+
+struct ilim_map {
+ int num;
+ struct ilim_entry *entries;
+};
+
+struct smbchg_version_tables {
+ const int *dc_ilim_ma_table;
+ int dc_ilim_ma_len;
+ const int *usb_ilim_ma_table;
+ int usb_ilim_ma_len;
+ const int *iterm_ma_table;
+ int iterm_ma_len;
+ const int *fcc_comp_table;
+ int fcc_comp_len;
+ const int *aicl_rerun_period_table;
+ int aicl_rerun_period_len;
+ int rchg_thr_mv;
+};
+
+struct smbchg_chip {
+ struct device *dev;
+ struct spmi_device *spmi;
+ int schg_version;
+
+ /* peripheral register address bases */
+ u16 chgr_base;
+ u16 bat_if_base;
+ u16 usb_chgpth_base;
+ u16 dc_chgpth_base;
+ u16 otg_base;
+ u16 misc_base;
+
+ int fake_battery_soc;
+ u8 revision[4];
+
+ /* configuration parameters */
+ int iterm_ma;
+ int usb_max_current_ma;
+ int typec_current_ma;
+ int dc_max_current_ma;
+ int dc_target_current_ma;
+ int cfg_fastchg_current_ma;
+ int fastchg_current_ma;
+ int vfloat_mv;
+ int fastchg_current_comp;
+ int float_voltage_comp;
+ int resume_delta_mv;
+ int safety_time;
+ int prechg_safety_time;
+ int bmd_pin_src;
+ int jeita_temp_hard_limit;
+ int aicl_rerun_period_s;
+ bool use_vfloat_adjustments;
+ bool iterm_disabled;
+ bool bmd_algo_disabled;
+ bool soft_vfloat_comp_disabled;
+ bool chg_enabled;
+ bool charge_unknown_battery;
+ bool chg_inhibit_en;
+ bool chg_inhibit_source_fg;
+ bool low_volt_dcin;
+ bool cfg_chg_led_support;
+ bool cfg_chg_led_sw_ctrl;
+ bool vbat_above_headroom;
+ bool force_aicl_rerun;
+ bool hvdcp3_supported;
+ bool restricted_charging;
+ bool skip_usb_suspend_for_fake_battery;
+ bool hvdcp_not_supported;
+ bool otg_pinctrl;
+ u8 original_usbin_allowance;
+ struct parallel_usb_cfg parallel;
+ struct delayed_work parallel_en_work;
+ struct dentry *debug_root;
+ struct smbchg_version_tables tables;
+
+ /* wipower params */
+ struct ilim_map wipower_default;
+ struct ilim_map wipower_pt;
+ struct ilim_map wipower_div2;
+ struct qpnp_vadc_chip *vadc_dev;
+ bool wipower_dyn_icl_avail;
+ struct ilim_entry current_ilim;
+ struct mutex wipower_config;
+ bool wipower_configured;
+ struct qpnp_adc_tm_btm_param param;
+
+ /* flash current prediction */
+ int rpara_uohm;
+ int rslow_uohm;
+ int vled_max_uv;
+
+ /* vfloat adjustment */
+ int max_vbat_sample;
+ int n_vbat_samples;
+
+ /* status variables */
+ int wake_reasons;
+ int previous_soc;
+ int usb_online;
+ bool dc_present;
+ bool usb_present;
+ bool batt_present;
+ int otg_retries;
+ ktime_t otg_enable_time;
+ bool aicl_deglitch_short;
+ bool safety_timer_en;
+ bool aicl_complete;
+ bool usb_ov_det;
+ bool otg_pulse_skip_dis;
+ const char *battery_type;
+ enum power_supply_type usb_supply_type;
+ bool very_weak_charger;
+ bool parallel_charger_detected;
+ bool chg_otg_enabled;
+ bool flash_triggered;
+ bool flash_active;
+ bool icl_disabled;
+ u32 wa_flags;
+ int usb_icl_delta;
+ bool typec_dfp;
+
+ /* jeita and temperature */
+ bool batt_hot;
+ bool batt_cold;
+ bool batt_warm;
+ bool batt_cool;
+ unsigned int thermal_levels;
+ unsigned int therm_lvl_sel;
+ unsigned int *thermal_mitigation;
+
+ /* irqs */
+ int batt_hot_irq;
+ int batt_warm_irq;
+ int batt_cool_irq;
+ int batt_cold_irq;
+ int batt_missing_irq;
+ int vbat_low_irq;
+ int chg_hot_irq;
+ int chg_term_irq;
+ int taper_irq;
+ bool taper_irq_enabled;
+ struct mutex taper_irq_lock;
+ int recharge_irq;
+ int fastchg_irq;
+ int wdog_timeout_irq;
+ int power_ok_irq;
+ int dcin_uv_irq;
+ int usbin_uv_irq;
+ int usbin_ov_irq;
+ int src_detect_irq;
+ int otg_fail_irq;
+ int otg_oc_irq;
+ int aicl_done_irq;
+ int usbid_change_irq;
+ int chg_error_irq;
+ bool enable_aicl_wake;
+
+ /* psy */
+ struct power_supply *usb_psy;
+ struct power_supply batt_psy;
+ struct power_supply dc_psy;
+ struct power_supply *bms_psy;
+ struct power_supply *typec_psy;
+ int dc_psy_type;
+ const char *bms_psy_name;
+ const char *battery_psy_name;
+ bool psy_registered;
+
+ struct smbchg_regulator otg_vreg;
+ struct smbchg_regulator ext_otg_vreg;
+ struct work_struct usb_set_online_work;
+ struct delayed_work vfloat_adjust_work;
+ struct delayed_work hvdcp_det_work;
+ spinlock_t sec_access_lock;
+ struct mutex therm_lvl_lock;
+ struct mutex usb_set_online_lock;
+ struct mutex pm_lock;
+ /* aicl deglitch workaround */
+ unsigned long first_aicl_seconds;
+ int aicl_irq_count;
+ struct mutex usb_status_lock;
+ bool hvdcp_3_det_ignore_uv;
+ struct completion src_det_lowered;
+ struct completion src_det_raised;
+ struct completion usbin_uv_lowered;
+ struct completion usbin_uv_raised;
+ int pulse_cnt;
+ struct led_classdev led_cdev;
+ bool skip_usb_notification;
+ u32 vchg_adc_channel;
+ struct qpnp_vadc_chip *vchg_vadc_dev;
+
+ /* voters */
+ struct votable *fcc_votable;
+ struct votable *usb_icl_votable;
+ struct votable *dc_icl_votable;
+ struct votable *usb_suspend_votable;
+ struct votable *dc_suspend_votable;
+ struct votable *battchg_suspend_votable;
+ struct votable *hw_aicl_rerun_disable_votable;
+ struct votable *hw_aicl_rerun_enable_indirect_votable;
+ struct votable *aicl_deglitch_short_votable;
+};
+
+enum qpnp_schg {
+ QPNP_SCHG,
+ QPNP_SCHG_LITE,
+};
+
+static char *version_str[] = {
+ [QPNP_SCHG] = "SCHG",
+ [QPNP_SCHG_LITE] = "SCHG_LITE",
+};
+
+enum pmic_subtype {
+ PMI8994 = 10,
+ PMI8950 = 17,
+ PMI8996 = 19,
+ PMI8937 = 55,
+};
+
+enum smbchg_wa {
+ SMBCHG_AICL_DEGLITCH_WA = BIT(0),
+ SMBCHG_HVDCP_9V_EN_WA = BIT(1),
+ SMBCHG_USB100_WA = BIT(2),
+ SMBCHG_BATT_OV_WA = BIT(3),
+ SMBCHG_CC_ESR_WA = BIT(4),
+ SMBCHG_FLASH_ICL_DISABLE_WA = BIT(5),
+ SMBCHG_RESTART_WA = BIT(6),
+ SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA = BIT(7),
+};
+
+enum print_reason {
+ PR_REGISTER = BIT(0),
+ PR_INTERRUPT = BIT(1),
+ PR_STATUS = BIT(2),
+ PR_DUMP = BIT(3),
+ PR_PM = BIT(4),
+ PR_MISC = BIT(5),
+ PR_WIPOWER = BIT(6),
+ PR_TYPEC = BIT(7),
+};
+
+enum wake_reason {
+ PM_PARALLEL_CHECK = BIT(0),
+ PM_REASON_VFLOAT_ADJUST = BIT(1),
+ PM_ESR_PULSE = BIT(2),
+ PM_PARALLEL_TAPER = BIT(3),
+};
+
+enum fcc_voters {
+ ESR_PULSE_FCC_VOTER,
+ BATT_TYPE_FCC_VOTER,
+ RESTRICTED_CHG_FCC_VOTER,
+ NUM_FCC_VOTER,
+};
+
+enum icl_voters {
+ PSY_ICL_VOTER,
+ THERMAL_ICL_VOTER,
+ HVDCP_ICL_VOTER,
+ USER_ICL_VOTER,
+ WEAK_CHARGER_ICL_VOTER,
+ SW_AICL_ICL_VOTER,
+ CHG_SUSPEND_WORKAROUND_ICL_VOTER,
+ NUM_ICL_VOTER,
+};
+
+enum enable_voters {
+ /* userspace has suspended charging altogether */
+ USER_EN_VOTER,
+ /*
+ * this specific path has been suspended through the power supply
+ * framework
+ */
+ POWER_SUPPLY_EN_VOTER,
+ /*
+ * the usb driver has suspended this path by setting a current limit
+ * of < 2MA
+ * of < 2 mA
+ USB_EN_VOTER,
+ /*
+ * when a wireless charger comes online,
+ * the dc path is suspended for a second
+ */
+ WIRELESS_EN_VOTER,
+ /*
+ * the thermal daemon can suspend a charge path when the system
+ * temperature levels rise
+ */
+ THERMAL_EN_VOTER,
+ /*
+ * an external OTG supply is being used, suspend charge path so the
+ * charger does not accidentally try to charge from the external supply.
+ */
+ OTG_EN_VOTER,
+ /*
+ * the charger is very weak, do not draw any current from it
+ */
+ WEAK_CHARGER_EN_VOTER,
+ /*
+ * fake battery voter, used when the battery ID resistance is around 7.5 kohm
+ */
+ FAKE_BATTERY_EN_VOTER,
+ NUM_EN_VOTERS,
+};
+
+enum battchg_enable_voters {
+ /* userspace has disabled battery charging */
+ BATTCHG_USER_EN_VOTER,
+ /* battery charging disabled while loading battery profiles */
+ BATTCHG_UNKNOWN_BATTERY_EN_VOTER,
+ NUM_BATTCHG_EN_VOTERS,
+};
+
+enum hw_aicl_rerun_enable_indirect_voters {
+ /* enabled via device tree */
+ DEFAULT_CONFIG_HW_AICL_VOTER,
+ /* Varb workaround voter */
+ VARB_WORKAROUND_VOTER,
+ /* SHUTDOWN workaround voter */
+ SHUTDOWN_WORKAROUND_VOTER,
+ NUM_HW_AICL_RERUN_ENABLE_INDIRECT_VOTERS,
+};
+
+enum hw_aicl_rerun_disable_voters {
+ /* the results from enabling clients */
+ HW_AICL_RERUN_ENABLE_INDIRECT_VOTER,
+ /* Weak charger voter */
+ WEAK_CHARGER_HW_AICL_VOTER,
+ NUM_HW_AICL_DISABLE_VOTERS,
+};
+
+enum aicl_short_deglitch_voters {
+ /* Varb workaround voter */
+ VARB_WORKAROUND_SHORT_DEGLITCH_VOTER,
+ /* QC 2.0 */
+ HVDCP_SHORT_DEGLITCH_VOTER,
+ NUM_HW_SHORT_DEGLITCH_VOTERS,
+};
+static int smbchg_debug_mask;
+module_param_named(
+ debug_mask, smbchg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_parallel_en = 1;
+module_param_named(
+ parallel_en, smbchg_parallel_en, int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_main_chg_fcc_percent = 50;
+module_param_named(
+ main_chg_fcc_percent, smbchg_main_chg_fcc_percent,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_main_chg_icl_percent = 60;
+module_param_named(
+ main_chg_icl_percent, smbchg_main_chg_icl_percent,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_hvdcp_icl_ma = 1800;
+module_param_named(
+ default_hvdcp_icl_ma, smbchg_default_hvdcp_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_hvdcp3_icl_ma = 3000;
+module_param_named(
+ default_hvdcp3_icl_ma, smbchg_default_hvdcp3_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int smbchg_default_dcp_icl_ma = 1800;
+module_param_named(
+ default_dcp_icl_ma, smbchg_default_dcp_icl_ma,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int wipower_dyn_icl_en;
+module_param_named(
+ dynamic_icl_wipower_en, wipower_dyn_icl_en,
+ int, S_IRUSR | S_IWUSR
+);
+
+static int wipower_dcin_interval = ADC_MEAS1_INTERVAL_2P0MS;
+module_param_named(
+ wipower_dcin_interval, wipower_dcin_interval,
+ int, S_IRUSR | S_IWUSR
+);
+
+#define WIPOWER_DEFAULT_HYSTERISIS_UV 250000
+static int wipower_dcin_hyst_uv = WIPOWER_DEFAULT_HYSTERISIS_UV;
+module_param_named(
+ wipower_dcin_hyst_uv, wipower_dcin_hyst_uv,
+ int, S_IRUSR | S_IWUSR
+);
+
+#define pr_smb(reason, fmt, ...) \
+ do { \
+ if (smbchg_debug_mask & (reason)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define pr_smb_rt(reason, fmt, ...) \
+ do { \
+ if (smbchg_debug_mask & (reason)) \
+ pr_info_ratelimited(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug_ratelimited(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+static int smbchg_read(struct smbchg_chip *chip, u8 *val,
+ u16 addr, int count)
+{
+ int rc = 0;
+ struct spmi_device *spmi = chip->spmi;
+
+ if (addr == 0) {
+ dev_err(chip->dev, "addr cannot be zero addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, spmi->sid, rc);
+ return -EINVAL;
+ }
+
+ rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, addr, val, count);
+ if (rc) {
+ dev_err(chip->dev, "spmi read failed addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, spmi->sid, rc);
+ return rc;
+ }
+ return 0;
+}
+
+/*
+ * Writes an arbitrary number of bytes to a specified register
+ *
+ * Avoid using this function for register writes where possible; use
+ * smbchg_masked_write instead.
+ *
+ * The sec_access_lock must be held for all register writes, and this function
+ * does not take it. If this function is used, hold the spinlock yourself or
+ * random secure access writes may fail.
+ */
+static int smbchg_write(struct smbchg_chip *chip, u8 *val,
+ u16 addr, int count)
+{
+ int rc = 0;
+ struct spmi_device *spmi = chip->spmi;
+
+ if (addr == 0) {
+ dev_err(chip->dev, "addr cannot be zero addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, spmi->sid, rc);
+ return -EINVAL;
+ }
+
+ rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, addr, val, count);
+ if (rc) {
+ dev_err(chip->dev, "write failed addr=0x%02x sid=0x%02x rc=%d\n",
+ addr, spmi->sid, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Performs a read-modify-write on the register specified by the base address,
+ * limited by the bit mask.
+ *
+ * Avoid using this function for register writes where possible; use
+ * smbchg_masked_write instead.
+ *
+ * The sec_access_lock must be held for all register writes, and this function
+ * does not take it. If this function is used, hold the spinlock yourself or
+ * random secure access writes may fail.
+ */
+static int smbchg_masked_write_raw(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, base, 1);
+ if (rc) {
+ dev_err(chip->dev, "spmi read failed: addr=%03X, rc=%d\n",
+ base, rc);
+ return rc;
+ }
+
+ reg &= ~mask;
+ reg |= val & mask;
+
+ pr_smb(PR_REGISTER, "addr = 0x%x writing 0x%x\n", base, reg);
+
+ rc = smbchg_write(chip, &reg, base, 1);
+ if (rc) {
+ dev_err(chip->dev, "spmi write failed: addr=%03X, rc=%d\n",
+ base, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Performs a masked write on the register specified by the base address,
+ * limited by the bit mask.
+ *
+ * This function holds a spin lock to ensure that secure access register writes
+ * go through. If the secure access unlock register is armed, any unrelated
+ * register write can unarm it, causing the next write to fail.
+ *
+ * Note: do not use this for sec_access registers. Instead use the function
+ * below: smbchg_sec_masked_write
+ */
+static int smbchg_masked_write(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+ rc = smbchg_masked_write_raw(chip, base, mask, val);
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+
+ return rc;
+}
+
+/*
+ * Unlocks sec access and writes to the register specified.
+ *
+ * This function holds a spin lock to exclude other register writes while
+ * the two writes are taking place.
+ */
+#define SEC_ACCESS_OFFSET 0xD0
+#define SEC_ACCESS_VALUE 0xA5
+#define PERIPHERAL_MASK 0xFF
+static int smbchg_sec_masked_write(struct smbchg_chip *chip, u16 base, u8 mask,
+ u8 val)
+{
+ unsigned long flags;
+ int rc;
+ u16 peripheral_base = base & (~PERIPHERAL_MASK);
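+ /*
+ * The SEC_ACCESS register lives at offset 0xD0 of each peripheral, so
+ * mask off the low byte of the register address to get the peripheral
+ * base.
+ */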
+
+ spin_lock_irqsave(&chip->sec_access_lock, flags);
+
+ rc = smbchg_masked_write_raw(chip, peripheral_base + SEC_ACCESS_OFFSET,
+ SEC_ACCESS_VALUE, SEC_ACCESS_VALUE);
+ if (rc) {
+ dev_err(chip->dev, "Unable to unlock sec_access: %d", rc);
+ goto out;
+ }
+
+ rc = smbchg_masked_write_raw(chip, base, mask, val);
+
+out:
+ spin_unlock_irqrestore(&chip->sec_access_lock, flags);
+ return rc;
+}
+
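+/*
+ * Wake reasons are aggregated into a bitmask: the wakeup source is taken when
+ * the first reason bit is set and released only when the last one clears.
+ */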
+static void smbchg_stay_awake(struct smbchg_chip *chip, int reason)
+{
+ int reasons;
+
+ mutex_lock(&chip->pm_lock);
+ reasons = chip->wake_reasons | reason;
+ if (reasons != 0 && chip->wake_reasons == 0) {
+ pr_smb(PR_PM, "staying awake: 0x%02x (bit %d)\n",
+ reasons, reason);
+ pm_stay_awake(chip->dev);
+ }
+ chip->wake_reasons = reasons;
+ mutex_unlock(&chip->pm_lock);
+}
+
+static void smbchg_relax(struct smbchg_chip *chip, int reason)
+{
+ int reasons;
+
+ mutex_lock(&chip->pm_lock);
+ reasons = chip->wake_reasons & (~reason);
+ if (reasons == 0 && chip->wake_reasons != 0) {
+ pr_smb(PR_PM, "relaxing: 0x%02x (bit %d)\n",
+ reasons, reason);
+ pm_relax(chip->dev);
+ }
+ chip->wake_reasons = reasons;
+ mutex_unlock(&chip->pm_lock);
+}
+
+enum pwr_path_type {
+ UNKNOWN = 0,
+ PWR_PATH_BATTERY = 1,
+ PWR_PATH_USB = 2,
+ PWR_PATH_DC = 3,
+};
+
+#define PWR_PATH 0x08
+#define PWR_PATH_MASK 0x03
+static enum pwr_path_type smbchg_get_pwr_path(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + PWR_PATH, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read PWR_PATH rc = %d\n", rc);
+ return PWR_PATH_BATTERY;
+ }
+
+ return reg & PWR_PATH_MASK;
+}
+
+#define RID_STS 0xB
+#define RID_MASK 0xF
+#define IDEV_STS 0x8
+#define RT_STS 0x10
+#define USBID_MSB 0xE
+#define USBIN_UV_BIT BIT(0)
+#define USBIN_OV_BIT BIT(1)
+#define USBIN_SRC_DET_BIT BIT(2)
+#define FMB_STS_MASK SMB_MASK(3, 0)
+#define USBID_GND_THRESHOLD 0x495
+static bool is_otg_present_schg(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+ u8 usbid_reg[2];
+ u16 usbid_val;
+ /*
+ * After the falling edge of the usbid change interrupt occurs,
+ * there may still be some time before the ADC conversion for USB RID
+ * finishes in the fuel gauge. In the worst case, this could be up to
+ * 15 ms.
+ *
+ * Sleep for 20 ms (minimum msleep time) to wait for the conversion to
+ * finish and the USB RID status register to be updated before trying
+ * to detect OTG insertions.
+ */
+
+ msleep(20);
+
+ /*
+ * There is a problem with USBID conversions on PMI8994 revisions
+ * 2.0.0. As a workaround, check that the cable is not
+ * detected as factory test before enabling OTG.
+ */
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read IDEV_STS rc = %d\n", rc);
+ return false;
+ }
+
+ if ((reg & FMB_STS_MASK) != 0) {
+ pr_smb(PR_STATUS, "IDEV_STS = %02x, not ground\n", reg);
+ return false;
+ }
+
+ rc = smbchg_read(chip, usbid_reg, chip->usb_chgpth_base + USBID_MSB, 2);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read USBID rc = %d\n", rc);
+ return false;
+ }
+ usbid_val = (usbid_reg[0] << 8) | usbid_reg[1];
+
+ if (usbid_val > USBID_GND_THRESHOLD) {
+ pr_smb(PR_STATUS, "USBID = 0x%04x, too high to be ground\n",
+ usbid_val);
+ return false;
+ }
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RID_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read usb rid status rc = %d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "RID_STS = %02x\n", reg);
+
+ return (reg & RID_MASK) == 0;
+}
+
+#define RID_GND_DET_STS BIT(2)
+static bool is_otg_present_schg_lite(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->otg_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read otg RT status rc = %d\n", rc);
+ return false;
+ }
+
+ return !!(reg & RID_GND_DET_STS);
+}
+
+static bool is_otg_present(struct smbchg_chip *chip)
+{
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ return is_otg_present_schg_lite(chip);
+
+ return is_otg_present_schg(chip);
+}
+
+#define USBIN_9V BIT(5)
+#define USBIN_UNREG BIT(4)
+#define USBIN_LV BIT(3)
+#define DCIN_9V BIT(2)
+#define DCIN_UNREG BIT(1)
+#define DCIN_LV BIT(0)
+#define INPUT_STS 0x0D
+#define DCIN_UV_BIT BIT(0)
+#define DCIN_OV_BIT BIT(1)
+static bool is_dc_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->dc_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read dc status rc = %d\n", rc);
+ return false;
+ }
+
+ if ((reg & DCIN_UV_BIT) || (reg & DCIN_OV_BIT))
+ return false;
+
+ return true;
+}
+
+static bool is_usb_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+ if (!(reg & USBIN_SRC_DET_BIT) || (reg & USBIN_OV_BIT))
+ return false;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + INPUT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc);
+ return false;
+ }
+
+ return !!(reg & (USBIN_9V | USBIN_UNREG | USBIN_LV));
+}
+
+static char *usb_type_str[] = {
+ "SDP", /* bit 0 */
+ "OTHER", /* bit 1 */
+ "DCP", /* bit 2 */
+ "CDP", /* bit 3 */
+ "NONE", /* bit 4 error case */
+};
+
+#define N_TYPE_BITS 4
+#define TYPE_BITS_OFFSET 4
+
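+/*
+ * The detected charger type is reported in the upper nibble of IDEV_STS;
+ * get_type() returns the index of its lowest set bit, which indexes
+ * usb_type_str[] and usb_type_enum[].
+ */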
+static int get_type(u8 type_reg)
+{
+ unsigned long type = type_reg;
+ type >>= TYPE_BITS_OFFSET;
+ return find_first_bit(&type, N_TYPE_BITS);
+}
+
+/* helper to return the string of USB type */
+static inline char *get_usb_type_name(int type)
+{
+ return usb_type_str[type];
+}
+
+static enum power_supply_type usb_type_enum[] = {
+ POWER_SUPPLY_TYPE_USB, /* bit 0 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 1 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 2 */
+ POWER_SUPPLY_TYPE_USB_CDP, /* bit 3 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 4 error case, report DCP */
+};
+
+/* helper to return enum power_supply_type of USB type */
+static inline enum power_supply_type get_usb_supply_type(int type)
+{
+ return usb_type_enum[type];
+}
+
+static void read_usb_type(struct smbchg_chip *chip, char **usb_type_name,
+ enum power_supply_type *usb_supply_type)
+{
+ int rc, type;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+ *usb_type_name = "Other";
+ *usb_supply_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ return;
+ }
+ type = get_type(reg);
+ *usb_type_name = get_usb_type_name(type);
+ *usb_supply_type = get_usb_supply_type(type);
+}
+
+#define CHGR_STS 0x0E
+#define BATT_LESS_THAN_2V BIT(4)
+#define CHG_HOLD_OFF_BIT BIT(3)
+#define CHG_TYPE_MASK SMB_MASK(2, 1)
+#define CHG_TYPE_SHIFT 1
+#define BATT_NOT_CHG_VAL 0x0
+#define BATT_PRE_CHG_VAL 0x1
+#define BATT_FAST_CHG_VAL 0x2
+#define BATT_TAPER_CHG_VAL 0x3
+#define CHG_INHIBIT_BIT BIT(1)
+#define BAT_TCC_REACHED_BIT BIT(7)
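+/*
+ * Battery status is derived in this order: no charger present -> DISCHARGING;
+ * termination current reached or charge inhibit -> FULL; charge hold-off ->
+ * NOT_CHARGING; otherwise the charge type field decides between CHARGING and
+ * DISCHARGING.
+ */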
+static int get_prop_batt_status(struct smbchg_chip *chip)
+{
+ int rc, status = POWER_SUPPLY_STATUS_DISCHARGING;
+ u8 reg = 0, chg_type;
+ bool charger_present, chg_inhibit;
+
+ charger_present = is_usb_present(chip) | is_dc_present(chip) |
+ chip->hvdcp_3_det_ignore_uv;
+ if (!charger_present)
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ if (reg & BAT_TCC_REACHED_BIT)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ chg_inhibit = reg & CHG_INHIBIT_BIT;
+ if (chg_inhibit)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ if (reg & CHG_HOLD_OFF_BIT) {
+ /*
+ * when charge hold-off is asserted, the battery is
+ * not charging
+ */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ goto out;
+ }
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+
+ if (chg_type == BATT_NOT_CHG_VAL && !chip->hvdcp_3_det_ignore_uv)
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_CHARGING;
+out:
+ pr_smb_rt(PR_MISC, "CHGR_STS = 0x%02x\n", reg);
+ return status;
+}
+
+#define BAT_PRES_STATUS 0x08
+#define BAT_PRES_BIT BIT(7)
+static int get_prop_batt_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + BAT_PRES_STATUS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return 0;
+ }
+
+ return !!(reg & BAT_PRES_BIT);
+}
+
+static int get_prop_charge_type(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, chg_type;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + CHGR_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read CHGR_STS rc = %d\n", rc);
+ return 0;
+ }
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+ if (chg_type == BATT_NOT_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+ else if (chg_type == BATT_TAPER_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+ else if (chg_type == BATT_FAST_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else if (chg_type == BATT_PRE_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+static int set_property_on_fg(struct smbchg_chip *chip,
+ enum power_supply_property prop, int val)
+{
+ int rc;
+ union power_supply_propval ret = {0, };
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (!chip->bms_psy) {
+ pr_smb(PR_STATUS, "no bms psy found\n");
+ return -EINVAL;
+ }
+
+ ret.intval = val;
+ rc = chip->bms_psy->set_property(chip->bms_psy, prop, &ret);
+ if (rc)
+ pr_smb(PR_STATUS,
+ "bms psy does not allow updating prop %d rc = %d\n",
+ prop, rc);
+
+ return rc;
+}
+
+static int get_property_from_fg(struct smbchg_chip *chip,
+ enum power_supply_property prop, int *val)
+{
+ int rc;
+ union power_supply_propval ret = {0, };
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (!chip->bms_psy) {
+ pr_smb(PR_STATUS, "no bms psy found\n");
+ return -EINVAL;
+ }
+
+ rc = chip->bms_psy->get_property(chip->bms_psy, prop, &ret);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy doesn't support reading prop %d rc = %d\n",
+ prop, rc);
+ return rc;
+ }
+
+ *val = ret.intval;
+ return rc;
+}
+
+#define DEFAULT_BATT_CAPACITY 50
+static int get_prop_batt_capacity(struct smbchg_chip *chip)
+{
+ int capacity, rc;
+
+ if (chip->fake_battery_soc >= 0)
+ return chip->fake_battery_soc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CAPACITY, &capacity);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get capacity rc = %d\n", rc);
+ capacity = DEFAULT_BATT_CAPACITY;
+ }
+ return capacity;
+}
+
+#define DEFAULT_BATT_TEMP 200
+static int get_prop_batt_temp(struct smbchg_chip *chip)
+{
+ int temp, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_TEMP, &temp);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get temperature rc = %d\n", rc);
+ temp = DEFAULT_BATT_TEMP;
+ }
+ return temp;
+}
+
+#define DEFAULT_BATT_CURRENT_NOW 0
+static int get_prop_batt_current_now(struct smbchg_chip *chip)
+{
+ int ua, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW, &ua);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get current rc = %d\n", rc);
+ ua = DEFAULT_BATT_CURRENT_NOW;
+ }
+ return ua;
+}
+
+#define DEFAULT_BATT_VOLTAGE_NOW 0
+static int get_prop_batt_voltage_now(struct smbchg_chip *chip)
+{
+ int uv, rc;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_NOW, &uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc);
+ uv = DEFAULT_BATT_VOLTAGE_NOW;
+ }
+ return uv;
+}
+
+#define DEFAULT_BATT_VOLTAGE_MAX_DESIGN 4200000
+static int get_prop_batt_voltage_max_design(struct smbchg_chip *chip)
+{
+ int uv, rc;
+
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, &uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "Couldn't get voltage rc = %d\n", rc);
+ uv = DEFAULT_BATT_VOLTAGE_MAX_DESIGN;
+ }
+ return uv;
+}
+
+static int get_prop_batt_health(struct smbchg_chip *chip)
+{
+ if (chip->batt_hot)
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (chip->batt_cold)
+ return POWER_SUPPLY_HEALTH_COLD;
+ else if (chip->batt_warm)
+ return POWER_SUPPLY_HEALTH_WARM;
+ else if (chip->batt_cool)
+ return POWER_SUPPLY_HEALTH_COOL;
+ else
+ return POWER_SUPPLY_HEALTH_GOOD;
+}
+
+static void get_property_from_typec(struct smbchg_chip *chip,
+ enum power_supply_property property,
+ union power_supply_propval *prop)
+{
+ int rc;
+
+ rc = chip->typec_psy->get_property(chip->typec_psy, property, prop);
+ if (rc)
+ pr_smb(PR_TYPEC,
+ "typec psy doesn't support reading prop %d rc = %d\n",
+ property, rc);
+}
+
+static void update_typec_status(struct smbchg_chip *chip)
+{
+ union power_supply_propval type = {0, };
+ union power_supply_propval capability = {0, };
+ int rc;
+
+ get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type);
+ if (type.intval != POWER_SUPPLY_TYPE_UNKNOWN) {
+ get_property_from_typec(chip,
+ POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+ &capability);
+ chip->typec_current_ma = capability.intval;
+
+ if (!chip->skip_usb_notification) {
+ rc = chip->usb_psy->set_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+ &capability);
+ if (rc)
+ pr_err("typec failed to set current max rc=%d\n",
+ rc);
+ pr_smb(PR_TYPEC, "SMB Type-C mode = %d, current=%d\n",
+ type.intval, capability.intval);
+ }
+ } else {
+ pr_smb(PR_TYPEC,
+ "typec detection not completed continuing with USB update\n");
+ }
+}
+
+/*
+ * finds the index of the closest value in the array. If there are two that
+ * are equally close, the lower index will be returned
+ */
+static int find_closest_in_array(const int *arr, int len, int val)
+{
+ int i, closest = 0;
+
+ if (len == 0)
+ return closest;
+ for (i = 0; i < len; i++)
+ if (abs(val - arr[i]) < abs(val - arr[closest]))
+ closest = i;
+
+ return closest;
+}
+
+/*
+ * finds the index of the largest table value that is <= val;
+ * returns -1 if every entry is larger than val
+ */
+static int find_smaller_in_array(const int *table, int val, int len)
+{
+ int i;
+
+ for (i = len - 1; i >= 0; i--) {
+ if (val >= table[i])
+ break;
+ }
+
+ return i;
+}
+
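+/*
+ * The current tables below are indexed by the raw register field value, so
+ * the entry at index 0 corresponds to a field value of 0 (e.g. 300 mA for the
+ * termination current tables).
+ */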
+static const int iterm_ma_table_8994[] = {
+ 300,
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 500,
+ 600
+};
+
+static const int iterm_ma_table_8996[] = {
+ 300,
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 400,
+ 500
+};
+
+static const int usb_ilim_ma_table_8994[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+ 2050,
+ 2100,
+ 2300,
+ 2400,
+ 2500,
+ 3000
+};
+
+static const int usb_ilim_ma_table_8996[] = {
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1450,
+ 1500,
+ 1550,
+ 1600,
+ 1700,
+ 1800,
+ 1900,
+ 1950,
+ 2000,
+ 2050,
+ 2100,
+ 2200,
+ 2300,
+ 2400,
+ 2500,
+ 2600,
+ 2700,
+ 2800,
+ 2900,
+ 3000
+};
+
+static int dc_ilim_ma_table_8994[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+};
+
+static int dc_ilim_ma_table_8996[] = {
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 1000,
+ 1100,
+ 1200,
+ 1300,
+ 1400,
+ 1450,
+ 1500,
+ 1550,
+ 1600,
+ 1700,
+ 1800,
+ 1900,
+ 1950,
+ 2000,
+ 2050,
+ 2100,
+ 2200,
+ 2300,
+ 2400,
+};
+
+static const int fcc_comp_table_8994[] = {
+ 250,
+ 700,
+ 900,
+ 1200,
+};
+
+static const int fcc_comp_table_8996[] = {
+ 250,
+ 1100,
+ 1200,
+ 1500,
+};
+
+static const int aicl_rerun_period[] = {
+ 45,
+ 90,
+ 180,
+ 360,
+};
+
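+/*
+ * SCHG-lite has a wider AICL rerun-period field (see
+ * AICL_WL_SEL_SCHG_LITE_MASK below), so its table prepends the sub-45s
+ * entries to the common 45/90/180/360s values.
+ */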
+static const int aicl_rerun_period_schg_lite[] = {
+ 3, /* 2.8s */
+ 6, /* 5.6s */
+ 11, /* 11.3s */
+ 23, /* 22.5s */
+ 45,
+ 90,
+ 180,
+ 360,
+};
+
+static void use_pmi8994_tables(struct smbchg_chip *chip)
+{
+ chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8994;
+ chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8994);
+ chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8994;
+ chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8994);
+ chip->tables.iterm_ma_table = iterm_ma_table_8994;
+ chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8994);
+ chip->tables.fcc_comp_table = fcc_comp_table_8994;
+ chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8994);
+ chip->tables.rchg_thr_mv = 200;
+ chip->tables.aicl_rerun_period_table = aicl_rerun_period;
+ chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period);
+}
+
+static void use_pmi8996_tables(struct smbchg_chip *chip)
+{
+ chip->tables.usb_ilim_ma_table = usb_ilim_ma_table_8996;
+ chip->tables.usb_ilim_ma_len = ARRAY_SIZE(usb_ilim_ma_table_8996);
+ chip->tables.dc_ilim_ma_table = dc_ilim_ma_table_8996;
+ chip->tables.dc_ilim_ma_len = ARRAY_SIZE(dc_ilim_ma_table_8996);
+ chip->tables.iterm_ma_table = iterm_ma_table_8996;
+ chip->tables.iterm_ma_len = ARRAY_SIZE(iterm_ma_table_8996);
+ chip->tables.fcc_comp_table = fcc_comp_table_8996;
+ chip->tables.fcc_comp_len = ARRAY_SIZE(fcc_comp_table_8996);
+ chip->tables.rchg_thr_mv = 150;
+ chip->tables.aicl_rerun_period_table = aicl_rerun_period;
+ chip->tables.aicl_rerun_period_len = ARRAY_SIZE(aicl_rerun_period);
+}
+
+#define CMD_CHG_REG 0x42
+#define EN_BAT_CHG_BIT BIT(1)
+static int smbchg_charging_en(struct smbchg_chip *chip, bool en)
+{
+ /* The en bit is configured active low */
+ return smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ EN_BAT_CHG_BIT, en ? 0 : EN_BAT_CHG_BIT);
+}
+
+#define CMD_IL 0x40
+#define USBIN_SUSPEND_BIT BIT(4)
+#define CURRENT_100_MA 100
+#define CURRENT_150_MA 150
+#define CURRENT_500_MA 500
+#define CURRENT_900_MA 900
+#define CURRENT_1500_MA 1500
+#define SUSPEND_CURRENT_MA 2
+#define ICL_OVERRIDE_BIT BIT(2)
+static int smbchg_usb_suspend(struct smbchg_chip *chip, bool suspend)
+{
+ int rc;
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_SUSPEND_BIT, suspend ? USBIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set usb suspend rc = %d\n", rc);
+ return rc;
+}
+
+#define DCIN_SUSPEND_BIT BIT(3)
+static int smbchg_dc_suspend(struct smbchg_chip *chip, bool suspend)
+{
+ int rc = 0;
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ DCIN_SUSPEND_BIT, suspend ? DCIN_SUSPEND_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set dc suspend rc = %d\n", rc);
+ return rc;
+}
+
+#define IL_CFG 0xF2
+#define DCIN_INPUT_MASK SMB_MASK(4, 0)
+static int smbchg_set_dc_current_max(struct smbchg_chip *chip, int current_ma)
+{
+ int i;
+ u8 dc_cur_val;
+
+ i = find_smaller_in_array(chip->tables.dc_ilim_ma_table,
+ current_ma, chip->tables.dc_ilim_ma_len);
+
+ if (i < 0) {
+ dev_err(chip->dev, "Cannot find %dma current_table\n",
+ current_ma);
+ return -EINVAL;
+ }
+
+ chip->dc_max_current_ma = chip->tables.dc_ilim_ma_table[i];
+ dc_cur_val = i & DCIN_INPUT_MASK;
+
+ pr_smb(PR_STATUS, "dc current set to %d mA\n",
+ chip->dc_max_current_ma);
+ return smbchg_sec_masked_write(chip, chip->dc_chgpth_base + IL_CFG,
+ DCIN_INPUT_MASK, dc_cur_val);
+}
+
+#define AICL_WL_SEL_CFG 0xF5
+#define AICL_WL_SEL_MASK SMB_MASK(1, 0)
+#define AICL_WL_SEL_SCHG_LITE_MASK SMB_MASK(2, 0)
+static int smbchg_set_aicl_rerun_period_s(struct smbchg_chip *chip,
+ int period_s)
+{
+ int i;
+ u8 reg, mask;
+
+ i = find_smaller_in_array(chip->tables.aicl_rerun_period_table,
+ period_s, chip->tables.aicl_rerun_period_len);
+
+ if (i < 0) {
+ dev_err(chip->dev, "Cannot find %ds in aicl rerun period\n",
+ period_s);
+ return -EINVAL;
+ }
+
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ mask = AICL_WL_SEL_SCHG_LITE_MASK;
+ else
+ mask = AICL_WL_SEL_MASK;
+
+ reg = i & mask;
+
+ pr_smb(PR_STATUS, "aicl rerun period set to %ds\n",
+ chip->tables.aicl_rerun_period_table[i]);
+ return smbchg_sec_masked_write(chip,
+ chip->dc_chgpth_base + AICL_WL_SEL_CFG,
+ mask, reg);
+}
+
+static struct power_supply *get_parallel_psy(struct smbchg_chip *chip)
+{
+ if (!chip->parallel.avail)
+ return NULL;
+ if (chip->parallel.psy)
+ return chip->parallel.psy;
+ chip->parallel.psy = power_supply_get_by_name("usb-parallel");
+ if (!chip->parallel.psy)
+ pr_smb(PR_STATUS, "parallel charger not found\n");
+ return chip->parallel.psy;
+}
+
+static void smbchg_usb_update_online_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ usb_set_online_work);
+ bool user_enabled = !get_client_vote(chip->usb_suspend_votable,
+ USER_EN_VOTER);
+ int online;
+
+ online = user_enabled && chip->usb_present && !chip->very_weak_charger;
+
+ mutex_lock(&chip->usb_set_online_lock);
+ if (chip->usb_online != online) {
+ pr_smb(PR_MISC, "setting usb psy online = %d\n", online);
+ power_supply_set_online(chip->usb_psy, online);
+ chip->usb_online = online;
+ }
+ mutex_unlock(&chip->usb_set_online_lock);
+}
+
+#define CHGPTH_CFG 0xF4
+#define CFG_USB_2_3_SEL_BIT BIT(7)
+#define CFG_USB_2 0
+#define CFG_USB_3 BIT(7)
+#define USBIN_INPUT_MASK SMB_MASK(4, 0)
+#define USBIN_MODE_CHG_BIT BIT(0)
+#define USBIN_LIMITED_MODE 0
+#define USBIN_HC_MODE BIT(0)
+#define USB51_MODE_BIT BIT(1)
+#define USB51_100MA 0
+#define USB51_500MA BIT(1)
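+/*
+ * The effective USB input limit in limited mode is set by the combination of
+ * the USB 2/3 selection in CHGPTH_CFG and the USB51 mode bit in CMD_IL, as
+ * programmed in smbchg_set_usb_current_max() below:
+ * USB_2 + USB51_100MA -> 100 mA USB_3 + USB51_100MA -> 150 mA
+ * USB_2 + USB51_500MA -> 500 mA USB_3 + USB51_500MA -> 900 mA
+ */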
+static int smbchg_set_high_usb_chg_current(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 usb_cur_val;
+
+ if (current_ma == CURRENT_100_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CFG_USB_2 rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT | ICL_OVERRIDE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA | ICL_OVERRIDE_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't set ICL_OVERRIDE rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS,
+ "Forcing 100mA current limit\n");
+ chip->usb_max_current_ma = CURRENT_100_MA;
+ return rc;
+ }
+
+ i = find_smaller_in_array(chip->tables.usb_ilim_ma_table,
+ current_ma, chip->tables.usb_ilim_ma_len);
+ if (i < 0) {
+ dev_err(chip->dev,
+ "Cannot find %dma current_table using %d\n",
+ current_ma, CURRENT_150_MA);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ rc |= smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+ CURRENT_150_MA, rc);
+ else
+ chip->usb_max_current_ma = 150;
+ return rc;
+ }
+
+ usb_cur_val = i & USBIN_INPUT_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + IL_CFG,
+ USBIN_INPUT_MASK, usb_cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT, USBIN_HC_MODE);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't write cfg 5 rc = %d\n", rc);
+ chip->usb_max_current_ma = chip->tables.usb_ilim_ma_table[i];
+ return rc;
+}
+
+/* if APSD results are used:
+ * if SDP is detected, it will look at the 500mA setting
+ * if set, it will draw 500mA
+ * if unset, it will draw 100mA
+ * if CDP/DCP is detected, it will look at the 0x0C setting
+ * i.e. the values in 0x41[1, 0] do not matter
+ */
+static int smbchg_set_usb_current_max(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int rc = 0;
+
+ /*
+ * if the battery is not present, do not allow the usb ICL to lower in
+ * order to avoid browning out the device during a hotswap.
+ */
+ if (!chip->batt_present && current_ma < chip->usb_max_current_ma) {
+ pr_info_ratelimited("Ignoring usb current->%d, battery is absent\n",
+ current_ma);
+ return 0;
+ }
+ pr_smb(PR_STATUS, "USB current_ma = %d\n", current_ma);
+
+ if (current_ma <= SUSPEND_CURRENT_MA) {
+ /* suspend the usb if current <= 2mA */
+ rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, true, 0);
+ chip->usb_max_current_ma = 0;
+ goto out;
+ } else {
+ rc = vote(chip->usb_suspend_votable, USB_EN_VOTER, false, 0);
+ }
+
+ switch (chip->usb_supply_type) {
+ case POWER_SUPPLY_TYPE_USB:
+ if ((current_ma < CURRENT_150_MA) &&
+ (chip->wa_flags & SMBCHG_USB100_WA))
+ current_ma = CURRENT_150_MA;
+
+ if (current_ma < CURRENT_150_MA) {
+ /* force 100mA */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 100;
+ }
+ /* specific current values */
+ if (current_ma == CURRENT_150_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_100MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 150;
+ }
+ if (current_ma == CURRENT_500_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_2);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_500MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 500;
+ }
+ if (current_ma == CURRENT_900_MA) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ CFG_USB_2_3_SEL_BIT, CFG_USB_3);
+ if (rc < 0) {
+ pr_err("Couldn't set CHGPTH_CFG rc = %d\n", rc);
+ goto out;
+ }
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ USBIN_MODE_CHG_BIT | USB51_MODE_BIT,
+ USBIN_LIMITED_MODE | USB51_500MA);
+ if (rc < 0) {
+ pr_err("Couldn't set CMD_IL rc = %d\n", rc);
+ goto out;
+ }
+ chip->usb_max_current_ma = 900;
+ }
+ break;
+ case POWER_SUPPLY_TYPE_USB_CDP:
+ if (current_ma < CURRENT_1500_MA) {
+ /* use override for CDP */
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + CMD_IL,
+ ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
+ if (rc < 0)
+ pr_err("Couldn't set override rc = %d\n", rc);
+ }
+ /* fall through */
+ default:
+ rc = smbchg_set_high_usb_chg_current(chip, current_ma);
+ if (rc < 0)
+ pr_err("Couldn't set %dmA rc = %d\n", current_ma, rc);
+ break;
+ }
+
+out:
+ pr_smb(PR_STATUS, "usb type = %d current set to %d mA\n",
+ chip->usb_supply_type, chip->usb_max_current_ma);
+ return rc;
+}
+
+#define USBIN_HVDCP_STS 0x0C
+#define USBIN_HVDCP_SEL_BIT BIT(4)
+#define USBIN_HVDCP_SEL_9V_BIT BIT(1)
+#define SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT BIT(2)
+#define SCHG_LITE_USBIN_HVDCP_SEL_BIT BIT(0)
+static int smbchg_get_min_parallel_current_ma(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, hvdcp_sel, hvdcp_sel_9v;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb status rc = %d\n", rc);
+ return 0;
+ }
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT;
+ hvdcp_sel_9v = SCHG_LITE_USBIN_HVDCP_SEL_9V_BIT;
+ } else {
+ hvdcp_sel = USBIN_HVDCP_SEL_BIT;
+ hvdcp_sel_9v = USBIN_HVDCP_SEL_9V_BIT;
+ }
+
+ if ((reg & hvdcp_sel) && (reg & hvdcp_sel_9v))
+ return chip->parallel.min_9v_current_thr_ma;
+ return chip->parallel.min_current_thr_ma;
+}
+
+static bool is_hvdcp_present(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg, hvdcp_sel;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc < 0) {
+ pr_err("Couldn't read hvdcp status rc = %d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "HVDCP_STS = 0x%02x\n", reg);
+ /*
+ * Report HVDCP as present only if the HVDCP selection bit is
+ * set and USB is still physically present.
+ */
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ hvdcp_sel = SCHG_LITE_USBIN_HVDCP_SEL_BIT;
+ else
+ hvdcp_sel = USBIN_HVDCP_SEL_BIT;
+
+ if ((reg & hvdcp_sel) && is_usb_present(chip))
+ return true;
+
+ return false;
+}
+
+#define FCC_CFG 0xF2
+#define FCC_500MA_VAL 0x4
+#define FCC_MASK SMB_MASK(4, 0)
+static int smbchg_set_fastchg_current_raw(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 cur_val;
+
+ /* the fcc enumerations are the same as the usb currents */
+ i = find_smaller_in_array(chip->tables.usb_ilim_ma_table,
+ current_ma, chip->tables.usb_ilim_ma_len);
+ if (i < 0) {
+ dev_err(chip->dev,
+ "Cannot find %dma current_table using %d\n",
+ current_ma, CURRENT_500_MA);
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG,
+ FCC_MASK,
+ FCC_500MA_VAL);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+ CURRENT_500_MA, rc);
+ else
+ chip->fastchg_current_ma = 500;
+ return rc;
+ }
+
+ if (chip->tables.usb_ilim_ma_table[i] == chip->fastchg_current_ma) {
+ pr_smb(PR_STATUS, "skipping fastchg current request: %d\n",
+ chip->fastchg_current_ma);
+ return 0;
+ }
+
+ cur_val = i & FCC_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CFG,
+ FCC_MASK, cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "cannot write to fcc cfg rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "fastcharge current requested %d, set to %d\n",
+ current_ma, chip->tables.usb_ilim_ma_table[cur_val]);
+
+ chip->fastchg_current_ma = chip->tables.usb_ilim_ma_table[cur_val];
+ return rc;
+}
+
+#define ICL_STS_1_REG 0x7
+#define ICL_STS_2_REG 0x9
+#define ICL_STS_MASK 0x1F
+#define AICL_SUSP_BIT BIT(6)
+#define AICL_STS_BIT BIT(5)
+#define USBIN_SUSPEND_STS_BIT BIT(3)
+#define USBIN_ACTIVE_PWR_SRC_BIT BIT(1)
+#define DCIN_ACTIVE_PWR_SRC_BIT BIT(0)
+#define PARALLEL_REENABLE_TIMER_MS 1000
+#define PARALLEL_CHG_THRESHOLD_CURRENT 1800
+static bool smbchg_is_usbin_active_pwr_src(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_2_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Could not read usb icl sts 2: %d\n", rc);
+ return false;
+ }
+
+ return !(reg & USBIN_SUSPEND_STS_BIT)
+ && (reg & USBIN_ACTIVE_PWR_SRC_BIT);
+}
+
+static int smbchg_parallel_usb_charging_en(struct smbchg_chip *chip, bool en)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return 0;
+
+ pval.intval = en;
+ return parallel_psy->set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &pval);
+}
+
+#define ESR_PULSE_CURRENT_DELTA_MA 200
+static int smbchg_sw_esr_pulse_en(struct smbchg_chip *chip, bool en)
+{
+ int rc, fg_current_now, icl_ma;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_CURRENT_NOW,
+ &fg_current_now);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support OCV\n");
+ return 0;
+ }
+
+ icl_ma = max(chip->iterm_ma + ESR_PULSE_CURRENT_DELTA_MA,
+ fg_current_now - ESR_PULSE_CURRENT_DELTA_MA);
+ rc = vote(chip->fcc_votable, ESR_PULSE_FCC_VOTER, en, icl_ma);
+ if (rc < 0) {
+ pr_err("Couldn't Vote FCC en = %d rc = %d\n", en, rc);
+ return rc;
+ }
+ rc = smbchg_parallel_usb_charging_en(chip, !en);
+ return rc;
+}
+
+#define USB_AICL_CFG 0xF3
+#define AICL_EN_BIT BIT(2)
+static void smbchg_rerun_aicl(struct smbchg_chip *chip)
+{
+ pr_smb(PR_STATUS, "Rerunning AICL...\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+ /* Add a delay so that AICL successfully clears */
+ msleep(50);
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+}
+
+static void taper_irq_en(struct smbchg_chip *chip, bool en)
+{
+ mutex_lock(&chip->taper_irq_lock);
+ if (en != chip->taper_irq_enabled) {
+ if (en) {
+ enable_irq(chip->taper_irq);
+ enable_irq_wake(chip->taper_irq);
+ } else {
+ disable_irq_wake(chip->taper_irq);
+ disable_irq_nosync(chip->taper_irq);
+ }
+ chip->taper_irq_enabled = en;
+ }
+ mutex_unlock(&chip->taper_irq_lock);
+}
+
+static int smbchg_get_aicl_level_ma(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_1_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Could not read usb icl sts 1: %d\n", rc);
+ return 0;
+ }
+ if (reg & AICL_SUSP_BIT) {
+ pr_warn("AICL suspended: %02x\n", reg);
+ return 0;
+ }
+ reg &= ICL_STS_MASK;
+ if (reg >= chip->tables.usb_ilim_ma_len) {
+ pr_warn("invalid AICL value: %02x\n", reg);
+ return 0;
+ }
+ return chip->tables.usb_ilim_ma_table[reg];
+}
+
+static void smbchg_parallel_usb_disable(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+ pr_smb(PR_STATUS, "disabling parallel charger\n");
+ chip->parallel.last_disabled = ktime_get_boottime();
+ taper_irq_en(chip, false);
+ chip->parallel.initial_aicl_ma = 0;
+ chip->parallel.current_max_ma = 0;
+ power_supply_set_current_limit(parallel_psy,
+ SUSPEND_CURRENT_MA * 1000);
+ power_supply_set_present(parallel_psy, false);
+ smbchg_set_fastchg_current_raw(chip,
+ get_effective_result_locked(chip->fcc_votable));
+ smbchg_set_usb_current_max(chip,
+ get_effective_result_locked(chip->usb_icl_votable));
+ smbchg_rerun_aicl(chip);
+}
+
+#define PARALLEL_TAPER_MAX_TRIES 3
+#define PARALLEL_FCC_PERCENT_REDUCTION 75
+#define MINIMUM_PARALLEL_FCC_MA 500
+#define CHG_ERROR_BIT BIT(0)
+#define BAT_TAPER_MODE_BIT BIT(6)
+static void smbchg_parallel_usb_taper(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int parallel_fcc_ma, tries = 0;
+ u8 reg = 0;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ smbchg_stay_awake(chip, PM_PARALLEL_TAPER);
+try_again:
+ mutex_lock(&chip->parallel.lock);
+ if (chip->parallel.current_max_ma == 0) {
+ pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
+ goto done;
+ }
+ parallel_psy->get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ tries += 1;
+ parallel_fcc_ma = pval.intval / 1000;
+ pr_smb(PR_STATUS, "try #%d parallel charger fcc = %d\n",
+ tries, parallel_fcc_ma);
+ if (parallel_fcc_ma < MINIMUM_PARALLEL_FCC_MA
+ || tries > PARALLEL_TAPER_MAX_TRIES) {
+ smbchg_parallel_usb_disable(chip);
+ goto done;
+ }
+ pval.intval = ((parallel_fcc_ma
+ * PARALLEL_FCC_PERCENT_REDUCTION) / 100);
+ pr_smb(PR_STATUS, "reducing FCC of parallel charger to %d\n",
+ pval.intval);
+ /* Change it to uA */
+ pval.intval *= 1000;
+ parallel_psy->set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ /*
+ * sleep here for 100 ms in order to make sure the charger has a chance
+ * to go back into constant current charging
+ */
+ mutex_unlock(&chip->parallel.lock);
+ msleep(100);
+
+ mutex_lock(&chip->parallel.lock);
+ if (chip->parallel.current_max_ma == 0) {
+ pr_smb(PR_STATUS, "Not parallel charging, skipping\n");
+ goto done;
+ }
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (reg & BAT_TAPER_MODE_BIT) {
+ mutex_unlock(&chip->parallel.lock);
+ goto try_again;
+ }
+ taper_irq_en(chip, true);
+done:
+ mutex_unlock(&chip->parallel.lock);
+ smbchg_relax(chip, PM_PARALLEL_TAPER);
+}
+
+static void smbchg_parallel_usb_enable(struct smbchg_chip *chip,
+ int total_current_ma)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int new_parallel_cl_ma, set_parallel_cl_ma, new_pmi_cl_ma, rc;
+ int current_table_index, target_icl_ma;
+ int fcc_ma, main_fastchg_current_ma;
+ int target_parallel_fcc_ma, supplied_parallel_fcc_ma;
+ int parallel_chg_fcc_percent;
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ pr_smb(PR_STATUS, "Attempting to enable parallel charger\n");
+
+ rc = power_supply_set_voltage_limit(parallel_psy, chip->vfloat_mv + 50);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set Vflt on parallel psy rc: %d\n", rc);
+ return;
+ }
+ /* Set USB ICL */
+ target_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+ new_parallel_cl_ma = total_current_ma
+ * (100 - smbchg_main_chg_icl_percent) / 100;
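+ /*
+ * Illustrative split, assuming smbchg_main_chg_icl_percent = 50: for a
+ * 3000 mA total input budget the parallel charger is asked for 1500 mA,
+ * and the main charger is then limited to the effective ICL vote minus
+ * whatever the parallel charger actually accepted (see new_pmi_cl_ma).
+ */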
+ taper_irq_en(chip, true);
+ power_supply_set_present(parallel_psy, true);
+ power_supply_set_current_limit(parallel_psy,
+ new_parallel_cl_ma * 1000);
+ /* read back the real amount of current we are getting */
+ parallel_psy->get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ set_parallel_cl_ma = pval.intval / 1000;
+ chip->parallel.current_max_ma = new_parallel_cl_ma;
+ pr_smb(PR_MISC, "Requested ICL = %d from parallel, got %d\n",
+ new_parallel_cl_ma, set_parallel_cl_ma);
+ new_pmi_cl_ma = max(0, target_icl_ma - set_parallel_cl_ma);
+ pr_smb(PR_STATUS, "New Total USB current = %d[%d, %d]\n",
+ total_current_ma, new_pmi_cl_ma,
+ set_parallel_cl_ma);
+ smbchg_set_usb_current_max(chip, new_pmi_cl_ma);
+
+ /* begin splitting the fast charge current */
+ fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ parallel_chg_fcc_percent =
+ 100 - smbchg_main_chg_fcc_percent;
+ target_parallel_fcc_ma =
+ (fcc_ma * parallel_chg_fcc_percent) / 100;
+ pval.intval = target_parallel_fcc_ma * 1000;
+ parallel_psy->set_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ /* check how much actual current is supplied by the parallel charger */
+ parallel_psy->get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+ supplied_parallel_fcc_ma = pval.intval / 1000;
+ pr_smb(PR_MISC, "Requested FCC = %d from parallel, got %d\n",
+ target_parallel_fcc_ma, supplied_parallel_fcc_ma);
+
+ /* then for the main charger, use the left over FCC */
+ current_table_index = find_smaller_in_array(
+ chip->tables.usb_ilim_ma_table,
+ fcc_ma - supplied_parallel_fcc_ma,
+ chip->tables.usb_ilim_ma_len);
+ main_fastchg_current_ma =
+ chip->tables.usb_ilim_ma_table[current_table_index];
+ smbchg_set_fastchg_current_raw(chip, main_fastchg_current_ma);
+ pr_smb(PR_STATUS, "FCC = %d[%d, %d]\n", fcc_ma, main_fastchg_current_ma,
+ supplied_parallel_fcc_ma);
+
+ chip->parallel.enabled_once = true;
+
+ return;
+}
+
+static bool smbchg_is_parallel_usb_ok(struct smbchg_chip *chip,
+ int *ret_total_current_ma)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ union power_supply_propval pval = {0, };
+ int min_current_thr_ma, rc, type;
+ int total_current_ma, current_limit_ma, parallel_cl_ma;
+ ktime_t kt_since_last_disable;
+ u8 reg;
+ int fcc_ma = get_effective_result_locked(chip->fcc_votable);
+ int fcc_voter_id = get_effective_client_id_locked(chip->fcc_votable);
+ int usb_icl_ma = get_effective_result_locked(chip->usb_icl_votable);
+
+ if (!parallel_psy || !smbchg_parallel_en
+ || !chip->parallel_charger_detected) {
+ pr_smb(PR_STATUS, "Parallel charging not enabled\n");
+ return false;
+ }
+
+ kt_since_last_disable = ktime_sub(ktime_get_boottime(),
+ chip->parallel.last_disabled);
+ if (chip->parallel.current_max_ma == 0
+ && chip->parallel.enabled_once
+ && ktime_to_ms(kt_since_last_disable)
+ < PARALLEL_REENABLE_TIMER_MS) {
+ pr_smb(PR_STATUS, "Only been %lld since disable, skipping\n",
+ ktime_to_ms(kt_since_last_disable));
+ return false;
+ }
+
+ /*
+ * If the battery is not present, try not to change parallel charging
+ * from OFF to ON or from ON to OFF, as it could cause the device to
+ * brown out in the instant that the USB settings are changed.
+ *
+ * Only allow the parallel charging check to report false (thereby turning
+ * off parallel charging) if the battery is still there, or if parallel
+ * charging is disabled in the first place.
+ */
+ if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST
+ && (get_prop_batt_present(chip)
+ || chip->parallel.current_max_ma == 0)) {
+ pr_smb(PR_STATUS, "Not in fast charge, skipping\n");
+ return false;
+ }
+
+ if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) {
+ pr_smb(PR_STATUS, "JEITA active, skipping\n");
+ return false;
+ }
+
+ rc = smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+ return false;
+ }
+
+ type = get_type(reg);
+ if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB_CDP) {
+ pr_smb(PR_STATUS, "CDP adapter, skipping\n");
+ return false;
+ }
+
+ if (get_usb_supply_type(type) == POWER_SUPPLY_TYPE_USB) {
+ pr_smb(PR_STATUS, "SDP adapter, skipping\n");
+ return false;
+ }
+
+ /*
+ * If USBIN is suspended or not the active power source, do not enable
+ * parallel charging. The device may be charging off of DCIN.
+ */
+ if (!smbchg_is_usbin_active_pwr_src(chip)) {
+ pr_smb(PR_STATUS, "USB not active power source: %02x\n", reg);
+ return false;
+ }
+
+ min_current_thr_ma = smbchg_get_min_parallel_current_ma(chip);
+ if (min_current_thr_ma <= 0) {
+ pr_smb(PR_STATUS, "parallel charger unavailable for thr: %d\n",
+ min_current_thr_ma);
+ return false;
+ }
+
+ if (usb_icl_ma < min_current_thr_ma) {
+ pr_smb(PR_STATUS, "Weak USB chg skip enable: %d < %d\n",
+ usb_icl_ma, min_current_thr_ma);
+ return false;
+ }
+
+ /*
+ * Suspend the parallel charger if the charging current is < 1800 mA
+ * and is not because of an ESR pulse.
+ */
+ if (fcc_voter_id != ESR_PULSE_FCC_VOTER
+ && fcc_ma < PARALLEL_CHG_THRESHOLD_CURRENT) {
+ pr_smb(PR_STATUS, "FCC %d lower than %d\n",
+ fcc_ma,
+ PARALLEL_CHG_THRESHOLD_CURRENT);
+ return false;
+ }
+
+ current_limit_ma = smbchg_get_aicl_level_ma(chip);
+ if (current_limit_ma <= 0)
+ return false;
+
+ if (chip->parallel.initial_aicl_ma == 0) {
+ if (current_limit_ma < min_current_thr_ma) {
+ pr_smb(PR_STATUS, "Initial AICL very low: %d < %d\n",
+ current_limit_ma, min_current_thr_ma);
+ return false;
+ }
+ chip->parallel.initial_aicl_ma = current_limit_ma;
+ }
+
+ parallel_psy->get_property(parallel_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ parallel_cl_ma = pval.intval / 1000;
+ /*
+ * Read back the real amount of current we are getting
+ * Treat 2mA as 0 because that is the suspend current setting
+ */
+ if (parallel_cl_ma <= SUSPEND_CURRENT_MA)
+ parallel_cl_ma = 0;
+
+ /*
+ * The total available input current is the main charger's AICL
+ * result plus the parallel charger's current limit, capped at
+ * the effective USB ICL vote.
+ */
+ total_current_ma = min(current_limit_ma + parallel_cl_ma, usb_icl_ma);
+
+ if (total_current_ma < chip->parallel.initial_aicl_ma
+ - chip->parallel.allowed_lowering_ma) {
+ pr_smb(PR_STATUS,
+ "Total current reduced a lot: %d (%d + %d) < %d - %d\n",
+ total_current_ma,
+ current_limit_ma, parallel_cl_ma,
+ chip->parallel.initial_aicl_ma,
+ chip->parallel.allowed_lowering_ma);
+ return false;
+ }
+
+ *ret_total_current_ma = total_current_ma;
+ return true;
+}
+
+#define PARALLEL_CHARGER_EN_DELAY_MS 500
+static void smbchg_parallel_usb_en_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ parallel_en_work.work);
+ int previous_aicl_ma, total_current_ma, aicl_ma;
+ bool in_progress;
+
+ /* do a check to see if the aicl is stable */
+ previous_aicl_ma = smbchg_get_aicl_level_ma(chip);
+ msleep(PARALLEL_CHARGER_EN_DELAY_MS);
+ aicl_ma = smbchg_get_aicl_level_ma(chip);
+ if (previous_aicl_ma == aicl_ma) {
+ pr_smb(PR_STATUS, "AICL at %d\n", aicl_ma);
+ } else {
+ pr_smb(PR_STATUS,
+ "AICL changed [%d -> %d], recheck %d ms\n",
+ previous_aicl_ma, aicl_ma,
+ PARALLEL_CHARGER_EN_DELAY_MS);
+ goto recheck;
+ }
+
+ mutex_lock(&chip->parallel.lock);
+ in_progress = (chip->parallel.current_max_ma != 0);
+ if (smbchg_is_parallel_usb_ok(chip, &total_current_ma)) {
+ smbchg_parallel_usb_enable(chip, total_current_ma);
+ } else {
+ if (in_progress) {
+ pr_smb(PR_STATUS, "parallel charging unavailable\n");
+ smbchg_parallel_usb_disable(chip);
+ }
+ }
+ mutex_unlock(&chip->parallel.lock);
+ smbchg_relax(chip, PM_PARALLEL_CHECK);
+ return;
+
+recheck:
+ schedule_delayed_work(&chip->parallel_en_work, 0);
+}
+
+static void smbchg_parallel_usb_check_ok(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+
+ if (!parallel_psy || !chip->parallel_charger_detected)
+ return;
+
+ smbchg_stay_awake(chip, PM_PARALLEL_CHECK);
+ schedule_delayed_work(&chip->parallel_en_work, 0);
+}
+
+static int charging_suspend_vote_cb(struct device *dev, int suspend,
+ int client, int last_suspend,
+ int last_client)
+{
+ int rc;
+ struct smbchg_chip *chip = dev_get_drvdata(dev);
+
+ rc = smbchg_charging_en(chip, !suspend);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't configure batt chg: 0x%x rc = %d\n",
+ !suspend, rc);
+ }
+
+ return rc;
+}
+
+static int usb_suspend_vote_cb(struct device *dev, int suspend,
+ int client, int last_suspend,
+ int last_client)
+{
+ int rc;
+ struct smbchg_chip *chip = dev_get_drvdata(dev);
+
+ rc = smbchg_usb_suspend(chip, suspend);
+ if (rc < 0)
+ return rc;
+
+ if (client == THERMAL_EN_VOTER || client == POWER_SUPPLY_EN_VOTER ||
+ client == USER_EN_VOTER ||
+ client == FAKE_BATTERY_EN_VOTER)
+ smbchg_parallel_usb_check_ok(chip);
+
+ return rc;
+}
+
+static int dc_suspend_vote_cb(struct device *dev, int suspend,
+ int client, int last_suspend,
+ int last_client)
+{
+ int rc;
+ struct smbchg_chip *chip = dev_get_drvdata(dev);
+
+ rc = smbchg_dc_suspend(chip, suspend);
+ if (rc < 0)
+ return rc;
+
+ if (chip->dc_psy_type != -EINVAL && chip->psy_registered)
+ power_supply_changed(&chip->dc_psy);
+
+ return rc;
+}
+
+static int set_fastchg_current_vote_cb(struct device *dev,
+ int fcc_ma,
+ int client,
+ int last_fcc_ma,
+ int last_client)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(dev);
+ int rc;
+
+ if (chip->parallel.current_max_ma == 0) {
+ rc = smbchg_set_fastchg_current_raw(chip, fcc_ma);
+ if (rc < 0) {
+ pr_err("Can't set FCC fcc_ma=%d rc=%d\n", fcc_ma, rc);
+ return rc;
+ }
+ }
+ /*
+ * check if parallel charging can be enabled, and if enabled,
+ * distribute the fcc
+ */
+ smbchg_parallel_usb_check_ok(chip);
+ return 0;
+}
+
+static int smbchg_set_fastchg_current_user(struct smbchg_chip *chip,
+ int current_ma)
+{
+ int rc = 0;
+
+ pr_smb(PR_STATUS, "User setting FCC to %d\n", current_ma);
+
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true, current_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote en rc %d\n", rc);
+ return rc;
+}
+
+static struct ilim_entry *smbchg_wipower_find_entry(struct smbchg_chip *chip,
+ struct ilim_map *map, int uv)
+{
+ int i;
+ struct ilim_entry *ret = &(chip->wipower_default.entries[0]);
+
+ for (i = 0; i < map->num; i++) {
+ if (is_between(map->entries[i].vmin_uv, map->entries[i].vmax_uv,
+ uv))
+ ret = &map->entries[i];
+ }
+ return ret;
+}
+
+#define ZIN_ICL_PT 0xFC
+#define ZIN_ICL_LV 0xFD
+#define ZIN_ICL_HV 0xFE
+#define ZIN_ICL_MASK SMB_MASK(4, 0)
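+/*
+ * ZIN_ICL_PT/LV/HV hold the DCIN current limits that correspond to the
+ * icl_pt_ma/icl_lv_ma/icl_hv_ma fields of an ilim_entry, programmed by
+ * smbchg_wipower_ilim_config() below.
+ */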
+static int smbchg_dcin_ilim_config(struct smbchg_chip *chip, int offset, int ma)
+{
+ int i, rc;
+
+ i = find_smaller_in_array(chip->tables.dc_ilim_ma_table,
+ ma, chip->tables.dc_ilim_ma_len);
+
+ if (i < 0)
+ i = 0;
+
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + offset,
+ ZIN_ICL_MASK, i);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write bat if offset %d value = %d rc = %d\n",
+ offset, i, rc);
+ return rc;
+}
+
+static int smbchg_wipower_ilim_config(struct smbchg_chip *chip,
+ struct ilim_entry *ilim)
+{
+ int rc = 0;
+
+ if (chip->current_ilim.icl_pt_ma != ilim->icl_pt_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_PT, ilim->icl_pt_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_PT, ilim->icl_pt_ma, rc);
+ else
+ chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma;
+ }
+
+ if (chip->current_ilim.icl_lv_ma != ilim->icl_lv_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_LV, ilim->icl_lv_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_LV, ilim->icl_lv_ma, rc);
+ else
+ chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma;
+ }
+
+ if (chip->current_ilim.icl_hv_ma != ilim->icl_hv_ma) {
+ rc = smbchg_dcin_ilim_config(chip, ZIN_ICL_HV, ilim->icl_hv_ma);
+ if (rc)
+ dev_err(chip->dev, "failed to write batif offset %d %dma rc = %d\n",
+ ZIN_ICL_HV, ilim->icl_hv_ma, rc);
+ else
+ chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma;
+ }
+ return rc;
+}
+
+static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx);
+static int smbchg_wipower_dcin_btm_configure(struct smbchg_chip *chip,
+ struct ilim_entry *ilim)
+{
+ int rc;
+
+ if (ilim->vmin_uv == chip->current_ilim.vmin_uv
+ && ilim->vmax_uv == chip->current_ilim.vmax_uv)
+ return 0;
+
+ chip->param.channel = DCIN;
+ chip->param.btm_ctx = chip;
+ if (wipower_dcin_interval < ADC_MEAS1_INTERVAL_0MS)
+ wipower_dcin_interval = ADC_MEAS1_INTERVAL_0MS;
+
+ if (wipower_dcin_interval > ADC_MEAS1_INTERVAL_16S)
+ wipower_dcin_interval = ADC_MEAS1_INTERVAL_16S;
+
+ chip->param.timer_interval = wipower_dcin_interval;
+ chip->param.threshold_notification = &btm_notify_dcin;
+ chip->param.high_thr = ilim->vmax_uv + wipower_dcin_hyst_uv;
+ chip->param.low_thr = ilim->vmin_uv - wipower_dcin_hyst_uv;
+ chip->param.state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
+ rc = qpnp_vadc_channel_monitor(chip->vadc_dev, &chip->param);
+ if (rc) {
+ dev_err(chip->dev, "Couldn't configure btm for dcin rc = %d\n",
+ rc);
+ } else {
+ chip->current_ilim.vmin_uv = ilim->vmin_uv;
+ chip->current_ilim.vmax_uv = ilim->vmax_uv;
+ pr_smb(PR_STATUS, "btm ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ }
+ return rc;
+}
+
+static int smbchg_wipower_icl_configure(struct smbchg_chip *chip,
+ int dcin_uv, bool div2)
+{
+ int rc = 0;
+ struct ilim_map *map = div2 ? &chip->wipower_div2 : &chip->wipower_pt;
+ struct ilim_entry *ilim = smbchg_wipower_find_entry(chip, map, dcin_uv);
+
+ rc = smbchg_wipower_ilim_config(chip, ilim);
+ if (rc) {
+ dev_err(chip->dev, "failed to config ilim rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ rc, dcin_uv, div2,
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ return rc;
+ }
+
+ rc = smbchg_wipower_dcin_btm_configure(chip, ilim);
+ if (rc) {
+ dev_err(chip->dev, "failed to config btm rc = %d, dcin_uv = %d , div2 = %d, ilim = (%duV %duV %dmA %dmA %dmA)\n",
+ rc, dcin_uv, div2,
+ ilim->vmin_uv, ilim->vmax_uv,
+ ilim->icl_pt_ma, ilim->icl_lv_ma, ilim->icl_hv_ma);
+ return rc;
+ }
+ chip->wipower_configured = true;
+ return 0;
+}
+
+static void smbchg_wipower_icl_deconfigure(struct smbchg_chip *chip)
+{
+ int rc;
+ struct ilim_entry *ilim = &(chip->wipower_default.entries[0]);
+
+ if (!chip->wipower_configured)
+ return;
+
+ rc = smbchg_wipower_ilim_config(chip, ilim);
+ if (rc)
+ dev_err(chip->dev, "Couldn't config default ilim rc = %d\n",
+ rc);
+
+ rc = qpnp_vadc_end_channel_monitor(chip->vadc_dev);
+ if (rc)
+ dev_err(chip->dev, "Couldn't de configure btm for dcin rc = %d\n",
+ rc);
+
+ chip->wipower_configured = false;
+ chip->current_ilim.vmin_uv = 0;
+ chip->current_ilim.vmax_uv = 0;
+ chip->current_ilim.icl_pt_ma = ilim->icl_pt_ma;
+ chip->current_ilim.icl_lv_ma = ilim->icl_lv_ma;
+ chip->current_ilim.icl_hv_ma = ilim->icl_hv_ma;
+ pr_smb(PR_WIPOWER, "De config btm\n");
+}
+
+#define FV_STS 0x0C
+#define DIV2_ACTIVE BIT(7)
+static void __smbchg_wipower_check(struct smbchg_chip *chip)
+{
+ int chg_type;
+ bool usb_present, dc_present;
+ int rc;
+ int dcin_uv;
+ bool div2;
+ struct qpnp_vadc_result adc_result;
+ u8 reg;
+
+ if (!wipower_dyn_icl_en) {
+ smbchg_wipower_icl_deconfigure(chip);
+ return;
+ }
+
+ chg_type = get_prop_charge_type(chip);
+ usb_present = is_usb_present(chip);
+ dc_present = is_dc_present(chip);
+ if (chg_type != POWER_SUPPLY_CHARGE_TYPE_NONE
+ && !usb_present
+ && dc_present
+ && chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER) {
+ rc = qpnp_vadc_read(chip->vadc_dev, DCIN, &adc_result);
+ if (rc) {
+ pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc);
+ return;
+ }
+ dcin_uv = adc_result.physical;
+
+ /* check div_by_2 */
+ rc = smbchg_read(chip, &reg, chip->chgr_base + FV_STS, 1);
+ if (rc) {
+ pr_smb(PR_STATUS, "error DCIN read rc = %d\n", rc);
+ return;
+ }
+ div2 = !!(reg & DIV2_ACTIVE);
+
+ pr_smb(PR_WIPOWER,
+ "config ICL chg_type = %d usb = %d dc = %d dcin_uv(adc_code) = %d (0x%x) div2 = %d\n",
+ chg_type, usb_present, dc_present, dcin_uv,
+ adc_result.adc_code, div2);
+ smbchg_wipower_icl_configure(chip, dcin_uv, div2);
+ } else {
+ pr_smb(PR_WIPOWER,
+ "deconfig ICL chg_type = %d usb = %d dc = %d\n",
+ chg_type, usb_present, dc_present);
+ smbchg_wipower_icl_deconfigure(chip);
+ }
+}
+
+static void smbchg_wipower_check(struct smbchg_chip *chip)
+{
+ if (!chip->wipower_dyn_icl_avail)
+ return;
+
+ mutex_lock(&chip->wipower_config);
+ __smbchg_wipower_check(chip);
+ mutex_unlock(&chip->wipower_config);
+}
+
+static void btm_notify_dcin(enum qpnp_tm_state state, void *ctx)
+{
+ struct smbchg_chip *chip = ctx;
+
+ mutex_lock(&chip->wipower_config);
+ pr_smb(PR_WIPOWER, "%s state\n",
+ state == ADC_TM_LOW_STATE ? "low" : "high");
+ chip->current_ilim.vmin_uv = 0;
+ chip->current_ilim.vmax_uv = 0;
+ __smbchg_wipower_check(chip);
+ mutex_unlock(&chip->wipower_config);
+}
+
+static int force_dcin_icl_write(void *data, u64 val)
+{
+ struct smbchg_chip *chip = data;
+
+ smbchg_wipower_check(chip);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_dcin_icl_ops, NULL,
+ force_dcin_icl_write, "0x%02llx\n");
+
+/*
+ * set the dc charge path's maximum allowed current draw
+ * that may be limited by the system's thermal level
+ */
+static int set_dc_current_limit_vote_cb(struct device *dev,
+ int icl_ma,
+ int client,
+ int last_icl_ma,
+ int last_client)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(dev);
+
+ return smbchg_set_dc_current_max(chip, icl_ma);
+}
+
+/*
+ * set the usb charge path's maximum allowed current draw
+ * that may be limited by the system's thermal level
+ */
+static int set_usb_current_limit_vote_cb(struct device *dev,
+ int icl_ma,
+ int client,
+ int last_icl_ma,
+ int last_client)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(dev);
+ int rc, aicl_ma, effective_id;
+
+ effective_id = get_effective_client_id_locked(chip->usb_icl_votable);
+
+ /* disable parallel charging if HVDCP is voting for 300mA */
+ if (effective_id == HVDCP_ICL_VOTER)
+ smbchg_parallel_usb_disable(chip);
+
+ if (chip->parallel.current_max_ma == 0) {
+ rc = smbchg_set_usb_current_max(chip, icl_ma);
+ if (rc) {
+ pr_err("Failed to set usb current max: %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* skip the aicl rerun if hvdcp icl voter is active */
+ if (effective_id == HVDCP_ICL_VOTER)
+ return 0;
+
+ aicl_ma = smbchg_get_aicl_level_ma(chip);
+ if (icl_ma > aicl_ma)
+ smbchg_rerun_aicl(chip);
+ smbchg_parallel_usb_check_ok(chip);
+ return 0;
+}
+
+static int smbchg_system_temp_level_set(struct smbchg_chip *chip,
+ int lvl_sel)
+{
+ int rc = 0;
+ int prev_therm_lvl;
+ int thermal_icl_ma;
+
+ if (!chip->thermal_mitigation) {
+ dev_err(chip->dev, "Thermal mitigation not supported\n");
+ return -EINVAL;
+ }
+
+ if (lvl_sel < 0) {
+ dev_err(chip->dev, "Unsupported level selected %d\n", lvl_sel);
+ return -EINVAL;
+ }
+
+ if (lvl_sel >= chip->thermal_levels) {
+ dev_err(chip->dev, "Unsupported level selected %d forcing %d\n",
+ lvl_sel, chip->thermal_levels - 1);
+ lvl_sel = chip->thermal_levels - 1;
+ }
+
+ if (lvl_sel == chip->therm_lvl_sel)
+ return 0;
+
+ mutex_lock(&chip->therm_lvl_lock);
+ prev_therm_lvl = chip->therm_lvl_sel;
+ chip->therm_lvl_sel = lvl_sel;
+ if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) {
+ /*
+ * If the highest level is selected, disable charging by
+ * putting the DC and USB paths into suspend.
+ */
+ rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ goto out;
+ }
+
+ if (chip->therm_lvl_sel == 0) {
+ rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable USB thermal ICL vote rc=%d\n",
+ rc);
+
+ rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable DC thermal ICL vote rc=%d\n",
+ rc);
+ } else {
+ thermal_icl_ma =
+ (int)chip->thermal_mitigation[chip->therm_lvl_sel];
+ rc = vote(chip->usb_icl_votable, THERMAL_ICL_VOTER, true,
+ thermal_icl_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote for USB thermal ICL rc=%d\n", rc);
+
+ rc = vote(chip->dc_icl_votable, THERMAL_ICL_VOTER, true,
+ thermal_icl_ma);
+ if (rc < 0)
+ pr_err("Couldn't vote for DC thermal ICL rc=%d\n", rc);
+ }
+
+ if (prev_therm_lvl == chip->thermal_levels - 1) {
+ /*
+ * If the highest level was previously selected, charging must
+ * have been disabled. Re-enable charging by taking the DC and
+ * USB paths out of suspend.
+ */
+ rc = vote(chip->dc_suspend_votable, THERMAL_EN_VOTER, false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = vote(chip->usb_suspend_votable, THERMAL_EN_VOTER,
+ false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&chip->therm_lvl_lock);
+ return rc;
+}
+
+static int smbchg_ibat_ocp_threshold_ua = 4500000;
+module_param(smbchg_ibat_ocp_threshold_ua, int, 0644);
+
+#define UCONV 1000000LL
+#define MCONV 1000LL
+#define FLASH_V_THRESHOLD 3000000
+#define FLASH_VDIP_MARGIN 100000
+#define VPH_FLASH_VDIP (FLASH_V_THRESHOLD + FLASH_VDIP_MARGIN)
+#define BUCK_EFFICIENCY 800LL
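+/*
+ * Flash headroom calculation (see the function body): with
+ * rbatt = ESR + rpara + rslow, the safe battery current is
+ * (OCV - VPH_FLASH_VDIP) / rbatt, clamped to the OCP threshold. The flash
+ * input voltage is max(vled_max + 0.5 V, 1.2 * vph), and the available flash
+ * current is 0.8 * vph * ibat_flash / vin_flash (BUCK_EFFICIENCY is in units
+ * of 0.1%).
+ */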
+static int smbchg_calc_max_flash_current(struct smbchg_chip *chip)
+{
+ int ocv_uv, esr_uohm, rbatt_uohm, ibat_now, rc;
+ int64_t ibat_flash_ua, avail_flash_ua, avail_flash_power_fw;
+ int64_t ibat_safe_ua, vin_flash_uv, vph_flash_uv;
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_VOLTAGE_OCV, &ocv_uv);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support OCV\n");
+ return 0;
+ }
+
+ rc = get_property_from_fg(chip, POWER_SUPPLY_PROP_RESISTANCE,
+ &esr_uohm);
+ if (rc) {
+ pr_smb(PR_STATUS, "bms psy does not support resistance\n");
+ return 0;
+ }
+
+ rc = msm_bcl_read(BCL_PARAM_CURRENT, &ibat_now);
+ if (rc) {
+ pr_smb(PR_STATUS, "BCL current read failed: %d\n", rc);
+ return 0;
+ }
+
+ rbatt_uohm = esr_uohm + chip->rpara_uohm + chip->rslow_uohm;
+ /*
+ * Calculate the maximum current that can pulled out of the battery
+ * before the battery voltage dips below a safe threshold.
+ */
+ ibat_safe_ua = div_s64((ocv_uv - VPH_FLASH_VDIP) * UCONV,
+ rbatt_uohm);
+
+ if (ibat_safe_ua <= smbchg_ibat_ocp_threshold_ua) {
+ /*
+ * If the calculated current is below the OCP threshold, then
+ * use it as the possible flash current.
+ */
+ ibat_flash_ua = ibat_safe_ua - ibat_now;
+ vph_flash_uv = VPH_FLASH_VDIP;
+ } else {
+ /*
+ * If the calculated current is above the OCP threshold, then
+ * use the ocp threshold instead.
+ *
+ * Any higher current will be tripping the battery OCP.
+ */
+ ibat_flash_ua = smbchg_ibat_ocp_threshold_ua - ibat_now;
+ vph_flash_uv = ocv_uv - div64_s64((int64_t)rbatt_uohm
+ * smbchg_ibat_ocp_threshold_ua, UCONV);
+ }
+ /* Calculate the input voltage of the flash module. */
+ vin_flash_uv = max((chip->vled_max_uv + 500000LL),
+ div64_s64((vph_flash_uv * 1200), 1000));
+ /* Calculate the available power for the flash module. */
+ avail_flash_power_fw = BUCK_EFFICIENCY * vph_flash_uv * ibat_flash_ua;
+ /*
+ * Calculate the available amount of current the flash module can draw
+ * before collapsing the battery (available power / flash input voltage).
+ */
+ avail_flash_ua = div64_s64(avail_flash_power_fw, vin_flash_uv * MCONV);
+ pr_smb(PR_MISC,
+ "avail_iflash=%lld, ocv=%d, ibat=%d, rbatt=%d\n",
+ avail_flash_ua, ocv_uv, ibat_now, rbatt_uohm);
+ return (int)avail_flash_ua;
+}
+
+#define FCC_CMP_CFG 0xF3
+#define FCC_COMP_MASK SMB_MASK(1, 0)
+static int smbchg_fastchg_current_comp_set(struct smbchg_chip *chip,
+ int comp_current)
+{
+ int rc;
+ u8 i;
+
+ for (i = 0; i < chip->tables.fcc_comp_len; i++)
+ if (comp_current == chip->tables.fcc_comp_table[i])
+ break;
+
+ if (i >= chip->tables.fcc_comp_len)
+ return -EINVAL;
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FCC_CMP_CFG,
+ FCC_COMP_MASK, i);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n",
+ rc);
+
+ return rc;
+}
+
+#define CFG_TCC_REG 0xF9
+#define CHG_ITERM_MASK SMB_MASK(2, 0)
+static int smbchg_iterm_set(struct smbchg_chip *chip, int iterm_ma)
+{
+ int rc;
+ u8 reg;
+
+ reg = find_closest_in_array(
+ chip->tables.iterm_ma_table,
+ chip->tables.iterm_ma_len,
+ iterm_ma);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CFG_TCC_REG,
+ CHG_ITERM_MASK, reg);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set tcc (%d) to 0x%02x\n",
+ iterm_ma, reg);
+ chip->iterm_ma = iterm_ma;
+
+ return 0;
+}
+
+#define FV_CMP_CFG 0xF5
+#define FV_COMP_MASK SMB_MASK(5, 0)
+static int smbchg_float_voltage_comp_set(struct smbchg_chip *chip, int code)
+{
+ int rc;
+ u8 val;
+
+ val = code & FV_COMP_MASK;
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + FV_CMP_CFG,
+ FV_COMP_MASK, val);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n",
+ rc);
+
+ return rc;
+}
+
+#define VFLOAT_CFG_REG 0xF4
+#define MIN_FLOAT_MV 3600
+#define MAX_FLOAT_MV 4500
+#define VFLOAT_MASK SMB_MASK(5, 0)
+
+#define MID_RANGE_FLOAT_MV_MIN 3600
+#define MID_RANGE_FLOAT_MIN_VAL 0x05
+#define MID_RANGE_FLOAT_STEP_MV 20
+
+#define HIGH_RANGE_FLOAT_MIN_MV 4340
+#define HIGH_RANGE_FLOAT_MIN_VAL 0x2A
+#define HIGH_RANGE_FLOAT_STEP_MV 10
+
+#define VHIGH_RANGE_FLOAT_MIN_MV 4360
+#define VHIGH_RANGE_FLOAT_MIN_VAL 0x2C
+#define VHIGH_RANGE_FLOAT_STEP_MV 20
+static int smbchg_float_voltage_set(struct smbchg_chip *chip, int vfloat_mv)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ int rc, delta;
+ u8 temp;
+
+ if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+ dev_err(chip->dev, "bad float voltage mv =%d asked to set\n",
+ vfloat_mv);
+ return -EINVAL;
+ }
+
+ if (vfloat_mv <= HIGH_RANGE_FLOAT_MIN_MV) {
+ /* mid range */
+ delta = vfloat_mv - MID_RANGE_FLOAT_MV_MIN;
+ temp = MID_RANGE_FLOAT_MIN_VAL + delta
+ / MID_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % MID_RANGE_FLOAT_STEP_MV;
+ } else if (vfloat_mv <= VHIGH_RANGE_FLOAT_MIN_MV) {
+ /* high range */
+ delta = vfloat_mv - HIGH_RANGE_FLOAT_MIN_MV;
+ temp = HIGH_RANGE_FLOAT_MIN_VAL + delta
+ / HIGH_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % HIGH_RANGE_FLOAT_STEP_MV;
+ } else {
+ /* very high range */
+ delta = vfloat_mv - VHIGH_RANGE_FLOAT_MIN_MV;
+ temp = VHIGH_RANGE_FLOAT_MIN_VAL + delta
+ / VHIGH_RANGE_FLOAT_STEP_MV;
+ vfloat_mv -= delta % VHIGH_RANGE_FLOAT_STEP_MV;
+ }
+
+ if (parallel_psy) {
+ rc = power_supply_set_voltage_limit(parallel_psy,
+ vfloat_mv + 50);
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage on parallel psy rc: %d\n",
+ rc);
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + VFLOAT_CFG_REG,
+ VFLOAT_MASK, temp);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set float voltage rc = %d\n", rc);
+ else
+ chip->vfloat_mv = vfloat_mv;
+
+ return rc;
+}
+
+static int smbchg_float_voltage_get(struct smbchg_chip *chip)
+{
+ return chip->vfloat_mv;
+}
+
+#define SFT_CFG 0xFD
+#define SFT_EN_MASK SMB_MASK(5, 4)
+#define SFT_TO_MASK SMB_MASK(3, 2)
+#define PRECHG_SFT_TO_MASK SMB_MASK(1, 0)
+#define SFT_TIMER_DISABLE_BIT BIT(5)
+#define PRECHG_SFT_TIMER_DISABLE_BIT BIT(4)
+#define SAFETY_TIME_MINUTES_SHIFT 2
+static int smbchg_safety_timer_enable(struct smbchg_chip *chip, bool enable)
+{
+ int rc;
+ u8 reg;
+
+ if (enable == chip->safety_timer_en)
+ return 0;
+
+ if (enable)
+ reg = 0;
+ else
+ reg = SFT_TIMER_DISABLE_BIT | PRECHG_SFT_TIMER_DISABLE_BIT;
+
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + SFT_CFG,
+ SFT_EN_MASK, reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s safety timer rc = %d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+ chip->safety_timer_en = enable;
+ return 0;
+}
+
+enum skip_reason {
+ REASON_OTG_ENABLED = BIT(0),
+ REASON_FLASH_ENABLED = BIT(1)
+};
+
+#define BAT_IF_TRIM7_REG 0xF7
+#define CFG_750KHZ_BIT BIT(1)
+#define MISC_CFG_NTC_VOUT_REG 0xF3
+#define CFG_NTC_VOUT_FSW_BIT BIT(0)
+static int smbchg_switch_buck_frequency(struct smbchg_chip *chip,
+ bool flash_active)
+{
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA))
+ return 0;
+
+ if (chip->flash_active == flash_active) {
+ pr_smb(PR_STATUS, "Fsw not changed, flash_active: %d\n",
+ flash_active);
+ return 0;
+ }
+
+ /*
+	 * As per the systems team recommendation, before the flash fires, the
+	 * buck switching frequency (Fsw) needs to be increased to 1 MHz. Once
+	 * the flash is disabled, Fsw needs to be set back to 750 kHz.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->misc_base +
+ MISC_CFG_NTC_VOUT_REG, CFG_NTC_VOUT_FSW_BIT,
+ flash_active ? CFG_NTC_VOUT_FSW_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set switching frequency multiplier rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BAT_IF_TRIM7_REG,
+ CFG_750KHZ_BIT, flash_active ? 0 : CFG_750KHZ_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Fsw @ %sHz\n", flash_active ? "1M" : "750K");
+ chip->flash_active = flash_active;
+ return 0;
+}
+
+#define OTG_TRIM6 0xF6
+#define TR_ENB_SKIP_BIT BIT(2)
+#define OTG_EN_BIT BIT(0)
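+/*
+ * otg_pulse_skip_dis below is a bitmask of the skip_reason values above:
+ * pulse skipping stays disabled in hardware as long as any reason bit is
+ * set, and the register is only rewritten when the overall state changes.
+ */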
+static int smbchg_otg_pulse_skip_disable(struct smbchg_chip *chip,
+ enum skip_reason reason, bool disable)
+{
+ int rc;
+ bool disabled;
+
+ disabled = !!chip->otg_pulse_skip_dis;
+ pr_smb(PR_STATUS, "%s pulse skip, reason %d\n",
+ disable ? "disabling" : "enabling", reason);
+ if (disable)
+ chip->otg_pulse_skip_dis |= reason;
+ else
+ chip->otg_pulse_skip_dis &= ~reason;
+ if (disabled == !!chip->otg_pulse_skip_dis)
+ return 0;
+ disabled = !!chip->otg_pulse_skip_dis;
+
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_TRIM6,
+ TR_ENB_SKIP_BIT, disabled ? TR_ENB_SKIP_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s otg pulse skip rc = %d\n",
+ disabled ? "disable" : "enable", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "%s pulse skip\n", disabled ? "disabled" : "enabled");
+ return 0;
+}
+
+#define LOW_PWR_OPTIONS_REG 0xFF
+#define FORCE_TLIM_BIT BIT(4)
+static int smbchg_force_tlim_en(struct smbchg_chip *chip, bool enable)
+{
+ int rc;
+
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + LOW_PWR_OPTIONS_REG,
+ FORCE_TLIM_BIT, enable ? FORCE_TLIM_BIT : 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s otg force tlim rc = %d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static void smbchg_vfloat_adjust_check(struct smbchg_chip *chip)
+{
+ if (!chip->use_vfloat_adjustments)
+ return;
+
+ smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST);
+ pr_smb(PR_STATUS, "Starting vfloat adjustments\n");
+ schedule_delayed_work(&chip->vfloat_adjust_work, 0);
+}
+
+#define FV_STS_REG 0xC
+#define AICL_INPUT_STS_BIT BIT(6)
+static bool smbchg_is_input_current_limited(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + FV_STS_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read FV_STS rc=%d\n", rc);
+ return false;
+ }
+
+ return !!(reg & AICL_INPUT_STS_BIT);
+}
+
+#define SW_ESR_PULSE_MS 1500
+static void smbchg_cc_esr_wa_check(struct smbchg_chip *chip)
+{
+ int rc, esr_count;
+
+ if (!(chip->wa_flags & SMBCHG_CC_ESR_WA))
+ return;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip)) {
+ pr_smb(PR_STATUS, "No inputs present, skipping\n");
+ return;
+ }
+
+ if (get_prop_charge_type(chip) != POWER_SUPPLY_CHARGE_TYPE_FAST) {
+ pr_smb(PR_STATUS, "Not in fast charge, skipping\n");
+ return;
+ }
+
+ if (!smbchg_is_input_current_limited(chip)) {
+ pr_smb(PR_STATUS, "Not input current limited, skipping\n");
+ return;
+ }
+
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1);
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_ESR_COUNT, &esr_count);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "could not read ESR counter rc = %d\n", rc);
+ return;
+ }
+
+ /*
+	 * The esr_count counts down the number of fuel gauge cycles
+	 * remaining before an ESR pulse is needed.
+	 *
+	 * After a successful ESR pulse, this count is reset to some
+	 * high number like 28. If it reaches 0, the fuel gauge
+	 * hardware should force an ESR pulse.
+ *
+ * However, if the device is in constant current charge mode while
+ * being input current limited, the ESR pulse will not affect the
+ * battery current, so the measurement will fail.
+ *
+ * As a failsafe, force a manual ESR pulse if this value is read as
+ * 0.
+ */
+ if (esr_count != 0) {
+ pr_smb(PR_STATUS, "ESR count is not zero, skipping\n");
+ return;
+ }
+
+ pr_smb(PR_STATUS, "Lowering charge current for ESR pulse\n");
+ smbchg_stay_awake(chip, PM_ESR_PULSE);
+ smbchg_sw_esr_pulse_en(chip, true);
+ msleep(SW_ESR_PULSE_MS);
+ pr_smb(PR_STATUS, "Raising charge current for ESR pulse\n");
+ smbchg_relax(chip, PM_ESR_PULSE);
+ smbchg_sw_esr_pulse_en(chip, false);
+}
+
+static void smbchg_soc_changed(struct smbchg_chip *chip)
+{
+ smbchg_cc_esr_wa_check(chip);
+}
+
+#define DC_AICL_CFG 0xF3
+#define MISC_TRIM_OPT_15_8 0xF5
+#define USB_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3))
+#define USB_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3))
+#define USB_AICL_DEGLITCH_LONG 0
+#define DC_AICL_DEGLITCH_MASK (BIT(5) | BIT(4) | BIT(3))
+#define DC_AICL_DEGLITCH_SHORT (BIT(5) | BIT(4) | BIT(3))
+#define DC_AICL_DEGLITCH_LONG 0
+#define AICL_RERUN_MASK (BIT(5) | BIT(4))
+#define AICL_RERUN_ON (BIT(5) | BIT(4))
+#define AICL_RERUN_OFF 0
+
+static int smbchg_hw_aicl_rerun_enable_indirect_cb(struct device *dev,
+ int enable,
+ int client, int last_enable,
+ int last_client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = dev_get_drvdata(dev);
+
+ /*
+ * If the indirect voting result of all the clients is to enable hw aicl
+ * rerun, then remove our vote to disable hw aicl rerun
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ HW_AICL_RERUN_ENABLE_INDIRECT_VOTER, !enable, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote for hw rerun rc= %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int smbchg_hw_aicl_rerun_disable_cb(struct device *dev, int disable,
+ int client, int last_disable,
+ int last_client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = dev_get_drvdata(dev);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TRIM_OPT_15_8,
+ AICL_RERUN_MASK, disable ? AICL_RERUN_OFF : AICL_RERUN_ON);
+ if (rc < 0)
+ pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_aicl_deglitch_config_cb(struct device *dev, int shorter,
+ int client, int last_result,
+ int last_client)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = dev_get_drvdata(dev);
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USB_AICL_CFG,
+ USB_AICL_DEGLITCH_MASK,
+ shorter ? USB_AICL_DEGLITCH_SHORT : USB_AICL_DEGLITCH_LONG);
+ if (rc < 0) {
+ pr_err("Couldn't write to USB_AICL_CFG rc=%d\n", rc);
+ return rc;
+ }
+ rc = smbchg_sec_masked_write(chip,
+ chip->dc_chgpth_base + DC_AICL_CFG,
+ DC_AICL_DEGLITCH_MASK,
+ shorter ? DC_AICL_DEGLITCH_SHORT : DC_AICL_DEGLITCH_LONG);
+ if (rc < 0) {
+ pr_err("Couldn't write to DC_AICL_CFG rc=%d\n", rc);
+ return rc;
+ }
+ return rc;
+}
+
+static void smbchg_aicl_deglitch_wa_en(struct smbchg_chip *chip, bool en)
+{
+ int rc;
+
+ rc = vote(chip->aicl_deglitch_short_votable,
+ VARB_WORKAROUND_VOTER, en, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote %s deglitch rc=%d\n",
+ en ? "short" : "long", rc);
+ return;
+ }
+ pr_smb(PR_STATUS, "AICL deglitch set to %s\n", en ? "short" : "long");
+
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ VARB_WORKAROUND_VOTER, en, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote hw aicl rerun rc= %d\n", rc);
+ return;
+ }
+ chip->aicl_deglitch_short = en;
+}
+
+static void smbchg_aicl_deglitch_wa_check(struct smbchg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ int rc;
+ bool low_volt_chgr = true;
+
+ if (!(chip->wa_flags & SMBCHG_AICL_DEGLITCH_WA))
+ return;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip)) {
+ pr_smb(PR_STATUS, "Charger removed\n");
+ smbchg_aicl_deglitch_wa_en(chip, false);
+ return;
+ }
+
+ if (!chip->bms_psy)
+ return;
+
+ if (is_usb_present(chip)) {
+ if (is_hvdcp_present(chip))
+ low_volt_chgr = false;
+ } else if (is_dc_present(chip)) {
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER)
+ low_volt_chgr = false;
+ else
+ low_volt_chgr = chip->low_volt_dcin;
+ }
+
+ if (!low_volt_chgr) {
+ pr_smb(PR_STATUS, "High volt charger! Don't set deglitch\n");
+ smbchg_aicl_deglitch_wa_en(chip, false);
+ return;
+ }
+
+	/*
+	 * It is possible that the battery voltage was above the threshold
+	 * when the charger was inserted and later drops below it because of
+	 * system load. AICL deglitch should not be reconfigured when that
+	 * happens, as it would reintroduce the oscillation this workaround
+	 * fixes. Latch the flag once when the battery voltage crosses the
+	 * threshold (e.g. 4.2 V) and clear it only when the charger is
+	 * removed.
+	 */
+ if (!chip->vbat_above_headroom) {
+ rc = chip->bms_psy->get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN, &prop);
+ if (rc < 0) {
+ pr_err("could not read voltage_min, rc=%d\n", rc);
+ return;
+ }
+ chip->vbat_above_headroom = !prop.intval;
+ }
+ smbchg_aicl_deglitch_wa_en(chip, chip->vbat_above_headroom);
+}
+
+#define MISC_TEST_REG 0xE2
+#define BB_LOOP_DISABLE_ICL BIT(2)
+static int smbchg_icl_loop_disable_check(struct smbchg_chip *chip)
+{
+ bool icl_disabled = !chip->chg_otg_enabled && chip->flash_triggered;
+ int rc = 0;
+
+ if ((chip->wa_flags & SMBCHG_FLASH_ICL_DISABLE_WA)
+ && icl_disabled != chip->icl_disabled) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TEST_REG,
+ BB_LOOP_DISABLE_ICL,
+ icl_disabled ? BB_LOOP_DISABLE_ICL : 0);
+ chip->icl_disabled = icl_disabled;
+ }
+
+ return rc;
+}
+
+#define UNKNOWN_BATT_TYPE "Unknown Battery"
+#define LOADING_BATT_TYPE "Loading Battery Data"
+static int smbchg_config_chg_battery_type(struct smbchg_chip *chip)
+{
+ int rc = 0, max_voltage_uv = 0, fastchg_ma = 0, ret = 0, iterm_ua = 0;
+ struct device_node *batt_node, *profile_node;
+ struct device_node *node = chip->spmi->dev.of_node;
+ union power_supply_propval prop = {0,};
+
+ rc = chip->bms_psy->get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_BATTERY_TYPE, &prop);
+ if (rc) {
+ pr_smb(PR_STATUS, "Unable to read battery-type rc=%d\n", rc);
+ return 0;
+ }
+ if (!strcmp(prop.strval, UNKNOWN_BATT_TYPE) ||
+ !strcmp(prop.strval, LOADING_BATT_TYPE)) {
+ pr_smb(PR_MISC, "Battery-type not identified\n");
+ return 0;
+ }
+ /* quit if there is no change in the battery-type from previous */
+ if (chip->battery_type && !strcmp(prop.strval, chip->battery_type))
+ return 0;
+
+ batt_node = of_parse_phandle(node, "qcom,battery-data", 0);
+ if (!batt_node) {
+ pr_smb(PR_MISC, "No batterydata available\n");
+ return 0;
+ }
+
+ profile_node = of_batterydata_get_best_profile(batt_node,
+ "bms", NULL);
+ if (!profile_node) {
+ pr_err("couldn't find profile handle\n");
+ return -EINVAL;
+ }
+ chip->battery_type = prop.strval;
+
+ /* change vfloat */
+ rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
+ &max_voltage_uv);
+ if (rc) {
+ pr_warn("couldn't find battery max voltage rc=%d\n", rc);
+ ret = rc;
+ } else {
+ if (chip->vfloat_mv != (max_voltage_uv / 1000)) {
+ pr_info("Vfloat changed from %dmV to %dmV for battery-type %s\n",
+ chip->vfloat_mv, (max_voltage_uv / 1000),
+ chip->battery_type);
+ rc = smbchg_float_voltage_set(chip,
+ (max_voltage_uv / 1000));
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n", rc);
+ return rc;
+ }
+ }
+ }
+
+ /* change chg term */
+ rc = of_property_read_u32(profile_node, "qcom,chg-term-ua",
+ &iterm_ua);
+ if (rc && rc != -EINVAL) {
+ pr_warn("couldn't read battery term current=%d\n", rc);
+ ret = rc;
+ } else if (!rc) {
+ if (chip->iterm_ma != (iterm_ua / 1000)
+ && !chip->iterm_disabled) {
+ pr_info("Term current changed from %dmA to %dmA for battery-type %s\n",
+ chip->iterm_ma, (iterm_ua / 1000),
+ chip->battery_type);
+ rc = smbchg_iterm_set(chip,
+ (iterm_ua / 1000));
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ }
+ chip->iterm_ma = iterm_ua / 1000;
+ }
+
+ /*
+ * Only configure from profile if fastchg-ma is not defined in the
+ * charger device node.
+ */
+ if (!of_find_property(chip->spmi->dev.of_node,
+ "qcom,fastchg-current-ma", NULL)) {
+ rc = of_property_read_u32(profile_node,
+ "qcom,fastchg-current-ma", &fastchg_ma);
+ if (rc) {
+ ret = rc;
+ } else {
+ pr_smb(PR_MISC,
+				"fastchg-ma changed to %dma for battery-type %s\n",
+ fastchg_ma, chip->battery_type);
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true,
+ fastchg_ma);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't vote for fastchg current rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return ret;
+}
+
+#define MAX_INV_BATT_ID 7700
+#define MIN_INV_BATT_ID 7300
+static void check_battery_type(struct smbchg_chip *chip)
+{
+ union power_supply_propval prop = {0,};
+ bool en;
+
+ if (!chip->bms_psy && chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+ if (chip->bms_psy) {
+ chip->bms_psy->get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_BATTERY_TYPE, &prop);
+ en = (strcmp(prop.strval, UNKNOWN_BATT_TYPE) != 0
+ || chip->charge_unknown_battery)
+ && (strcmp(prop.strval, LOADING_BATT_TYPE) != 0);
+ vote(chip->battchg_suspend_votable,
+ BATTCHG_UNKNOWN_BATTERY_EN_VOTER, !en, 0);
+
+ if (!chip->skip_usb_suspend_for_fake_battery) {
+ chip->bms_psy->get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ /* suspend USB path for invalid battery-id */
+ en = (prop.intval <= MAX_INV_BATT_ID &&
+ prop.intval >= MIN_INV_BATT_ID) ? 1 : 0;
+ vote(chip->usb_suspend_votable, FAKE_BATTERY_EN_VOTER,
+ en, 0);
+ }
+ }
+}
+
+static void smbchg_external_power_changed(struct power_supply *psy)
+{
+ struct smbchg_chip *chip = container_of(psy,
+ struct smbchg_chip, batt_psy);
+ union power_supply_propval prop = {0,};
+ int rc, current_limit = 0, soc;
+ enum power_supply_type usb_supply_type;
+ char *usb_type_name = "null";
+
+ if (chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+
+ smbchg_aicl_deglitch_wa_check(chip);
+ if (chip->bms_psy) {
+ check_battery_type(chip);
+ soc = get_prop_batt_capacity(chip);
+ if (chip->previous_soc != soc) {
+ chip->previous_soc = soc;
+ smbchg_soc_changed(chip);
+ }
+
+ rc = smbchg_config_chg_battery_type(chip);
+ if (rc)
+ pr_smb(PR_MISC,
+ "Couldn't update charger configuration rc=%d\n",
+ rc);
+ }
+
+ rc = chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
+ if (rc == 0)
+ vote(chip->usb_suspend_votable, POWER_SUPPLY_EN_VOTER,
+ !prop.intval, 0);
+
+ rc = chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+ if (rc == 0)
+ current_limit = prop.intval / 1000;
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+
+ if (usb_supply_type != POWER_SUPPLY_TYPE_USB)
+ goto skip_current_for_non_sdp;
+
+ pr_smb(PR_MISC, "usb type = %s current_limit = %d\n",
+ usb_type_name, current_limit);
+
+ rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true,
+ current_limit);
+ if (rc < 0)
+ pr_err("Couldn't update USB PSY ICL vote rc=%d\n", rc);
+
+skip_current_for_non_sdp:
+ smbchg_vfloat_adjust_check(chip);
+
+ power_supply_changed(&chip->batt_psy);
+}
+
+static int smbchg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ chip->otg_retries = 0;
+ chip->chg_otg_enabled = true;
+ smbchg_icl_loop_disable_check(chip);
+ smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, true);
+
+ /* If pin control mode then return from here */
+ if (chip->otg_pinctrl)
+ return rc;
+
+ /* sleep to make sure the pulse skip is actually disabled */
+ msleep(20);
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, OTG_EN_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n", rc);
+ else
+ chip->otg_enable_time = ktime_get();
+ pr_smb(PR_STATUS, "Enabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ if (!chip->otg_pinctrl) {
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n",
+ rc);
+ }
+
+ chip->chg_otg_enabled = false;
+ smbchg_otg_pulse_skip_disable(chip, REASON_OTG_ENABLED, false);
+ smbchg_icl_loop_disable_check(chip);
+ pr_smb(PR_STATUS, "Disabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ u8 reg = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + CMD_CHG_REG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read OTG enable bit rc=%d\n", rc);
+ return rc;
+ }
+
+ return (reg & OTG_EN_BIT) ? 1 : 0;
+}
+
+struct regulator_ops smbchg_otg_reg_ops = {
+ .enable = smbchg_otg_regulator_enable,
+ .disable = smbchg_otg_regulator_disable,
+ .is_enabled = smbchg_otg_regulator_is_enable,
+};
+
+#define USBIN_CHGR_CFG 0xF1
+#define ADAPTER_ALLOWANCE_MASK 0x7
+#define USBIN_ADAPTER_9V 0x3
+#define USBIN_ADAPTER_5V_9V_CONT 0x2
+#define USBIN_ADAPTER_5V_UNREGULATED_9V 0x5
+#define HVDCP_EN_BIT BIT(3)
+static int smbchg_external_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, true, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't suspend charger rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &chip->original_usbin_allowance,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * To disallow source detect and usbin_uv interrupts, set the adapter
+ * allowance to 9V, so that the audio boost operating in reverse never
+ * gets detected as a valid input
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ 0xFF, USBIN_ADAPTER_9V);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Enabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_external_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ rc = vote(chip->usb_suspend_votable, OTG_EN_VOTER, false, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't unsuspend charger rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Reenable HVDCP and set the adapter allowance back to the original
+ * value in order to allow normal USBs to be recognized as a valid
+ * input.
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ 0xFF, chip->original_usbin_allowance);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_STATUS, "Disabling OTG Boost\n");
+ return rc;
+}
+
+static int smbchg_external_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ struct smbchg_chip *chip = rdev_get_drvdata(rdev);
+
+ return get_client_vote(chip->usb_suspend_votable, OTG_EN_VOTER);
+}
+
+struct regulator_ops smbchg_external_otg_reg_ops = {
+ .enable = smbchg_external_otg_regulator_enable,
+ .disable = smbchg_external_otg_regulator_disable,
+ .is_enabled = smbchg_external_otg_regulator_is_enable,
+};
+
+static int smbchg_regulator_init(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ struct regulator_init_data *init_data;
+ struct regulator_config cfg = {};
+ struct device_node *regulator_node;
+
+ regulator_node = of_get_child_by_name(chip->dev->of_node,
+ "qcom,smbcharger-boost-otg");
+
+ init_data = of_get_regulator_init_data(chip->dev, regulator_node);
+ if (!init_data) {
+ dev_err(chip->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ if (init_data->constraints.name) {
+ chip->otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->otg_vreg.rdesc.ops = &smbchg_otg_reg_ops;
+ chip->otg_vreg.rdesc.name = init_data->constraints.name;
+
+ cfg.dev = chip->dev;
+ cfg.init_data = init_data;
+ cfg.driver_data = chip;
+ cfg.of_node = regulator_node;
+
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+
+ chip->otg_vreg.rdev = devm_regulator_register(chip->dev,
+ &chip->otg_vreg.rdesc, &cfg);
+ if (IS_ERR(chip->otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->otg_vreg.rdev);
+ chip->otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "OTG reg failed, rc=%d\n", rc);
+ }
+ }
+
+ if (rc)
+ return rc;
+
+ regulator_node = of_get_child_by_name(chip->dev->of_node,
+ "qcom,smbcharger-external-otg");
+ if (!regulator_node) {
+ dev_dbg(chip->dev, "external-otg node absent\n");
+ return 0;
+ }
+ init_data = of_get_regulator_init_data(chip->dev, regulator_node);
+ if (!init_data) {
+ dev_err(chip->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ if (init_data->constraints.name) {
+ if (of_get_property(chip->dev->of_node,
+ "otg-parent-supply", NULL))
+ init_data->supply_regulator = "otg-parent";
+ chip->ext_otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->ext_otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->ext_otg_vreg.rdesc.ops = &smbchg_external_otg_reg_ops;
+ chip->ext_otg_vreg.rdesc.name = init_data->constraints.name;
+
+ cfg.dev = chip->dev;
+ cfg.init_data = init_data;
+ cfg.driver_data = chip;
+ cfg.of_node = regulator_node;
+
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+
+ chip->ext_otg_vreg.rdev = devm_regulator_register(chip->dev,
+ &chip->ext_otg_vreg.rdesc,
+ &cfg);
+ if (IS_ERR(chip->ext_otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->ext_otg_vreg.rdev);
+ chip->ext_otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "external OTG reg failed, rc=%d\n", rc);
+ }
+ }
+
+ return rc;
+}
+
+#define CMD_CHG_LED_REG 0x43
+#define CHG_LED_CTRL_BIT BIT(0)
+#define LED_SW_CTRL_BIT 0x1
+#define LED_CHG_CTRL_BIT 0x0
+#define CHG_LED_ON 0x03
+#define CHG_LED_OFF 0x00
+#define LED_BLINKING_PATTERN1 0x01
+#define LED_BLINKING_PATTERN2 0x02
+#define LED_BLINKING_CFG_MASK SMB_MASK(2, 1)
+#define CHG_LED_SHIFT 1
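+/*
+ * As implied by the masks above (an inference from these defines, not from
+ * hardware documentation): bit 0 of CMD_CHG_LED_REG appears to select
+ * software vs. charger control of the LED, while bits 2:1 hold the LED
+ * state (off, blinking pattern 1 or 2, or solid on).
+ */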
+static int smbchg_chg_led_controls(struct smbchg_chip *chip)
+{
+ u8 reg, mask;
+ int rc;
+
+ if (chip->cfg_chg_led_sw_ctrl) {
+ /* turn-off LED by default for software control */
+ mask = CHG_LED_CTRL_BIT | LED_BLINKING_CFG_MASK;
+ reg = LED_SW_CTRL_BIT;
+ } else {
+ mask = CHG_LED_CTRL_BIT;
+ reg = LED_CHG_CTRL_BIT;
+ }
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_LED_REG,
+ mask, reg);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "Couldn't write LED_CTRL_BIT rc=%d\n", rc);
+ return rc;
+}
+
+static void smbchg_chg_led_brightness_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct smbchg_chip *chip = container_of(cdev,
+ struct smbchg_chip, led_cdev);
+ u8 reg;
+ int rc;
+
+ reg = (value > LED_OFF) ? CHG_LED_ON << CHG_LED_SHIFT :
+ CHG_LED_OFF << CHG_LED_SHIFT;
+
+ if (value > LED_OFF)
+ power_supply_set_hi_power_state(chip->bms_psy, 1);
+ else
+ power_supply_set_hi_power_state(chip->bms_psy, 0);
+
+ pr_smb(PR_STATUS,
+ "set the charger led brightness to value=%d\n",
+ value);
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + CMD_CHG_LED_REG,
+ LED_BLINKING_CFG_MASK, reg);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n",
+ rc);
+}
+
+static enum
+led_brightness smbchg_chg_led_brightness_get(struct led_classdev *cdev)
+{
+ struct smbchg_chip *chip = container_of(cdev,
+ struct smbchg_chip, led_cdev);
+ u8 reg_val, chg_led_sts;
+ int rc;
+
+ rc = smbchg_read(chip, &reg_val, chip->bat_if_base + CMD_CHG_LED_REG,
+ 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read CHG_LED_REG sts rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ chg_led_sts = (reg_val & LED_BLINKING_CFG_MASK) >> CHG_LED_SHIFT;
+
+ pr_smb(PR_STATUS, "chg_led_sts = %02x\n", chg_led_sts);
+
+ return (chg_led_sts == CHG_LED_OFF) ? LED_OFF : LED_FULL;
+}
+
+static void smbchg_chg_led_blink_set(struct smbchg_chip *chip,
+ unsigned long blinking)
+{
+ u8 reg;
+ int rc;
+
+ if (blinking == 0) {
+ reg = CHG_LED_OFF << CHG_LED_SHIFT;
+ power_supply_set_hi_power_state(chip->bms_psy, 0);
+ } else {
+ power_supply_set_hi_power_state(chip->bms_psy, 1);
+ if (blinking == 1)
+ reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT;
+ else if (blinking == 2)
+ reg = LED_BLINKING_PATTERN2 << CHG_LED_SHIFT;
+ else
+ reg = LED_BLINKING_PATTERN1 << CHG_LED_SHIFT;
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + CMD_CHG_LED_REG,
+ LED_BLINKING_CFG_MASK, reg);
+ if (rc)
+ dev_err(chip->dev, "Couldn't write CHG_LED rc=%d\n",
+ rc);
+}
+
+static ssize_t smbchg_chg_led_blink_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct led_classdev *cdev = dev_get_drvdata(dev);
+ struct smbchg_chip *chip = container_of(cdev, struct smbchg_chip,
+ led_cdev);
+ unsigned long blinking;
+ ssize_t rc = -EINVAL;
+
+ rc = kstrtoul(buf, 10, &blinking);
+ if (rc)
+ return rc;
+
+ smbchg_chg_led_blink_set(chip, blinking);
+
+ return len;
+}
+
+static DEVICE_ATTR(blink, 0664, NULL, smbchg_chg_led_blink_store);
+
+static struct attribute *led_blink_attributes[] = {
+ &dev_attr_blink.attr,
+ NULL,
+};
+
+static struct attribute_group smbchg_led_attr_group = {
+ .attrs = led_blink_attributes
+};
+
+static int smbchg_register_chg_led(struct smbchg_chip *chip)
+{
+ int rc;
+
+ chip->led_cdev.name = "red";
+ chip->led_cdev.brightness_set = smbchg_chg_led_brightness_set;
+ chip->led_cdev.brightness_get = smbchg_chg_led_brightness_get;
+
+ rc = led_classdev_register(chip->dev, &chip->led_cdev);
+ if (rc) {
+ dev_err(chip->dev, "unable to register charger led, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = sysfs_create_group(&chip->led_cdev.dev->kobj,
+ &smbchg_led_attr_group);
+ if (rc) {
+ dev_err(chip->dev, "led sysfs rc: %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int vf_adjust_low_threshold = 5;
+module_param(vf_adjust_low_threshold, int, 0644);
+
+static int vf_adjust_high_threshold = 7;
+module_param(vf_adjust_high_threshold, int, 0644);
+
+static int vf_adjust_n_samples = 10;
+module_param(vf_adjust_n_samples, int, 0644);
+
+static int vf_adjust_max_delta_mv = 40;
+module_param(vf_adjust_max_delta_mv, int, 0644);
+
+static int vf_adjust_trim_steps_per_adjust = 1;
+module_param(vf_adjust_trim_steps_per_adjust, int, 0644);
+
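+/*
+ * Rough outline of the vfloat adjustment flow below: while charging is in
+ * taper and parallel charging is idle, up to vf_adjust_n_samples of vbat are
+ * collected; if the maximum sample deviates from the programmed vfloat by
+ * more than the high/low thresholds, the vfloat trim code is nudged by at
+ * most vf_adjust_trim_steps_per_adjust steps and sampling starts over.
+ */
+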
+#define CENTER_TRIM_CODE 7
+#define MAX_LIN_CODE 14
+#define MAX_TRIM_CODE 15
+#define SCALE_SHIFT 4
+#define VF_TRIM_OFFSET_MASK SMB_MASK(3, 0)
+#define VF_STEP_SIZE_MV 10
+#define SCALE_LSB_MV 17
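+/*
+ * Illustrative example for the trim arithmetic below: the 4-bit offset code
+ * is sign-magnitude-like, so code 0x0 corresponds to linear position 7 (the
+ * center). Adding one step moves the linear position to 8, which encodes
+ * back to offset code 8 - 7 = 0x1 while the scale nibble is left untouched.
+ */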
+static int smbchg_trim_add_steps(int prev_trim, int delta_steps)
+{
+ int scale_steps;
+ int linear_offset, linear_scale;
+ int offset_code = prev_trim & VF_TRIM_OFFSET_MASK;
+ int scale_code = (prev_trim & ~VF_TRIM_OFFSET_MASK) >> SCALE_SHIFT;
+
+ if (abs(delta_steps) > 1) {
+ pr_smb(PR_STATUS,
+			"Can't trim multiple steps, delta_steps = %d\n",
+ delta_steps);
+ return prev_trim;
+ }
+ if (offset_code <= CENTER_TRIM_CODE)
+ linear_offset = offset_code + CENTER_TRIM_CODE;
+ else if (offset_code > CENTER_TRIM_CODE)
+ linear_offset = MAX_TRIM_CODE - offset_code;
+
+ if (scale_code <= CENTER_TRIM_CODE)
+ linear_scale = scale_code + CENTER_TRIM_CODE;
+ else if (scale_code > CENTER_TRIM_CODE)
+ linear_scale = scale_code - (CENTER_TRIM_CODE + 1);
+
+	/* check if we can accommodate delta steps with just the offset */
+ if (linear_offset + delta_steps >= 0
+ && linear_offset + delta_steps <= MAX_LIN_CODE) {
+ linear_offset += delta_steps;
+
+ if (linear_offset > CENTER_TRIM_CODE)
+ offset_code = linear_offset - CENTER_TRIM_CODE;
+ else
+ offset_code = MAX_TRIM_CODE - linear_offset;
+
+ return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code;
+ }
+
+ /* changing offset cannot satisfy delta steps, change the scale bits */
+ scale_steps = delta_steps > 0 ? 1 : -1;
+
+ if (linear_scale + scale_steps < 0
+ || linear_scale + scale_steps > MAX_LIN_CODE) {
+ pr_smb(PR_STATUS,
+			"Can't trim, scale_steps = %d delta_steps = %d\n",
+ scale_steps, delta_steps);
+ return prev_trim;
+ }
+
+ linear_scale += scale_steps;
+
+ if (linear_scale > CENTER_TRIM_CODE)
+ scale_code = linear_scale - CENTER_TRIM_CODE;
+ else
+ scale_code = linear_scale + (CENTER_TRIM_CODE + 1);
+ prev_trim = (prev_trim & VF_TRIM_OFFSET_MASK)
+ | scale_code << SCALE_SHIFT;
+
+ /*
+	 * now that we have changed the scale, which is a 17 mV jump, change
+	 * the offset bits (10 mV) too so the effective change is just 7 mV
+ */
+ delta_steps = -1 * delta_steps;
+
+ linear_offset = clamp(linear_offset + delta_steps, 0, MAX_LIN_CODE);
+ if (linear_offset > CENTER_TRIM_CODE)
+ offset_code = linear_offset - CENTER_TRIM_CODE;
+ else
+ offset_code = MAX_TRIM_CODE - linear_offset;
+
+ return (prev_trim & ~VF_TRIM_OFFSET_MASK) | offset_code;
+}
+
+#define TRIM_14 0xFE
+#define VF_TRIM_MASK 0xFF
+static int smbchg_adjust_vfloat_mv_trim(struct smbchg_chip *chip,
+ int delta_mv)
+{
+ int sign, delta_steps, rc = 0;
+ u8 prev_trim, new_trim;
+ int i;
+
+ sign = delta_mv > 0 ? 1 : -1;
+ delta_steps = (delta_mv + sign * VF_STEP_SIZE_MV / 2)
+ / VF_STEP_SIZE_MV;
+
+ rc = smbchg_read(chip, &prev_trim, chip->misc_base + TRIM_14, 1);
+ if (rc) {
+ dev_err(chip->dev, "Unable to read trim 14: %d\n", rc);
+ return rc;
+ }
+
+ for (i = 1; i <= abs(delta_steps)
+ && i <= vf_adjust_trim_steps_per_adjust; i++) {
+ new_trim = (u8)smbchg_trim_add_steps(prev_trim,
+ delta_steps > 0 ? 1 : -1);
+ if (new_trim == prev_trim) {
+ pr_smb(PR_STATUS,
+ "VFloat trim unchanged from %02x\n", prev_trim);
+ /* treat no trim change as an error */
+ return -EINVAL;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_14,
+ VF_TRIM_MASK, new_trim);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't change vfloat trim rc=%d\n", rc);
+ }
+ pr_smb(PR_STATUS,
+ "VFlt trim %02x to %02x, delta steps: %d\n",
+ prev_trim, new_trim, delta_steps);
+ prev_trim = new_trim;
+ }
+
+ return rc;
+}
+
+#define VFLOAT_RESAMPLE_DELAY_MS 10000
+static void smbchg_vfloat_adjust_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ vfloat_adjust_work.work);
+ int vbat_uv, vbat_mv, ibat_ua, rc, delta_vfloat_mv;
+ bool taper, enable;
+
+ smbchg_stay_awake(chip, PM_REASON_VFLOAT_ADJUST);
+ taper = (get_prop_charge_type(chip)
+ == POWER_SUPPLY_CHARGE_TYPE_TAPER);
+ enable = taper && (chip->parallel.current_max_ma == 0);
+
+ if (!enable) {
+ pr_smb(PR_MISC,
+ "Stopping vfloat adj taper=%d parallel_ma = %d\n",
+ taper, chip->parallel.current_max_ma);
+ goto stop;
+ }
+
+ if (get_prop_batt_health(chip) != POWER_SUPPLY_HEALTH_GOOD) {
+ pr_smb(PR_STATUS, "JEITA active, skipping\n");
+ goto stop;
+ }
+
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_UPDATE_NOW, 1);
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &vbat_uv);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy does not support voltage rc = %d\n", rc);
+ goto stop;
+ }
+ vbat_mv = vbat_uv / 1000;
+
+ if ((vbat_mv - chip->vfloat_mv) < -1 * vf_adjust_max_delta_mv) {
+ pr_smb(PR_STATUS, "Skip vbat out of range: %d\n", vbat_mv);
+ goto reschedule;
+ }
+
+ rc = get_property_from_fg(chip,
+ POWER_SUPPLY_PROP_CURRENT_NOW, &ibat_ua);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "bms psy does not support current_now rc = %d\n", rc);
+ goto stop;
+ }
+
+ if (ibat_ua / 1000 > -chip->iterm_ma) {
+ pr_smb(PR_STATUS, "Skip ibat too high: %d\n", ibat_ua);
+ goto reschedule;
+ }
+
+ pr_smb(PR_STATUS, "sample number = %d vbat_mv = %d ibat_ua = %d\n",
+ chip->n_vbat_samples,
+ vbat_mv,
+ ibat_ua);
+
+ chip->max_vbat_sample = max(chip->max_vbat_sample, vbat_mv);
+ chip->n_vbat_samples += 1;
+ if (chip->n_vbat_samples < vf_adjust_n_samples) {
+ pr_smb(PR_STATUS, "Skip %d samples; max = %d\n",
+ chip->n_vbat_samples, chip->max_vbat_sample);
+ goto reschedule;
+ }
+ /* if max vbat > target vfloat, delta_vfloat_mv could be negative */
+ delta_vfloat_mv = chip->vfloat_mv - chip->max_vbat_sample;
+ pr_smb(PR_STATUS, "delta_vfloat_mv = %d, samples = %d, mvbat = %d\n",
+ delta_vfloat_mv, chip->n_vbat_samples, chip->max_vbat_sample);
+ /*
+	 * Enough valid samples have been collected; adjust the trim codes
+	 * based on the maximum of the collected vbat samples if necessary.
+ */
+ if (delta_vfloat_mv > vf_adjust_high_threshold
+ || delta_vfloat_mv < -1 * vf_adjust_low_threshold) {
+ rc = smbchg_adjust_vfloat_mv_trim(chip, delta_vfloat_mv);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "Stopping vfloat adj after trim adj rc = %d\n",
+ rc);
+ goto stop;
+ }
+ chip->max_vbat_sample = 0;
+ chip->n_vbat_samples = 0;
+ goto reschedule;
+ }
+
+stop:
+ chip->max_vbat_sample = 0;
+ chip->n_vbat_samples = 0;
+ smbchg_relax(chip, PM_REASON_VFLOAT_ADJUST);
+ return;
+
+reschedule:
+ schedule_delayed_work(&chip->vfloat_adjust_work,
+ msecs_to_jiffies(VFLOAT_RESAMPLE_DELAY_MS));
+ return;
+}
+
+static int smbchg_charging_status_change(struct smbchg_chip *chip)
+{
+ smbchg_vfloat_adjust_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ return 0;
+}
+
+#define BB_CLMP_SEL 0xF8
+#define BB_CLMP_MASK SMB_MASK(1, 0)
+#define BB_CLMP_VFIX_3338MV 0x1
+#define BB_CLMP_VFIX_3512MV 0x2
+static int smbchg_set_optimal_charging_mode(struct smbchg_chip *chip, int type)
+{
+ int rc;
+ bool hvdcp2 = (type == POWER_SUPPLY_TYPE_USB_HVDCP
+ && smbchg_is_usbin_active_pwr_src(chip));
+
+ /*
+	 * Set the charger switching frequency to 1 MHz for HVDCP 2.0,
+	 * or 750 kHz otherwise.
+ */
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + BAT_IF_TRIM7_REG,
+ CFG_750KHZ_BIT, hvdcp2 ? 0 : CFG_750KHZ_BIT);
+ if (rc) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Set the charger switch frequency clamp voltage threshold to 3.338V
+ * if HVDCP 2.0, or 3.512V otherwise.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->bat_if_base + BB_CLMP_SEL,
+ BB_CLMP_MASK,
+ hvdcp2 ? BB_CLMP_VFIX_3338MV : BB_CLMP_VFIX_3512MV);
+ if (rc) {
+ dev_err(chip->dev, "Cannot set switching freq: %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define DEFAULT_SDP_MA 100
+#define DEFAULT_CDP_MA 1500
+static int smbchg_change_usb_supply_type(struct smbchg_chip *chip,
+ enum power_supply_type type)
+{
+ int rc, current_limit_ma;
+
+ /*
+ * if the type is not unknown, set the type before changing ICL vote
+ * in order to ensure that the correct current limit registers are
+ * used
+ */
+ if (type != POWER_SUPPLY_TYPE_UNKNOWN)
+ chip->usb_supply_type = type;
+
+ /*
+	 * Type-C only supports STD (900 mA), MEDIUM (1500 mA) and HIGH
+	 * (3000 mA) current modes, so skip all BC 1.2 current limits if an
+	 * external Type-C controller is supported.
+	 * Note: SDP current is still set based on USB notifications.
+ */
+ if (chip->typec_psy && (type != POWER_SUPPLY_TYPE_USB))
+ current_limit_ma = chip->typec_current_ma;
+ else if (type == POWER_SUPPLY_TYPE_USB)
+ current_limit_ma = DEFAULT_SDP_MA;
+ else if (type == POWER_SUPPLY_TYPE_USB_CDP)
+ current_limit_ma = DEFAULT_CDP_MA;
+ else if (type == POWER_SUPPLY_TYPE_USB_HVDCP)
+ current_limit_ma = smbchg_default_hvdcp_icl_ma;
+ else if (type == POWER_SUPPLY_TYPE_USB_HVDCP_3)
+ current_limit_ma = smbchg_default_hvdcp3_icl_ma;
+ else
+ current_limit_ma = smbchg_default_dcp_icl_ma;
+
+ pr_smb(PR_STATUS, "Type %d: setting mA = %d\n",
+ type, current_limit_ma);
+ rc = vote(chip->usb_icl_votable, PSY_ICL_VOTER, true,
+ current_limit_ma);
+ if (rc < 0) {
+ pr_err("Couldn't vote for new USB ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ if (!chip->skip_usb_notification)
+ power_supply_set_supply_type(chip->usb_psy, type);
+
+ /* otherwise if it is unknown, set type after the vote */
+ if (type == POWER_SUPPLY_TYPE_UNKNOWN)
+ chip->usb_supply_type = type;
+
+ /* set the correct buck switching frequency */
+ rc = smbchg_set_optimal_charging_mode(chip, type);
+ if (rc < 0)
+ pr_err("Couldn't set charger optimal mode rc=%d\n", rc);
+
+out:
+ return rc;
+}
+
+#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4)
+#define HVDCP_5V 0x00
+#define HVDCP_9V 0x10
+#define USB_CMD_HVDCP_1 0x42
+#define FORCE_HVDCP_2p0 BIT(3)
+
+static int force_9v_hvdcp(struct smbchg_chip *chip)
+{
+ int rc;
+
+ /* Force 5V HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc) {
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Force QC2.0 */
+ rc = smbchg_masked_write(chip,
+ chip->usb_chgpth_base + USB_CMD_HVDCP_1,
+ FORCE_HVDCP_2p0, FORCE_HVDCP_2p0);
+ rc |= smbchg_masked_write(chip,
+ chip->usb_chgpth_base + USB_CMD_HVDCP_1,
+ FORCE_HVDCP_2p0, 0);
+ if (rc < 0) {
+ pr_err("Couldn't force QC2.0 rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Delay to switch into HVDCP 2.0 and avoid UV */
+ msleep(500);
+
+ /* Force 9V HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc)
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n", rc);
+
+ return rc;
+}
+
+static void smbchg_hvdcp_det_work(struct work_struct *work)
+{
+ struct smbchg_chip *chip = container_of(work,
+ struct smbchg_chip,
+ hvdcp_det_work.work);
+ int rc;
+
+ if (is_hvdcp_present(chip)) {
+ if (!chip->hvdcp3_supported &&
+ (chip->wa_flags & SMBCHG_HVDCP_9V_EN_WA)) {
+ /* force HVDCP 2.0 */
+ rc = force_9v_hvdcp(chip);
+ if (rc)
+ pr_err("could not force 9V HVDCP continuing rc=%d\n",
+ rc);
+ }
+ smbchg_change_usb_supply_type(chip,
+ POWER_SUPPLY_TYPE_USB_HVDCP);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_aicl_deglitch_wa_check(chip);
+ }
+}
+
+static int set_usb_psy_dp_dm(struct smbchg_chip *chip, int state)
+{
+ int rc;
+ u8 reg;
+
+ /*
+	 * Ensure that we are not in the middle of an insertion where usbin_uv
+	 * is low and src_detect hasn't gone high yet. If so, force DP float /
+	 * DM float, which guarantees proper type detection.
+ */
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (!rc && !(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_MISC, "overwriting state = %d with %d\n",
+ state, POWER_SUPPLY_DP_DM_DPF_DMF);
+ state = POWER_SUPPLY_DP_DM_DPF_DMF;
+ }
+ pr_smb(PR_MISC, "setting usb psy dp dm = %d\n", state);
+ return power_supply_set_dp_dm(chip->usb_psy, state);
+}
+
+#define APSD_CFG 0xF5
+#define AUTO_SRC_DETECT_EN_BIT BIT(0)
+#define APSD_TIMEOUT_MS 1500
+static void restore_from_hvdcp_detection(struct smbchg_chip *chip)
+{
+ int rc;
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ /* switch to 9V HVDCP */
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0)
+ pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc);
+
+ /* enable HVDCP */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable HVDCP rc=%d\n", rc);
+
+ /* enable APSD */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable APSD rc=%d\n", rc);
+
+ /* Reset back to 5V unregulated */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ ADAPTER_ALLOWANCE_MASK, USBIN_ADAPTER_5V_UNREGULATED_9V);
+ if (rc < 0)
+ pr_err("Couldn't write usb allowance rc=%d\n", rc);
+
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ if (rc < 0)
+ pr_err("Couldn't enable AICL rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = false;
+ chip->pulse_cnt = 0;
+}
+
+#define RESTRICTED_CHG_FCC_PERCENT 50
+static int smbchg_restricted_charging(struct smbchg_chip *chip, bool enable)
+{
+ int current_table_index, fastchg_current;
+ int rc = 0;
+
+	/*
+	 * If enabled, set the FCC to the set point closest to 50% of the
+	 * configured FCC while remaining at or below it.
+	 */
+ current_table_index = find_smaller_in_array(
+ chip->tables.usb_ilim_ma_table,
+ chip->cfg_fastchg_current_ma
+ * RESTRICTED_CHG_FCC_PERCENT / 100,
+ chip->tables.usb_ilim_ma_len);
+ fastchg_current =
+ chip->tables.usb_ilim_ma_table[current_table_index];
+ rc = vote(chip->fcc_votable, RESTRICTED_CHG_FCC_VOTER, enable,
+ fastchg_current);
+
+ pr_smb(PR_STATUS, "restricted_charging set to %d\n", enable);
+ chip->restricted_charging = enable;
+
+ return rc;
+}
+
+static void handle_usb_removal(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ int rc;
+
+ pr_smb(PR_STATUS, "triggered\n");
+ smbchg_aicl_deglitch_wa_check(chip);
+ /* Clear the OV detected status set before */
+ if (chip->usb_ov_det)
+ chip->usb_ov_det = false;
+ /* Clear typec current status */
+ if (chip->typec_psy)
+ chip->typec_current_ma = 0;
+ smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_UNKNOWN);
+ if (!chip->skip_usb_notification) {
+ pr_smb(PR_MISC, "setting usb psy present = %d\n",
+ chip->usb_present);
+ power_supply_set_present(chip->usb_psy, chip->usb_present);
+ }
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DPR_DMR);
+ schedule_work(&chip->usb_set_online_work);
+ pr_smb(PR_MISC, "setting usb psy health UNKNOWN\n");
+ rc = power_supply_set_health_state(chip->usb_psy,
+ POWER_SUPPLY_HEALTH_UNKNOWN);
+ if (rc < 0)
+ pr_smb(PR_STATUS,
+ "usb psy does not allow updating prop %d rc = %d\n",
+ POWER_SUPPLY_HEALTH_UNKNOWN, rc);
+
+ if (parallel_psy && chip->parallel_charger_detected)
+ power_supply_set_present(parallel_psy, false);
+ if (chip->parallel.avail && chip->aicl_done_irq
+ && chip->enable_aicl_wake) {
+ disable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = false;
+ }
+ chip->parallel.enabled_once = false;
+ chip->vbat_above_headroom = false;
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ ICL_OVERRIDE_BIT, 0);
+ if (rc < 0)
+ pr_err("Couldn't set override rc = %d\n", rc);
+
+ vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER, false, 0);
+ chip->usb_icl_delta = 0;
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, false, 0);
+ vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, false, 0);
+ if (!chip->hvdcp_not_supported)
+ restore_from_hvdcp_detection(chip);
+}
+
+static bool is_src_detect_high(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+	return !!(reg & USBIN_SRC_DET_BIT);
+}
+
+static bool is_usbin_uv_high(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ return false;
+ }
+	return !!(reg & USBIN_UV_BIT);
+}
+
+#define HVDCP_NOTIFY_MS 2500
+static void handle_usb_insertion(struct smbchg_chip *chip)
+{
+ struct power_supply *parallel_psy = get_parallel_psy(chip);
+ enum power_supply_type usb_supply_type;
+ int rc;
+ char *usb_type_name = "null";
+
+ pr_smb(PR_STATUS, "triggered\n");
+ /* usb inserted */
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ pr_smb(PR_STATUS,
+		"inserted type = %d (%s)\n", usb_supply_type, usb_type_name);
+
+ smbchg_aicl_deglitch_wa_check(chip);
+ if (chip->typec_psy)
+ update_typec_status(chip);
+ smbchg_change_usb_supply_type(chip, usb_supply_type);
+ if (!chip->skip_usb_notification) {
+ pr_smb(PR_MISC, "setting usb psy present = %d\n",
+ chip->usb_present);
+ power_supply_set_present(chip->usb_psy, chip->usb_present);
+ }
+
+ /* Notify the USB psy if OV condition is not present */
+ if (!chip->usb_ov_det) {
+ /*
+		 * Note that this could still be a very weak charger
+		 * if handle_usb_insertion was triggered from the
+		 * falling edge of a USBIN_OV interrupt
+ */
+ pr_smb(PR_MISC, "setting usb psy health %s\n",
+ chip->very_weak_charger
+ ? "UNSPEC_FAILURE" : "GOOD");
+ rc = power_supply_set_health_state(chip->usb_psy,
+ chip->very_weak_charger
+ ? POWER_SUPPLY_HEALTH_UNSPEC_FAILURE
+ : POWER_SUPPLY_HEALTH_GOOD);
+ if (rc < 0)
+ pr_smb(PR_STATUS,
+ "usb psy does not allow updating prop %d rc = %d\n",
+ POWER_SUPPLY_HEALTH_GOOD, rc);
+ }
+ schedule_work(&chip->usb_set_online_work);
+
+ if (!chip->hvdcp_not_supported &&
+ (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP)) {
+ cancel_delayed_work_sync(&chip->hvdcp_det_work);
+ schedule_delayed_work(&chip->hvdcp_det_work,
+ msecs_to_jiffies(HVDCP_NOTIFY_MS));
+ }
+
+ if (parallel_psy) {
+ rc = power_supply_set_present(parallel_psy, true);
+ chip->parallel_charger_detected = rc ? false : true;
+ if (rc)
+ pr_debug("parallel-charger absent rc=%d\n", rc);
+ }
+
+ if (chip->parallel.avail && chip->aicl_done_irq
+ && !chip->enable_aicl_wake) {
+ rc = enable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = true;
+ }
+}
+
+void update_usb_status(struct smbchg_chip *chip, bool usb_present, bool force)
+{
+ mutex_lock(&chip->usb_status_lock);
+ if (force) {
+ chip->usb_present = usb_present;
+ chip->usb_present ? handle_usb_insertion(chip)
+ : handle_usb_removal(chip);
+ goto unlock;
+ }
+ if (!chip->usb_present && usb_present) {
+ chip->usb_present = usb_present;
+ handle_usb_insertion(chip);
+ } else if (chip->usb_present && !usb_present) {
+ chip->usb_present = usb_present;
+ handle_usb_removal(chip);
+ }
+
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+unlock:
+ mutex_unlock(&chip->usb_status_lock);
+}
+
+static int otg_oc_reset(struct smbchg_chip *chip)
+{
+ int rc;
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, 0);
+ if (rc)
+ pr_err("Failed to disable OTG rc=%d\n", rc);
+
+ msleep(20);
+
+ /*
+	 * There is a possibility that a USBID interrupt might have
+	 * occurred, notifying the USB power supply to disable OTG. We
+	 * should not re-enable OTG in such cases.
+ */
+ if (!is_otg_present(chip)) {
+ pr_smb(PR_STATUS,
+ "OTG is not present, not enabling OTG_EN_BIT\n");
+ goto out;
+ }
+
+ rc = smbchg_masked_write(chip, chip->bat_if_base + CMD_CHG_REG,
+ OTG_EN_BIT, OTG_EN_BIT);
+ if (rc)
+ pr_err("Failed to re-enable OTG rc=%d\n", rc);
+
+out:
+ return rc;
+}
+
+static int get_current_time(unsigned long *now_tm_sec)
+{
+ struct rtc_time tm;
+ struct rtc_device *rtc;
+ int rc;
+
+ rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
+ if (rtc == NULL) {
+ pr_err("%s: unable to open rtc device (%s)\n",
+ __FILE__, CONFIG_RTC_HCTOSYS_DEVICE);
+ return -EINVAL;
+ }
+
+ rc = rtc_read_time(rtc, &tm);
+ if (rc) {
+ pr_err("Error reading rtc device (%s) : %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+
+ rc = rtc_valid_tm(&tm);
+ if (rc) {
+ pr_err("Invalid RTC time (%s): %d\n",
+ CONFIG_RTC_HCTOSYS_DEVICE, rc);
+ goto close_time;
+ }
+ rtc_tm_to_time(&tm, now_tm_sec);
+
+close_time:
+ rtc_class_close(rtc);
+ return rc;
+}
+
+#define AICL_IRQ_LIMIT_SECONDS 60
+#define AICL_IRQ_LIMIT_COUNT 25
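+/*
+ * Sketch of the weak-charger detection below: AICL interrupts are counted
+ * over a rolling AICL_IRQ_LIMIT_SECONDS window. If more than
+ * AICL_IRQ_LIMIT_COUNT fire within it (twice that when parallel charging is
+ * available), the input is treated as a very weak charger: hardware AICL
+ * rerun is disabled and the ICL is voted down to CURRENT_100_MA.
+ */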
+static void increment_aicl_count(struct smbchg_chip *chip)
+{
+ bool bad_charger = false;
+ int max_aicl_count, rc;
+ u8 reg;
+ long elapsed_seconds;
+ unsigned long now_seconds;
+
+ pr_smb(PR_INTERRUPT, "aicl count c:%d dgltch:%d first:%ld\n",
+ chip->aicl_irq_count, chip->aicl_deglitch_short,
+ chip->first_aicl_seconds);
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_1_REG, 1);
+ if (!rc)
+ chip->aicl_complete = reg & AICL_STS_BIT;
+ else
+ chip->aicl_complete = false;
+
+ if (chip->aicl_deglitch_short || chip->force_aicl_rerun) {
+ if (!chip->aicl_irq_count)
+ get_current_time(&chip->first_aicl_seconds);
+ get_current_time(&now_seconds);
+ elapsed_seconds = now_seconds
+ - chip->first_aicl_seconds;
+
+ if (elapsed_seconds > AICL_IRQ_LIMIT_SECONDS) {
+ pr_smb(PR_INTERRUPT,
+ "resetting: elp:%ld first:%ld now:%ld c=%d\n",
+ elapsed_seconds, chip->first_aicl_seconds,
+ now_seconds, chip->aicl_irq_count);
+ chip->aicl_irq_count = 1;
+ get_current_time(&chip->first_aicl_seconds);
+ return;
+ }
+ /*
+ * Double the amount of AICLs allowed if parallel charging is
+ * enabled.
+ */
+ max_aicl_count = AICL_IRQ_LIMIT_COUNT
+ * (chip->parallel.avail ? 2 : 1);
+ chip->aicl_irq_count++;
+
+ if (chip->aicl_irq_count > max_aicl_count) {
+ pr_smb(PR_INTERRUPT, "elp:%ld first:%ld now:%ld c=%d\n",
+ elapsed_seconds, chip->first_aicl_seconds,
+ now_seconds, chip->aicl_irq_count);
+ pr_smb(PR_INTERRUPT, "Disable AICL rerun\n");
+ chip->very_weak_charger = true;
+ bad_charger = true;
+
+ /*
+ * Disable AICL rerun since many interrupts were
+ * triggered in a short time
+ */
+ /* disable hw aicl */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, true, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable hw aicl rerun rc=%d\n",
+ rc);
+ return;
+ }
+
+ /* Vote 100mA current limit */
+ rc = vote(chip->usb_icl_votable, WEAK_CHARGER_ICL_VOTER,
+ true, CURRENT_100_MA);
+ if (rc < 0) {
+ pr_err("Can't vote %d current limit rc=%d\n",
+ CURRENT_100_MA, rc);
+ }
+
+ chip->aicl_irq_count = 0;
+ } else if ((get_prop_charge_type(chip) ==
+ POWER_SUPPLY_CHARGE_TYPE_FAST) &&
+ (reg & AICL_SUSP_BIT)) {
+ /*
+ * If the AICL_SUSP_BIT is on, then AICL reruns have
+ * already been disabled. Set the very weak charger
+ * flag so that the driver reports a bad charger
+ * and does not reenable AICL reruns.
+ */
+ chip->very_weak_charger = true;
+ bad_charger = true;
+ }
+ if (bad_charger) {
+ pr_smb(PR_MISC,
+ "setting usb psy health UNSPEC_FAILURE\n");
+ rc = power_supply_set_health_state(chip->usb_psy,
+ POWER_SUPPLY_HEALTH_UNSPEC_FAILURE);
+ if (rc)
+ pr_err("Couldn't set health on usb psy rc:%d\n",
+ rc);
+ schedule_work(&chip->usb_set_online_work);
+ }
+ }
+}
+
+static int wait_for_usbin_uv(struct smbchg_chip *chip, bool high)
+{
+ int rc;
+ int tries = 3;
+ struct completion *completion = &chip->usbin_uv_lowered;
+ bool usbin_uv;
+
+ if (high)
+ completion = &chip->usbin_uv_raised;
+
+ while (tries--) {
+ rc = wait_for_completion_interruptible_timeout(
+ completion,
+ msecs_to_jiffies(APSD_TIMEOUT_MS));
+ if (rc >= 0)
+ break;
+ }
+
+ usbin_uv = is_usbin_uv_high(chip);
+
+ if (high == usbin_uv)
+ return 0;
+
+	pr_err("usbin uv didn't go to a %s state, still at %s, tries = %d, rc = %d\n",
+ high ? "risen" : "lowered",
+ usbin_uv ? "high" : "low",
+ tries, rc);
+ return -EINVAL;
+}
+
+static int wait_for_src_detect(struct smbchg_chip *chip, bool high)
+{
+ int rc;
+ int tries = 3;
+ struct completion *completion = &chip->src_det_lowered;
+ bool src_detect;
+
+ if (high)
+ completion = &chip->src_det_raised;
+
+ while (tries--) {
+ rc = wait_for_completion_interruptible_timeout(
+ completion,
+ msecs_to_jiffies(APSD_TIMEOUT_MS));
+ if (rc >= 0)
+ break;
+ }
+
+ src_detect = is_src_detect_high(chip);
+
+ if (high == src_detect)
+ return 0;
+
+	pr_err("src detect didn't go to a %s state, still at %s, tries = %d, rc = %d\n",
+ high ? "risen" : "lowered",
+ src_detect ? "high" : "low",
+ tries, rc);
+ return -EINVAL;
+}
+
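+/*
+ * The helper below emulates a cable removal or insertion without touching
+ * the connector: switching the USBIN adapter allowance between 9V-only and
+ * 5V-to-9V continuous makes the present input look invalid or valid again,
+ * which drives the usbin_uv and src_det interrupts through the same
+ * sequence a physical unplug/replug would.
+ */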
+static int fake_insertion_removal(struct smbchg_chip *chip, bool insertion)
+{
+ int rc;
+ bool src_detect;
+ bool usbin_uv;
+
+ if (insertion) {
+ reinit_completion(&chip->src_det_raised);
+ reinit_completion(&chip->usbin_uv_lowered);
+ } else {
+ reinit_completion(&chip->src_det_lowered);
+ reinit_completion(&chip->usbin_uv_raised);
+ }
+
+ /* ensure that usbin uv real time status is in the right state */
+ usbin_uv = is_usbin_uv_high(chip);
+ if (usbin_uv != insertion) {
+ pr_err("Skip faking, usbin uv is already %d\n", usbin_uv);
+ return -EINVAL;
+ }
+
+ /* ensure that src_detect real time status is in the right state */
+ src_detect = is_src_detect_high(chip);
+ if (src_detect == insertion) {
+ pr_err("Skip faking, src detect is already %d\n", src_detect);
+ return -EINVAL;
+ }
+
+ pr_smb(PR_MISC, "Allow only %s charger\n",
+ insertion ? "5-9V" : "9V only");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG,
+ ADAPTER_ALLOWANCE_MASK,
+ insertion ?
+ USBIN_ADAPTER_5V_9V_CONT : USBIN_ADAPTER_9V);
+ if (rc < 0) {
+ pr_err("Couldn't write usb allowance rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on %s usbin uv\n",
+ insertion ? "falling" : "rising");
+ rc = wait_for_usbin_uv(chip, !insertion);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on %s src det\n",
+ insertion ? "rising" : "falling");
+ rc = wait_for_src_detect(chip, insertion);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int smbchg_prepare_for_pulsing(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ u8 reg;
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ goto out;
+ }
+
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+	 * Check if the same HVDCP session is in progress. src_det should be
+	 * high and we should still be in 5V HVDCP.
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ goto out;
+ }
+
+ /* disable HVDCP */
+ pr_smb(PR_MISC, "Disable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable HVDCP rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300);
+ if (rc < 0) {
+ pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "Disable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+ /* fake a removal */
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0) {
+ pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc);
+ goto handle_removal;
+ }
+
+ /* disable APSD */
+ pr_smb(PR_MISC, "Disabling APSD\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable APSD rc=%d\n", rc);
+ goto out;
+ }
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0) {
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+ goto handle_removal;
+ }
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_MISC, "Enable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DMF);
+ /*
+ * DCP will switch to HVDCP in this time by removing the short
+ * between DP DM
+ */
+ msleep(HVDCP_NOTIFY_MS);
+ /*
+ * Check if the same hvdcp session is in progress. src_det should be
+ * high and the usb type should be none since APSD was disabled
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 2s sleep\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ smbchg_read(chip, &reg, chip->misc_base + IDEV_STS, 1);
+ if ((reg >> TYPE_BITS_OFFSET) != 0) {
+ pr_smb(PR_MISC, "type bits set after 2s sleep - abort\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DP0P6_DM3P3);
+ /* Wait 60mS after entering continuous mode */
+ msleep(60);
+
+ return 0;
+out:
+ chip->hvdcp_3_det_ignore_uv = false;
+ restore_from_hvdcp_detection(chip);
+ return rc;
+handle_removal:
+ chip->hvdcp_3_det_ignore_uv = false;
+ update_usb_status(chip, 0, 0);
+ return rc;
+}
+
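+/*
+ * Undo smbchg_prepare_for_pulsing(): switch the adapter selection back
+ * to 9V, re-enable HVDCP and APSD, and fake a removal/insertion cycle so
+ * the charger type is re-detected normally. On the way out the HVDCP ICL
+ * vote is dropped and a shorter AICL deglitch time is requested.
+ */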
+static int smbchg_unprepare_for_pulsing(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ set_usb_psy_dp_dm(chip, POWER_SUPPLY_DP_DM_DPF_DMF);
+
+ /* switch to 9V HVDCP */
+ pr_smb(PR_MISC, "Switch to 9V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 9V rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable HVDCP */
+ pr_smb(PR_MISC, "Enable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, HVDCP_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable HVDCP rc=%d\n", rc);
+ return rc;
+ }
+
+ /* enable APSD */
+ pr_smb(PR_MISC, "Enabling APSD\n");
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + APSD_CFG,
+ AUTO_SRC_DETECT_EN_BIT, AUTO_SRC_DETECT_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Disable AICL */
+ pr_smb(PR_MISC, "Disable AICL\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't disable AICL rc=%d\n", rc);
+ return rc;
+ }
+
+ /* fake a removal */
+ chip->hvdcp_3_det_ignore_uv = true;
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0) {
+ pr_err("Couldn't fake removal rc=%d\n", rc);
+ goto out;
+ }
+
+ /*
+ * reset the enabled once flag for parallel charging so
+ * parallel charging can immediately restart after the HVDCP pulsing
+ * is complete
+ */
+ chip->parallel.enabled_once = false;
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0) {
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+ goto out;
+ }
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ /* Enable AICL */
+ pr_smb(PR_MISC, "Enable AICL\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Couldn't enable AICL rc=%d\n", rc);
+ return rc;
+ }
+
+out:
+ /*
+ * Many QC 2.0 chargers collapse before the AICL deglitch timer can
+ * mitigate the collapse. Hence set the AICL deglitch time to a shorter
+ * period.
+ */
+
+ rc = vote(chip->aicl_deglitch_short_votable,
+ HVDCP_SHORT_DEGLITCH_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't reduce aicl deglitch rc=%d\n", rc);
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = false;
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "HVDCP removed\n");
+ update_usb_status(chip, 0, 0);
+ }
+ return rc;
+}
+
+#define USB_CMD_APSD 0x41
+#define APSD_RERUN BIT(0)
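+/*
+ * Re-run automatic power source detection and wait for the full
+ * usbin_uv / src_det toggle sequence signalled by the interrupt
+ * handlers, so the caller knows detection has completed.
+ */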
+static int rerun_apsd(struct smbchg_chip *chip)
+{
+ int rc;
+
+ reinit_completion(&chip->src_det_raised);
+ reinit_completion(&chip->usbin_uv_lowered);
+ reinit_completion(&chip->src_det_lowered);
+ reinit_completion(&chip->usbin_uv_raised);
+
+ /* re-run APSD */
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + USB_CMD_APSD,
+ APSD_RERUN, APSD_RERUN);
+ if (rc) {
+ pr_err("Couldn't re-run APSD rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on rising usbin uv\n");
+ rc = wait_for_usbin_uv(chip, true);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on falling src det\n");
+ rc = wait_for_src_detect(chip, false);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on falling usbin uv\n");
+ rc = wait_for_usbin_uv(chip, false);
+ if (rc < 0) {
+ pr_err("wait for usbin uv failed rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Waiting on rising src det\n");
+ rc = wait_for_src_detect(chip, true);
+ if (rc < 0) {
+ pr_err("wait for src detect failed rc = %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+#define SCHG_LITE_USBIN_HVDCP_5_9V 0x8
+#define SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK 0x38
+#define SCHG_LITE_USBIN_HVDCP_SEL_IDLE BIT(3)
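+/* Check whether the SCHG-lite input path is already in 5V continuous mode */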
+static bool is_hvdcp_5v_cont_mode(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + USBIN_HVDCP_STS, 1);
+ if (rc) {
+ pr_err("Unable to read HVDCP status rc=%d\n", rc);
+ return false;
+ }
+
+ pr_smb(PR_STATUS, "HVDCP status = %x\n", reg);
+
+ if (reg & SCHG_LITE_USBIN_HVDCP_SEL_IDLE) {
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + INPUT_STS, 1);
+ if (rc) {
+ pr_err("Unable to read INPUT status rc=%d\n", rc);
+ return false;
+ }
+ pr_smb(PR_STATUS, "INPUT status = %x\n", reg);
+ if ((reg & SCHG_LITE_USBIN_HVDCP_5_9V_SEL_MASK) ==
+ SCHG_LITE_USBIN_HVDCP_5_9V)
+ return true;
+ }
+ return false;
+}
+
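+/*
+ * SCHG-lite variant of the pulsing preparation: instead of faking a
+ * removal/insertion, force 5V, vote for a 300mA ICL, and re-run APSD so
+ * the adapter ends up in 5V continuous mode.
+ */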
+static int smbchg_prepare_for_pulsing_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ /* check if HVDCP is already in 5V continuous mode */
+ if (is_hvdcp_5v_cont_mode(chip)) {
+ pr_smb(PR_MISC, "HVDCP by default is in 5V continuous mode\n");
+ return 0;
+ }
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ goto out;
+ }
+
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+ * Check if the same hvdcp session is still in progress: src_det should
+ * still be high and we should still be in 5V HVDCP.
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "HVDCP voting for 300mA ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, true, 300);
+ if (rc < 0) {
+ pr_err("Couldn't vote for 300mA HVDCP ICL rc=%d\n", rc);
+ goto out;
+ }
+
+ pr_smb(PR_MISC, "Disable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, 0);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+
+ /* re-run APSD */
+ rc = rerun_apsd(chip);
+ if (rc) {
+ pr_err("APSD rerun failed\n");
+ goto out;
+ }
+
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_MISC, "Enable AICL\n");
+ smbchg_sec_masked_write(chip, chip->usb_chgpth_base + USB_AICL_CFG,
+ AICL_EN_BIT, AICL_EN_BIT);
+ /*
+ * During this delay the DCP adapter removes the short between D+ and
+ * D- and switches to HVDCP
+ */
+ msleep(HVDCP_NOTIFY_MS);
+ /*
+ * Check if the same hvdcp session is still in progress: src_det should
+ * still be high after the APSD rerun
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 2s sleep\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* we are all set if HVDCP is now in 5V continuous mode */
+ if (!is_hvdcp_5v_cont_mode(chip)) {
+ pr_err("HVDCP could not be set in 5V continuous mode\n");
+ goto out;
+ }
+
+ return 0;
+out:
+ chip->hvdcp_3_det_ignore_uv = false;
+ restore_from_hvdcp_detection(chip);
+ return rc;
+}
+
+static int smbchg_unprepare_for_pulsing_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Forcing 9V HVDCP 2.0\n");
+ rc = force_9v_hvdcp(chip);
+ if (rc) {
+ pr_err("Failed to force 9V HVDCP=%d\n", rc);
+ return rc;
+ }
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ return rc;
+}
+
+#define CMD_HVDCP_2 0x43
+#define SINGLE_INCREMENT BIT(0)
+#define SINGLE_DECREMENT BIT(1)
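+/* On SCHG-lite, single D+/D- pulses are issued via the CMD_HVDCP_2 register */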
+static int smbchg_dp_pulse_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Increment DP\n");
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2,
+ SINGLE_INCREMENT, SINGLE_INCREMENT);
+ if (rc)
+ pr_err("Single-increment failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_dm_pulse_lite(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ pr_smb(PR_MISC, "Decrement DM\n");
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_HVDCP_2,
+ SINGLE_DECREMENT, SINGLE_DECREMENT);
+ if (rc)
+ pr_err("Single-decrement failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smbchg_hvdcp3_confirmed(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ /*
+ * reset the enabled once flag for parallel charging because this is
+ * effectively a new insertion.
+ */
+ chip->parallel.enabled_once = false;
+
+ pr_smb(PR_MISC, "Retracting HVDCP vote for ICL\n");
+ rc = vote(chip->usb_icl_votable, HVDCP_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't retract HVDCP ICL vote rc=%d\n", rc);
+
+ smbchg_change_usb_supply_type(chip, POWER_SUPPLY_TYPE_USB_HVDCP_3);
+
+ return rc;
+}
+
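+/*
+ * Handle POWER_SUPPLY_PROP_DP_DM requests: prepare/unprepare for HVDCP
+ * 3.0 pulsing, issue single D+/D- pulses (tracking pulse_cnt), confirm
+ * HVDCP 3.0, and nudge the SW AICL ICL vote up or down in 100mA steps.
+ */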
+static int smbchg_dp_dm(struct smbchg_chip *chip, int val)
+{
+ int rc = 0;
+ int target_icl_vote_ma;
+
+ switch (val) {
+ case POWER_SUPPLY_DP_DM_PREPARE:
+ if (!is_hvdcp_present(chip)) {
+ pr_err("No pulsing unless HVDCP\n");
+ return -ENODEV;
+ }
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ rc = smbchg_prepare_for_pulsing_lite(chip);
+ else
+ rc = smbchg_prepare_for_pulsing(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_UNPREPARE:
+ if (chip->schg_version == QPNP_SCHG_LITE)
+ rc = smbchg_unprepare_for_pulsing_lite(chip);
+ else
+ rc = smbchg_unprepare_for_pulsing(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3:
+ rc = smbchg_hvdcp3_confirmed(chip);
+ break;
+ case POWER_SUPPLY_DP_DM_DP_PULSE:
+ if (chip->schg_version == QPNP_SCHG)
+ rc = set_usb_psy_dp_dm(chip,
+ POWER_SUPPLY_DP_DM_DP_PULSE);
+ else
+ rc = smbchg_dp_pulse_lite(chip);
+ if (!rc)
+ chip->pulse_cnt++;
+ pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_DM_PULSE:
+ if (chip->schg_version == QPNP_SCHG)
+ rc = set_usb_psy_dp_dm(chip,
+ POWER_SUPPLY_DP_DM_DM_PULSE);
+ else
+ rc = smbchg_dm_pulse_lite(chip);
+ if (!rc && chip->pulse_cnt)
+ chip->pulse_cnt--;
+ pr_smb(PR_MISC, "pulse_cnt = %d\n", chip->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED:
+ chip->hvdcp3_supported = true;
+ pr_smb(PR_MISC, "HVDCP3 supported\n");
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_DOWN:
+ chip->usb_icl_delta -= 100;
+ target_icl_vote_ma = get_client_vote(chip->usb_icl_votable,
+ PSY_ICL_VOTER);
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true,
+ target_icl_vote_ma + chip->usb_icl_delta);
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_UP:
+ chip->usb_icl_delta += 100;
+ target_icl_vote_ma = get_client_vote(chip->usb_icl_votable,
+ PSY_ICL_VOTER);
+ vote(chip->usb_icl_votable, SW_AICL_ICL_VOTER, true,
+ target_icl_vote_ma + chip->usb_icl_delta);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
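+/*
+ * Propagate the Type-C advertised current to the USB psy and re-apply
+ * the current supply type so the new input current limit takes effect.
+ */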
+static void update_typec_capability_status(struct smbchg_chip *chip,
+ const union power_supply_propval *val)
+{
+ int rc;
+
+ pr_smb(PR_TYPEC, "typec capability = %dma\n", val->intval);
+
+ if (!chip->skip_usb_notification) {
+ rc = chip->usb_psy->set_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX, val);
+ if (rc)
+ pr_err("typec failed to set current max rc=%d\n", rc);
+ }
+
+ pr_debug("changing ICL from %dma to %dma\n", chip->typec_current_ma,
+ val->intval);
+ chip->typec_current_ma = val->intval;
+ smbchg_change_usb_supply_type(chip, chip->usb_supply_type);
+}
+
+static void update_typec_otg_status(struct smbchg_chip *chip, int mode,
+ bool force)
+{
+ pr_smb(PR_TYPEC, "typec mode = %d\n", mode);
+
+ if (mode == POWER_SUPPLY_TYPE_DFP) {
+ chip->typec_dfp = true;
+ power_supply_set_usb_otg(chip->usb_psy, chip->typec_dfp);
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ } else if (force || chip->typec_dfp) {
+ chip->typec_dfp = false;
+ power_supply_set_usb_otg(chip->usb_psy, chip->typec_dfp);
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+ }
+}
+
+#define CHARGE_OUTPUT_VTG_RATIO 840
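+/*
+ * Estimate the input current from the VCHG ADC channel: the VCHG reading
+ * is scaled by 1000 / CHARGE_OUTPUT_VTG_RATIO to convert it to uA.
+ */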
+static int smbchg_get_iusb(struct smbchg_chip *chip)
+{
+ int rc, iusb_ua = -EINVAL;
+ struct qpnp_vadc_result adc_result;
+
+ if (!is_usb_present(chip) && !is_dc_present(chip))
+ return 0;
+
+ if (chip->vchg_vadc_dev && chip->vchg_adc_channel != -EINVAL) {
+ rc = qpnp_vadc_read(chip->vchg_vadc_dev,
+ chip->vchg_adc_channel, &adc_result);
+ if (rc) {
+ pr_smb(PR_STATUS,
+ "error in VCHG (channel-%d) read rc = %d\n",
+ chip->vchg_adc_channel, rc);
+ return 0;
+ }
+ iusb_ua = div_s64(adc_result.physical * 1000,
+ CHARGE_OUTPUT_VTG_RATIO);
+ }
+
+ return iusb_ua;
+}
+
+static enum power_supply_property smbchg_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ POWER_SUPPLY_PROP_DP_DM,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_RERUN_AICL,
+ POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+};
+
+static int smbchg_battery_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = container_of(psy,
+ struct smbchg_chip, batt_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ vote(chip->battchg_suspend_votable, BATTCHG_USER_EN_VOTER,
+ !val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ rc = vote(chip->usb_suspend_votable, USER_EN_VOTER,
+ !val->intval, 0);
+ rc = vote(chip->dc_suspend_votable, USER_EN_VOTER,
+ !val->intval, 0);
+ chip->chg_enabled = val->intval;
+ schedule_work(&chip->usb_set_online_work);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ chip->fake_battery_soc = val->intval;
+ power_supply_changed(&chip->batt_psy);
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ smbchg_system_temp_level_set(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ rc = smbchg_set_fastchg_current_user(chip, val->intval / 1000);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smbchg_float_voltage_set(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ rc = smbchg_safety_timer_enable(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ rc = smbchg_switch_buck_frequency(chip, val->intval);
+ if (rc) {
+ pr_err("Couldn't switch buck frequency, rc=%d\n", rc);
+ /*
+ * Trigger a panic if there is an error while switching
+ * buck frequency. This will prevent LS FET damage.
+ */
+ BUG_ON(1);
+ }
+
+ rc = smbchg_otg_pulse_skip_disable(chip,
+ REASON_FLASH_ENABLED, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+ chip->flash_triggered = !!val->intval;
+ smbchg_icl_loop_disable_check(chip);
+ break;
+ case POWER_SUPPLY_PROP_FORCE_TLIM:
+ rc = smbchg_force_tlim_en(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ rc = smbchg_dp_dm(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ smbchg_rerun_aicl(chip);
+ break;
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ rc = smbchg_restricted_charging(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_CAPABILITY:
+ if (chip->typec_psy)
+ update_typec_capability_status(chip, val);
+ break;
+ case POWER_SUPPLY_PROP_TYPEC_MODE:
+ if (chip->typec_psy)
+ update_typec_otg_status(chip, val->intval, false);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smbchg_battery_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CAPACITY:
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ case POWER_SUPPLY_PROP_DP_DM:
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static int smbchg_battery_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = container_of(psy,
+ struct smbchg_chip, batt_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = get_prop_batt_status(chip);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = get_prop_batt_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ val->intval =
+ !get_effective_result(chip->battchg_suspend_votable);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = chip->chg_enabled;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = get_prop_charge_type(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = smbchg_float_voltage_get(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = get_prop_batt_health(chip);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX:
+ val->intval = smbchg_calc_max_flash_current(chip);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = chip->fastchg_current_ma * 1000;
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ val->intval = chip->therm_lvl_sel;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+ val->intval = smbchg_get_aicl_level_ma(chip) * 1000;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+ val->intval = (int)chip->aicl_complete;
+ break;
+ case POWER_SUPPLY_PROP_RESTRICTED_CHARGING:
+ val->intval = (int)chip->restricted_charging;
+ break;
+ /* properties from fg */
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = get_prop_batt_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ val->intval = get_prop_batt_current_now(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = get_prop_batt_voltage_now(chip);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = get_prop_batt_temp(chip);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = get_prop_batt_voltage_max_design(chip);
+ break;
+ case POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE:
+ val->intval = chip->safety_timer_en;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+ val->intval = chip->otg_pulse_skip_dis;
+ break;
+ case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+ val->intval = chip->flash_triggered;
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ val->intval = chip->pulse_cnt;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ val->intval = smbchg_is_input_current_limited(chip);
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
+ val->intval = smbchg_get_iusb(chip);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static char *smbchg_dc_supplicants[] = {
+ "bms",
+};
+
+static enum power_supply_property smbchg_dc_properties[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+};
+
+static int smbchg_dc_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smbchg_chip *chip = container_of(psy,
+ struct smbchg_chip, dc_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ rc = vote(chip->dc_suspend_votable, POWER_SUPPLY_EN_VOTER,
+ !val->intval, 0);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = vote(chip->dc_icl_votable, USER_ICL_VOTER, true,
+ val->intval / 1000);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int smbchg_dc_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smbchg_chip *chip = container_of(psy,
+ struct smbchg_chip, dc_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = is_dc_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = !get_effective_result(chip->dc_suspend_votable);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ /* report whether DC is charging the battery */
+ val->intval = (smbchg_get_pwr_path(chip) == PWR_PATH_DC)
+ && (get_prop_batt_status(chip)
+ == POWER_SUPPLY_STATUS_CHARGING);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ val->intval = chip->dc_max_current_ma * 1000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smbchg_dc_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+#define HOT_BAT_HARD_BIT BIT(0)
+#define HOT_BAT_SOFT_BIT BIT(1)
+#define COLD_BAT_HARD_BIT BIT(2)
+#define COLD_BAT_SOFT_BIT BIT(3)
+#define BAT_OV_BIT BIT(4)
+#define BAT_LOW_BIT BIT(5)
+#define BAT_MISSING_BIT BIT(6)
+#define BAT_TERM_MISSING_BIT BIT(7)
+static irqreturn_t batt_hot_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_hot = !!(reg & HOT_BAT_HARD_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_cold_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_cold = !!(reg & COLD_BAT_HARD_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_warm_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_warm = !!(reg & HOT_BAT_SOFT_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_cool_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_cool = !!(reg & COLD_BAT_SOFT_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t batt_pres_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ chip->batt_present = !(reg & BAT_MISSING_BIT);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_HEALTH,
+ get_prop_batt_health(chip));
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vbat_low_handler(int irq, void *_chip)
+{
+ pr_warn_ratelimited("vbat low\n");
+ return IRQ_HANDLED;
+}
+
+#define CHG_COMP_SFT_BIT BIT(3)
+static irqreturn_t chg_error_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int rc = 0;
+ u8 reg;
+
+ pr_smb(PR_INTERRUPT, "chg-error triggered\n");
+
+ rc = smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read RT_STS rc = %d\n", rc);
+ } else {
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ if (reg & CHG_COMP_SFT_BIT)
+ set_property_on_fg(chip,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+ 1);
+ }
+
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fastchg_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+
+ pr_smb(PR_INTERRUPT, "p2f triggered\n");
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t chg_hot_handler(int irq, void *_chip)
+{
+ pr_warn_ratelimited("chg hot\n");
+ smbchg_wipower_check(_chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t chg_term_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+
+ pr_smb(PR_INTERRUPT, "tcc triggered\n");
+ /*
+ * Charge termination is a pulse and not level triggered. That means
+ * the TCC bit in RT_STS can get cleared by the time this interrupt is
+ * handled. Instead of relying on that bit to determine whether charge
+ * termination happened, simply notify the FG whenever this interrupt
+ * is handled.
+ */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_CHARGE_DONE, 1);
+
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_charging_status_change(chip);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t taper_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ taper_irq_en(chip, false);
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_taper(chip);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t recharge_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->chgr_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ smbchg_parallel_usb_check_ok(chip);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t wdog_timeout_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1);
+ pr_warn_ratelimited("wdog timeout rt_stat = 0x%02x\n", reg);
+ if (chip->psy_registered)
+ power_supply_changed(&chip->batt_psy);
+ smbchg_charging_status_change(chip);
+ return IRQ_HANDLED;
+}
+
+/**
+ * power_ok_handler() - called when the switcher turns on or turns off
+ * @chip: pointer to smbchg_chip
+ * @rt_stat: the status bit indicating switcher turning on or off
+ */
+static irqreturn_t power_ok_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ u8 reg = 0;
+
+ smbchg_read(chip, &reg, chip->misc_base + RT_STS, 1);
+ pr_smb(PR_INTERRUPT, "triggered: 0x%02x\n", reg);
+ return IRQ_HANDLED;
+}
+
+/**
+ * dcin_uv_handler() - called when the dc voltage crosses the uv threshold
+ * @chip: pointer to smbchg_chip
+ * @rt_stat: the status bit indicating whether dc voltage is uv
+ */
+#define DCIN_UNSUSPEND_DELAY_MS 1000
+static irqreturn_t dcin_uv_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool dc_present = is_dc_present(chip);
+
+ pr_smb(PR_STATUS, "chip->dc_present = %d dc_present = %d\n",
+ chip->dc_present, dc_present);
+
+ if (chip->dc_present != dc_present) {
+ /* dc changed */
+ chip->dc_present = dc_present;
+ if (chip->dc_psy_type != -EINVAL && chip->psy_registered)
+ power_supply_changed(&chip->dc_psy);
+ smbchg_charging_status_change(chip);
+ smbchg_aicl_deglitch_wa_check(chip);
+ chip->vbat_above_headroom = false;
+ }
+
+ smbchg_wipower_check(chip);
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbin_ov_handler() - this is called when an overvoltage condition occurs
+ * @chip: pointer to smbchg_chip chip
+ */
+static irqreturn_t usbin_ov_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int rc;
+ u8 reg;
+ bool usb_present;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read usb rt status rc = %d\n", rc);
+ goto out;
+ }
+
+ /* OV condition is detected. Notify it to USB psy */
+ if (reg & USBIN_OV_BIT) {
+ chip->usb_ov_det = true;
+ if (chip->usb_psy) {
+ pr_smb(PR_MISC, "setting usb psy health OV\n");
+ rc = power_supply_set_health_state(chip->usb_psy,
+ POWER_SUPPLY_HEALTH_OVERVOLTAGE);
+ if (rc)
+ pr_smb(PR_STATUS,
+ "usb psy does not allow updating prop %d rc = %d\n",
+ POWER_SUPPLY_HEALTH_OVERVOLTAGE, rc);
+ }
+ } else {
+ chip->usb_ov_det = false;
+ /* If USB is present, then handle the USB insertion */
+ usb_present = is_usb_present(chip);
+ if (usb_present)
+ update_usb_status(chip, usb_present, false);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbin_uv_handler() - this is called when USB charger is removed
+ * @chip: pointer to smbchg_chip chip
+ * @rt_stat: the status bit indicating chg insertion/removal
+ */
+#define ICL_MODE_MASK SMB_MASK(5, 4)
+#define ICL_MODE_HIGH_CURRENT 0
+static irqreturn_t usbin_uv_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ int aicl_level = smbchg_get_aicl_level_ma(chip);
+ int rc;
+ u8 reg;
+
+ rc = smbchg_read(chip, &reg, chip->usb_chgpth_base + RT_STS, 1);
+ if (rc) {
+ pr_err("could not read rt sts: %d", rc);
+ goto out;
+ }
+
+ pr_smb(PR_STATUS,
+ "%s chip->usb_present = %d rt_sts = 0x%02x hvdcp_3_det_ignore_uv = %d aicl = %d\n",
+ chip->hvdcp_3_det_ignore_uv ? "Ignoring":"",
+ chip->usb_present, reg, chip->hvdcp_3_det_ignore_uv,
+ aicl_level);
+
+ /*
+ * set usb_psy's dp=f dm=f if this is a new insertion, i.e. it is
+ * not already src_detected and usbin_uv is seen falling
+ */
+ if (!(reg & USBIN_UV_BIT) && !(reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_MISC, "setting usb psy dp=f dm=f\n");
+ power_supply_set_dp_dm(chip->usb_psy,
+ POWER_SUPPLY_DP_DM_DPF_DMF);
+ }
+
+ if (reg & USBIN_UV_BIT)
+ complete_all(&chip->usbin_uv_raised);
+ else
+ complete_all(&chip->usbin_uv_lowered);
+
+ if (chip->hvdcp_3_det_ignore_uv)
+ goto out;
+
+ if ((reg & USBIN_UV_BIT) && (reg & USBIN_SRC_DET_BIT)) {
+ pr_smb(PR_STATUS, "Very weak charger detected\n");
+ chip->very_weak_charger = true;
+ rc = smbchg_read(chip, &reg,
+ chip->usb_chgpth_base + ICL_STS_2_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Could not read usb icl sts 2: %d\n",
+ rc);
+ goto out;
+ }
+ if ((reg & ICL_MODE_MASK) != ICL_MODE_HIGH_CURRENT) {
+ /*
+ * If AICL is not even enabled, this is either an
+ * SDP or a grossly out of spec charger. Do not
+ * draw any current from it.
+ */
+ rc = vote(chip->usb_suspend_votable,
+ WEAK_CHARGER_EN_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("could not disable charger: %d", rc);
+ } else if (aicl_level == chip->tables.usb_ilim_ma_table[0]) {
+ /*
+ * The adapter is not able to supply even 300mA. Disable hw AICL
+ * reruns, else it is only a matter of time before we get back
+ * here again
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable hw aicl rerun rc=%d\n",
+ rc);
+ }
+ pr_smb(PR_MISC, "setting usb psy health UNSPEC_FAILURE\n");
+ rc = power_supply_set_health_state(chip->usb_psy,
+ POWER_SUPPLY_HEALTH_UNSPEC_FAILURE);
+ if (rc)
+ pr_err("Couldn't set health on usb psy rc:%d\n", rc);
+ schedule_work(&chip->usb_set_online_work);
+ }
+
+ smbchg_wipower_check(chip);
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * src_detect_handler() - this is called on rising edge when USB charger type
+ * is detected and on falling edge when USB voltage falls
+ * below the coarse detect voltage (1V); use it for
+ * handling USB charger insertion and removal.
+ * @chip: pointer to smbchg_chip
+ * @rt_stat: the status bit indicating chg insertion/removal
+ */
+static irqreturn_t src_detect_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool usb_present = is_usb_present(chip);
+ bool src_detect = is_src_detect_high(chip);
+ int rc;
+
+ pr_smb(PR_STATUS,
+ "%s chip->usb_present = %d usb_present = %d src_detect = %d hvdcp_3_det_ignore_uv=%d\n",
+ chip->hvdcp_3_det_ignore_uv ? "Ignoring":"",
+ chip->usb_present, usb_present, src_detect,
+ chip->hvdcp_3_det_ignore_uv);
+
+ if (src_detect)
+ complete_all(&chip->src_det_raised);
+ else
+ complete_all(&chip->src_det_lowered);
+
+ if (chip->hvdcp_3_det_ignore_uv)
+ goto out;
+
+ /*
+ * When VBAT is above the AICL threshold (4.25V) - 180mV (4.07V),
+ * an input collapse due to AICL will actually cause an USBIN_UV
+ * interrupt to fire as well.
+ *
+ * Handle USB insertions and removals in the source detect handler
+ * instead of the USBIN_UV handler since the latter is untrustworthy
+ * when the battery voltage is high.
+ */
+ chip->very_weak_charger = false;
+ /*
+ * a src detect marks a new insertion or a real removal, so vote to
+ * re-enable AICL hw reruns
+ */
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't enable hw aicl rerun rc=%d\n", rc);
+
+ rc = vote(chip->usb_suspend_votable, WEAK_CHARGER_EN_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("could not enable charger: %d\n", rc);
+
+ if (src_detect) {
+ update_usb_status(chip, usb_present, 0);
+ } else {
+ update_usb_status(chip, 0, false);
+ chip->aicl_irq_count = 0;
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+/**
+ * otg_oc_handler() - called when the usb otg goes over current
+ */
+#define NUM_OTG_RETRIES 5
+#define OTG_OC_RETRY_DELAY_US 50000
+static irqreturn_t otg_oc_handler(int irq, void *_chip)
+{
+ int rc;
+ struct smbchg_chip *chip = _chip;
+ s64 elapsed_us = ktime_us_delta(ktime_get(), chip->otg_enable_time);
+
+ pr_smb(PR_INTERRUPT, "triggered\n");
+
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ pr_warn("OTG OC triggered - OTG disabled\n");
+ return IRQ_HANDLED;
+ }
+
+ if (elapsed_us > OTG_OC_RETRY_DELAY_US)
+ chip->otg_retries = 0;
+
+ /*
+ * Due to a HW bug in the PMI8994 charger, the current inrush that
+ * occurs when connecting certain OTG devices can cause the OTG
+ * overcurrent protection to trip.
+ *
+ * The workaround is to retry enabling the OTG when an overcurrent
+ * interrupt fires, up to NUM_OTG_RETRIES times in quick succession.
+ */
+ if (chip->otg_retries < NUM_OTG_RETRIES) {
+ chip->otg_retries += 1;
+ pr_smb(PR_STATUS,
+ "Retrying OTG enable. Try #%d, elapsed_us %lld\n",
+ chip->otg_retries, elapsed_us);
+ rc = otg_oc_reset(chip);
+ if (rc)
+ pr_err("Failed to reset OTG OC state rc=%d\n", rc);
+ chip->otg_enable_time = ktime_get();
+ }
+ return IRQ_HANDLED;
+}
+
+/**
+ * otg_fail_handler() - called when the usb otg fails
+ * (when vbat < OTG UVLO threshold)
+ */
+static irqreturn_t otg_fail_handler(int irq, void *_chip)
+{
+ pr_smb(PR_INTERRUPT, "triggered\n");
+ return IRQ_HANDLED;
+}
+
+/**
+ * aicl_done_handler() - called when the usb AICL algorithm is finished
+ * and a current is set.
+ */
+static irqreturn_t aicl_done_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool usb_present = is_usb_present(chip);
+ int aicl_level = smbchg_get_aicl_level_ma(chip);
+
+ pr_smb(PR_INTERRUPT, "triggered, aicl: %d\n", aicl_level);
+
+ increment_aicl_count(chip);
+
+ if (usb_present)
+ smbchg_parallel_usb_check_ok(chip);
+
+ if (chip->aicl_complete)
+ power_supply_changed(&chip->batt_psy);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * usbid_change_handler() - called when the usb RID changes.
+ * This is used mostly for detecting OTG
+ */
+static irqreturn_t usbid_change_handler(int irq, void *_chip)
+{
+ struct smbchg_chip *chip = _chip;
+ bool otg_present;
+
+ pr_smb(PR_INTERRUPT, "triggered\n");
+
+ otg_present = is_otg_present(chip);
+ if (chip->usb_psy) {
+ pr_smb(PR_MISC, "setting usb psy OTG = %d\n",
+ otg_present ? 1 : 0);
+ power_supply_set_usb_otg(chip->usb_psy, otg_present ? 1 : 0);
+ }
+ if (otg_present)
+ pr_smb(PR_STATUS, "OTG detected\n");
+
+ /* update FG */
+ set_property_on_fg(chip, POWER_SUPPLY_PROP_STATUS,
+ get_prop_batt_status(chip));
+
+ return IRQ_HANDLED;
+}
+
+static int determine_initial_status(struct smbchg_chip *chip)
+{
+ union power_supply_propval type = {0, };
+
+ /*
+ * It is okay to read the interrupt status here since interrupts
+ * haven't been requested yet. Reading the interrupt status clears the
+ * interrupt, so elsewhere it should only be read from the interrupt
+ * handlers themselves.
+ */
+
+ batt_pres_handler(0, chip);
+ batt_hot_handler(0, chip);
+ batt_warm_handler(0, chip);
+ batt_cool_handler(0, chip);
+ batt_cold_handler(0, chip);
+ if (chip->typec_psy) {
+ get_property_from_typec(chip, POWER_SUPPLY_PROP_TYPE, &type);
+ update_typec_otg_status(chip, type.intval, true);
+ } else {
+ usbid_change_handler(0, chip);
+ }
+ src_detect_handler(0, chip);
+
+ chip->usb_present = is_usb_present(chip);
+ chip->dc_present = is_dc_present(chip);
+
+ if (chip->usb_present) {
+ pr_smb(PR_MISC, "setting usb psy dp=f dm=f\n");
+ power_supply_set_dp_dm(chip->usb_psy,
+ POWER_SUPPLY_DP_DM_DPF_DMF);
+ handle_usb_insertion(chip);
+ } else {
+ handle_usb_removal(chip);
+ }
+
+ return 0;
+}
+
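+/*
+ * Pre-charge and fast-charge safety timeout tables, in minutes. The
+ * matching index is programmed into SFT_CFG by smbchg_hw_init().
+ */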
+static int prechg_time[] = {
+ 24,
+ 48,
+ 96,
+ 192,
+};
+static int chg_time[] = {
+ 192,
+ 384,
+ 768,
+ 1536,
+};
+
+enum bpd_type {
+ BPD_TYPE_BAT_NONE,
+ BPD_TYPE_BAT_ID,
+ BPD_TYPE_BAT_THM,
+ BPD_TYPE_BAT_THM_BAT_ID,
+ BPD_TYPE_DEFAULT,
+};
+
+static const char * const bpd_label[] = {
+ [BPD_TYPE_BAT_NONE] = "bpd_none",
+ [BPD_TYPE_BAT_ID] = "bpd_id",
+ [BPD_TYPE_BAT_THM] = "bpd_thm",
+ [BPD_TYPE_BAT_THM_BAT_ID] = "bpd_thm_id",
+};
+
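+/* Map the "qcom,bmd-pin-src" device tree string to a BPD_TYPE_* index */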
+static inline int get_bpd(const char *name)
+{
+ int i = 0;
+ for (i = 0; i < ARRAY_SIZE(bpd_label); i++) {
+ if (strcmp(bpd_label[i], name) == 0)
+ return i;
+ }
+ return -EINVAL;
+}
+
+#define REVISION1_REG 0x0
+#define DIG_MINOR 0
+#define DIG_MAJOR 1
+#define ANA_MINOR 2
+#define ANA_MAJOR 3
+#define CHGR_CFG1 0xFB
+#define RECHG_THRESHOLD_SRC_BIT BIT(1)
+#define TERM_I_SRC_BIT BIT(2)
+#define TERM_SRC_FG BIT(2)
+#define CHG_INHIB_CFG_REG 0xF7
+#define CHG_INHIBIT_50MV_VAL 0x00
+#define CHG_INHIBIT_100MV_VAL 0x01
+#define CHG_INHIBIT_200MV_VAL 0x02
+#define CHG_INHIBIT_300MV_VAL 0x03
+#define CHG_INHIBIT_MASK 0x03
+#define USE_REGISTER_FOR_CURRENT BIT(2)
+#define CHGR_CFG2 0xFC
+#define CHG_EN_SRC_BIT BIT(7)
+#define CHG_EN_POLARITY_BIT BIT(6)
+#define P2F_CHG_TRAN BIT(5)
+#define CHG_BAT_OV_ECC BIT(4)
+#define I_TERM_BIT BIT(3)
+#define AUTO_RECHG_BIT BIT(2)
+#define CHARGER_INHIBIT_BIT BIT(0)
+#define USB51_COMMAND_POL BIT(2)
+#define USB51AC_CTRL BIT(1)
+#define TR_8OR32B 0xFE
+#define BUCK_8_16_FREQ_BIT BIT(0)
+#define BM_CFG 0xF3
+#define BATT_MISSING_ALGO_BIT BIT(2)
+#define BMD_PIN_SRC_MASK SMB_MASK(1, 0)
+#define PIN_SRC_SHIFT 0
+#define CHGR_CFG 0xFF
+#define RCHG_LVL_BIT BIT(0)
+#define VCHG_EN_BIT BIT(1)
+#define VCHG_INPUT_CURRENT_BIT BIT(3)
+#define CFG_AFVC 0xF6
+#define VFLOAT_COMP_ENABLE_MASK SMB_MASK(2, 0)
+#define TR_RID_REG 0xFA
+#define FG_INPUT_FET_DELAY_BIT BIT(3)
+#define TRIM_OPTIONS_7_0 0xF6
+#define INPUT_MISSING_POLLER_EN_BIT BIT(3)
+#define CHGR_CCMP_CFG 0xFA
+#define JEITA_TEMP_HARD_LIMIT_BIT BIT(5)
+#define HVDCP_ADAPTER_SEL_MASK SMB_MASK(5, 4)
+#define HVDCP_ADAPTER_SEL_9V_BIT BIT(4)
+#define HVDCP_AUTH_ALG_EN_BIT BIT(6)
+#define CMD_APSD 0x41
+#define APSD_RERUN_BIT BIT(0)
+#define OTG_CFG 0xF1
+#define HICCUP_ENABLED_BIT BIT(6)
+#define OTG_PIN_POLARITY_BIT BIT(4)
+#define OTG_PIN_ACTIVE_LOW BIT(4)
+#define OTG_EN_CTRL_MASK SMB_MASK(3, 2)
+#define OTG_PIN_CTRL_RID_DIS 0x04
+#define OTG_CMD_CTRL_RID_EN 0x08
+#define AICL_ADC_BIT BIT(6)
+static void batt_ov_wa_check(struct smbchg_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ /* disable the 'battery OV disables charging' feature */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2,
+ CHG_BAT_OV_ECC, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * if battery OV is set:
+ * restart charging by disable/enable charging
+ */
+ rc = smbchg_read(chip, &reg, chip->bat_if_base + RT_STS, 1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read Battery RT status rc = %d\n", rc);
+ return;
+ }
+
+ if (reg & BAT_OV_BIT) {
+ rc = smbchg_charging_en(chip, false);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't disable charging: rc = %d\n", rc);
+ return;
+ }
+
+ /* delay for charging-disable to take effect */
+ msleep(200);
+
+ rc = smbchg_charging_en(chip, true);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't enable charging: rc = %d\n", rc);
+ return;
+ }
+ }
+}
+
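+/*
+ * One-time hardware configuration at probe: program HVDCP selection,
+ * charge-enable control, termination/inhibit/recharge behaviour, safety
+ * timers, battery-missing detection and the initial ICL/FCC votes based
+ * on the parsed device tree settings.
+ */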
+static int smbchg_hw_init(struct smbchg_chip *chip)
+{
+ int rc, i;
+ u8 reg, mask;
+
+ rc = smbchg_read(chip, chip->revision,
+ chip->misc_base + REVISION1_REG, 4);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read revision rc=%d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "Charger Revision DIG: %d.%d; ANA: %d.%d\n",
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR]);
+
+ /* Setup 9V HVDCP */
+ if (!chip->hvdcp_not_supported) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_9V);
+ if (rc < 0) {
+ pr_err("Couldn't set hvdcp config in chgpath_chg rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->aicl_rerun_period_s > 0) {
+ rc = smbchg_set_aicl_rerun_period_s(chip,
+ chip->aicl_rerun_period_s);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set AICL rerun timer rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + TR_RID_REG,
+ FG_INPUT_FET_DELAY_BIT, FG_INPUT_FET_DELAY_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable fg input fet delay rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smbchg_sec_masked_write(chip, chip->misc_base + TRIM_OPTIONS_7_0,
+ INPUT_MISSING_POLLER_EN_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable input missing poller rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /*
+ * Do not force using the current from the register, i.e. use the auto
+ * power source detect (APSD) mA ratings for the initial current values.
+ *
+ * If USE_REGISTER_FOR_CURRENT were set, AICL would not rerun at 9V for
+ * HVDCPs.
+ */
+ rc = smbchg_masked_write(chip, chip->usb_chgpth_base + CMD_IL,
+ USE_REGISTER_FOR_CURRENT, 0);
+
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set input limit cmd rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Set charge enable control via the command register (charging is
+ * enabled by writing 1), enable auto pre-to-fast transition and auto
+ * recharge by default, and enable current termination and charge
+ * inhibition based on the device tree configuration.
+ */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG2,
+ CHG_EN_SRC_BIT | CHG_EN_POLARITY_BIT | P2F_CHG_TRAN
+ | I_TERM_BIT | AUTO_RECHG_BIT | CHARGER_INHIBIT_BIT,
+ CHG_EN_POLARITY_BIT
+ | (chip->chg_inhibit_en ? CHARGER_INHIBIT_BIT : 0)
+ | (chip->iterm_disabled ? I_TERM_BIT : 0));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * enable battery charging to make sure it hasn't been changed earlier
+ * by the bootloader.
+ */
+ rc = smbchg_charging_en(chip, true);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable battery charging=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Based on the configuration, use the analog sensors or the fuelgauge
+ * adc for recharge threshold source.
+ */
+
+ if (chip->chg_inhibit_source_fg)
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1,
+ TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT,
+ TERM_SRC_FG | RECHG_THRESHOLD_SRC_BIT);
+ else
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG1,
+ TERM_I_SRC_BIT | RECHG_THRESHOLD_SRC_BIT, 0);
+
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set chgr_cfg2 rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * control USB suspend via command bits and set correct 100/500mA
+ * polarity on the usb current
+ */
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ USB51_COMMAND_POL | USB51AC_CTRL, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set usb_chgpth cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ check_battery_type(chip);
+
+ /* set the float voltage */
+ if (chip->vfloat_mv != -EINVAL) {
+ rc = smbchg_float_voltage_set(chip, chip->vfloat_mv);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n", rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set vfloat to %d\n", chip->vfloat_mv);
+ }
+
+ /* set the fast charge current compensation */
+ if (chip->fastchg_current_comp != -EINVAL) {
+ rc = smbchg_fastchg_current_comp_set(chip,
+ chip->fastchg_current_comp);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set fastchg current comp rc = %d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set fastchg current comp to %d\n",
+ chip->fastchg_current_comp);
+ }
+
+ /* set the float voltage compensation */
+ if (chip->float_voltage_comp != -EINVAL) {
+ rc = smbchg_float_voltage_comp_set(chip,
+ chip->float_voltage_comp);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set float voltage comp rc = %d\n",
+ rc);
+ return rc;
+ }
+ pr_smb(PR_STATUS, "set float voltage comp to %d\n",
+ chip->float_voltage_comp);
+ }
+
+ /* set iterm */
+ if (chip->iterm_ma != -EINVAL) {
+ if (chip->iterm_disabled) {
+ dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n");
+ return -EINVAL;
+ } else {
+ smbchg_iterm_set(chip, chip->iterm_ma);
+ }
+ }
+
+ /* set the safety timers */
+ if (chip->safety_time != -EINVAL) {
+ reg = (chip->safety_time > 0 ? 0 : SFT_TIMER_DISABLE_BIT) |
+ (chip->prechg_safety_time > 0
+ ? 0 : PRECHG_SFT_TIMER_DISABLE_BIT);
+
+ for (i = 0; i < ARRAY_SIZE(chg_time); i++) {
+ if (chip->safety_time <= chg_time[i]) {
+ reg |= i << SAFETY_TIME_MINUTES_SHIFT;
+ break;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(prechg_time); i++) {
+ if (chip->prechg_safety_time <= prechg_time[i]) {
+ reg |= i;
+ break;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + SFT_CFG,
+ SFT_EN_MASK | SFT_TO_MASK |
+ (chip->prechg_safety_time > 0
+ ? PRECHG_SFT_TO_MASK : 0), reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set safety timer rc = %d\n",
+ rc);
+ return rc;
+ }
+ chip->safety_timer_en = true;
+ } else {
+ rc = smbchg_read(chip, &reg, chip->chgr_base + SFT_CFG, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "Unable to read SFT_CFG rc = %d\n",
+ rc);
+ else if (!(reg & SFT_EN_MASK))
+ chip->safety_timer_en = true;
+ }
+
+ /* configure jeita temperature hard limit */
+ if (chip->jeita_temp_hard_limit >= 0) {
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHGR_CCMP_CFG,
+ JEITA_TEMP_HARD_LIMIT_BIT,
+ chip->jeita_temp_hard_limit
+ ? 0 : JEITA_TEMP_HARD_LIMIT_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set jeita temp hard limit rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* make the buck switch faster to prevent some vbus oscillation */
+ rc = smbchg_sec_masked_write(chip,
+ chip->usb_chgpth_base + TR_8OR32B,
+ BUCK_8_16_FREQ_BIT, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set buck frequency rc = %d\n", rc);
+ return rc;
+ }
+
+ /* battery missing detection */
+ mask = BATT_MISSING_ALGO_BIT;
+ reg = chip->bmd_algo_disabled ? BATT_MISSING_ALGO_BIT : 0;
+ if (chip->bmd_pin_src < BPD_TYPE_DEFAULT) {
+ mask |= BMD_PIN_SRC_MASK;
+ reg |= chip->bmd_pin_src << PIN_SRC_SHIFT;
+ }
+ rc = smbchg_sec_masked_write(chip,
+ chip->bat_if_base + BM_CFG, mask, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set batt_missing config = %d\n",
+ rc);
+ return rc;
+ }
+
+ if (chip->vchg_adc_channel != -EINVAL) {
+ /* configure and enable VCHG */
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CHGR_CFG,
+ VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT,
+ VCHG_INPUT_CURRENT_BIT | VCHG_EN_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set recharge rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ smbchg_charging_status_change(chip);
+
+ vote(chip->usb_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0);
+ vote(chip->dc_suspend_votable, USER_EN_VOTER, !chip->chg_enabled, 0);
+ /* resume threshold */
+ if (chip->resume_delta_mv != -EINVAL) {
+
+ /*
+ * Configure only if the recharge threshold source is not
+ * fuel gauge ADC.
+ */
+ if (!chip->chg_inhibit_source_fg) {
+ if (chip->resume_delta_mv < 100)
+ reg = CHG_INHIBIT_50MV_VAL;
+ else if (chip->resume_delta_mv < 200)
+ reg = CHG_INHIBIT_100MV_VAL;
+ else if (chip->resume_delta_mv < 300)
+ reg = CHG_INHIBIT_200MV_VAL;
+ else
+ reg = CHG_INHIBIT_300MV_VAL;
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHG_INHIB_CFG_REG,
+ CHG_INHIBIT_MASK, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set inhibit val rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smbchg_sec_masked_write(chip,
+ chip->chgr_base + CHGR_CFG,
+ RCHG_LVL_BIT,
+ (chip->resume_delta_mv
+ < chip->tables.rchg_thr_mv)
+ ? 0 : RCHG_LVL_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set recharge rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* DC path current settings */
+ if (chip->dc_psy_type != -EINVAL) {
+ rc = vote(chip->dc_icl_votable, PSY_ICL_VOTER, true,
+ chip->dc_target_current_ma);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't vote for initial DC ICL rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /*
+ * on some devices the battery is powered via external sources which
+ * could raise its voltage above the float voltage. smbchargers go
+ * in to reverse boost in such a situation and the workaround is to
+ * disable float voltage compensation (note that the battery will appear
+ * hot/cold when powered via external source).
+ */
+ if (chip->soft_vfloat_comp_disabled) {
+ rc = smbchg_sec_masked_write(chip, chip->chgr_base + CFG_AFVC,
+ VFLOAT_COMP_ENABLE_MASK, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable soft vfloat rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = vote(chip->fcc_votable, BATT_TYPE_FCC_VOTER, true,
+ chip->cfg_fastchg_current_ma);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't vote fastchg ma rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &chip->original_usbin_allowance,
+ chip->usb_chgpth_base + USBIN_CHGR_CFG, 1);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't read usb allowance rc=%d\n", rc);
+
+ if (chip->wipower_dyn_icl_avail) {
+ rc = smbchg_wipower_ilim_config(chip,
+ &(chip->wipower_default.entries[0]));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set default wipower ilim = %d\n",
+ rc);
+ return rc;
+ }
+ }
+ /* unsuspend dc path, it could be suspended by the bootloader */
+ rc = smbchg_dc_suspend(chip, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't unsuspend dc path= %d\n", rc);
+ return rc;
+ }
+
+ if (chip->force_aicl_rerun) {
+ /* vote to enable hw aicl */
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ DEFAULT_CONFIG_HW_AICL_VOTER, true, 0);
+ if (rc < 0) {
+ pr_err("Couldn't vote enable hw aicl rerun rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->schg_version == QPNP_SCHG_LITE) {
+ /* enable OTG hiccup mode */
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG,
+ HICCUP_ENABLED_BIT, HICCUP_ENABLED_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set OTG OC config rc = %d\n",
+ rc);
+ }
+
+ if (chip->otg_pinctrl) {
+ /* configure OTG enable to pin control active low */
+ rc = smbchg_sec_masked_write(chip, chip->otg_base + OTG_CFG,
+ OTG_PIN_POLARITY_BIT | OTG_EN_CTRL_MASK,
+ OTG_PIN_ACTIVE_LOW | OTG_PIN_CTRL_RID_DIS);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set OTG EN config rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (chip->wa_flags & SMBCHG_BATT_OV_WA)
+ batt_ov_wa_check(chip);
+
+ /* turn off AICL adc for improved accuracy */
+ rc = smbchg_sec_masked_write(chip,
+ chip->misc_base + MISC_TRIM_OPT_15_8, AICL_ADC_BIT, 0);
+ if (rc)
+ pr_err("Couldn't write to MISC_TRIM_OPTIONS_15_8 rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static struct of_device_id smbchg_match_table[] = {
+ {
+ .compatible = "qcom,qpnp-smbcharger",
+ },
+ { },
+};
+
+#define DC_MA_MIN 300
+#define DC_MA_MAX 2000
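+/*
+ * Read a u32 "qcom,<dt_property>" value into prop. When 'optional' is
+ * set, a missing property leaves prop at -EINVAL and clears the error.
+ */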
+#define OF_PROP_READ(chip, prop, dt_property, retval, optional) \
+do { \
+ if (retval) \
+ break; \
+ if (optional) \
+ prop = -EINVAL; \
+ \
+ retval = of_property_read_u32(chip->spmi->dev.of_node, \
+ "qcom," dt_property , \
+ &prop); \
+ \
+ if ((retval == -EINVAL) && optional) \
+ retval = 0; \
+ else if (retval) \
+ dev_err(chip->dev, "Error reading " #dt_property \
+ " property rc = %d\n", rc); \
+} while (0)
+
+#define ILIM_ENTRIES 3
+#define VOLTAGE_RANGE_ENTRIES 2
+#define RANGE_ENTRY (ILIM_ENTRIES + VOLTAGE_RANGE_ENTRIES)
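+/*
+ * Parse a wipower ilim map from the device tree: each entry is five u32
+ * cells - vmin_uv, vmax_uv, and the PT/LV/HV input current limits in mA.
+ */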
+static int smb_parse_wipower_map_dt(struct smbchg_chip *chip,
+ struct ilim_map *map, char *property)
+{
+ struct device_node *node = chip->dev->of_node;
+ int total_elements, size;
+ struct property *prop;
+ const __be32 *data;
+ int num, i;
+
+ prop = of_find_property(node, property, &size);
+ if (!prop) {
+ dev_err(chip->dev, "%s missing\n", property);
+ return -EINVAL;
+ }
+
+ total_elements = size / sizeof(int);
+ if (total_elements % RANGE_ENTRY) {
+ dev_err(chip->dev, "%s table not in multiple of %d, total elements = %d\n",
+ property, RANGE_ENTRY, total_elements);
+ return -EINVAL;
+ }
+
+ data = prop->value;
+ num = total_elements / RANGE_ENTRY;
+ map->entries = devm_kzalloc(chip->dev,
+ num * sizeof(struct ilim_entry), GFP_KERNEL);
+ if (!map->entries) {
+ dev_err(chip->dev, "kzalloc failed for default ilim\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < num; i++) {
+ map->entries[i].vmin_uv = be32_to_cpup(data++);
+ map->entries[i].vmax_uv = be32_to_cpup(data++);
+ map->entries[i].icl_pt_ma = be32_to_cpup(data++);
+ map->entries[i].icl_lv_ma = be32_to_cpup(data++);
+ map->entries[i].icl_hv_ma = be32_to_cpup(data++);
+ }
+ map->num = num;
+ return 0;
+}
+
+static int smb_parse_wipower_dt(struct smbchg_chip *chip)
+{
+ int rc = 0;
+
+ chip->wipower_dyn_icl_avail = false;
+
+ if (!chip->vadc_dev)
+ goto err;
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_default,
+ "qcom,wipower-default-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-pt-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_pt,
+ "qcom,wipower-pt-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-pt-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ rc = smb_parse_wipower_map_dt(chip, &chip->wipower_div2,
+ "qcom,wipower-div2-ilim-map");
+ if (rc) {
+ dev_err(chip->dev, "failed to parse wipower-div2-ilim-map rc = %d\n",
+ rc);
+ goto err;
+ }
+ chip->wipower_dyn_icl_avail = true;
+ return 0;
+err:
+ chip->wipower_default.num = 0;
+ chip->wipower_pt.num = 0;
+ chip->wipower_div2.num = 0;
+ if (chip->wipower_default.entries)
+ devm_kfree(chip->dev, chip->wipower_default.entries);
+ if (chip->wipower_pt.entries)
+ devm_kfree(chip->dev, chip->wipower_pt.entries);
+ if (chip->wipower_div2.entries)
+ devm_kfree(chip->dev, chip->wipower_div2.entries);
+ chip->wipower_default.entries = NULL;
+ chip->wipower_pt.entries = NULL;
+ chip->wipower_div2.entries = NULL;
+ chip->vadc_dev = NULL;
+ return rc;
+}
+
+#define DEFAULT_VLED_MAX_UV 3500000
+#define DEFAULT_FCC_MA 2000
+static int smb_parse_dt(struct smbchg_chip *chip)
+{
+ int rc = 0, ocp_thresh = -EINVAL;
+ struct device_node *node = chip->dev->of_node;
+ const char *dc_psy_type, *bpd;
+
+ if (!node) {
+ dev_err(chip->dev, "device tree info. missing\n");
+ return -EINVAL;
+ }
+
+ /* read optional u32 properties */
+ OF_PROP_READ(chip, ocp_thresh,
+ "ibat-ocp-threshold-ua", rc, 1);
+ if (ocp_thresh >= 0)
+ smbchg_ibat_ocp_threshold_ua = ocp_thresh;
+ OF_PROP_READ(chip, chip->iterm_ma, "iterm-ma", rc, 1);
+ OF_PROP_READ(chip, chip->cfg_fastchg_current_ma,
+ "fastchg-current-ma", rc, 1);
+ if (chip->cfg_fastchg_current_ma == -EINVAL)
+ chip->cfg_fastchg_current_ma = DEFAULT_FCC_MA;
+ OF_PROP_READ(chip, chip->vfloat_mv, "float-voltage-mv", rc, 1);
+ OF_PROP_READ(chip, chip->safety_time, "charging-timeout-mins", rc, 1);
+ OF_PROP_READ(chip, chip->vled_max_uv, "vled-max-uv", rc, 1);
+ if (chip->vled_max_uv < 0)
+ chip->vled_max_uv = DEFAULT_VLED_MAX_UV;
+ OF_PROP_READ(chip, chip->rpara_uohm, "rparasitic-uohm", rc, 1);
+ if (chip->rpara_uohm < 0)
+ chip->rpara_uohm = 0;
+ OF_PROP_READ(chip, chip->prechg_safety_time, "precharging-timeout-mins",
+ rc, 1);
+ OF_PROP_READ(chip, chip->fastchg_current_comp, "fastchg-current-comp",
+ rc, 1);
+ OF_PROP_READ(chip, chip->float_voltage_comp, "float-voltage-comp",
+ rc, 1);
+ if (chip->safety_time != -EINVAL &&
+ (chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) {
+ dev_err(chip->dev, "Bad charging-timeout-mins %d\n",
+ chip->safety_time);
+ return -EINVAL;
+ }
+ if (chip->prechg_safety_time != -EINVAL &&
+ (chip->prechg_safety_time >
+ prechg_time[ARRAY_SIZE(prechg_time) - 1])) {
+ dev_err(chip->dev, "Bad precharging-timeout-mins %d\n",
+ chip->prechg_safety_time);
+ return -EINVAL;
+ }
+ OF_PROP_READ(chip, chip->resume_delta_mv, "resume-delta-mv", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.min_current_thr_ma,
+ "parallel-usb-min-current-ma", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.min_9v_current_thr_ma,
+ "parallel-usb-9v-min-current-ma", rc, 1);
+ OF_PROP_READ(chip, chip->parallel.allowed_lowering_ma,
+ "parallel-allowed-lowering-ma", rc, 1);
+ if (chip->parallel.min_current_thr_ma != -EINVAL
+ && chip->parallel.min_9v_current_thr_ma != -EINVAL)
+ chip->parallel.avail = true;
+ /*
+ * use the dt values if they exist, otherwise do not touch the params
+ */
+ of_property_read_u32(chip->spmi->dev.of_node,
+ "qcom,parallel-main-chg-fcc-percent",
+ &smbchg_main_chg_fcc_percent);
+ of_property_read_u32(chip->spmi->dev.of_node,
+ "qcom,parallel-main-chg-icl-percent",
+ &smbchg_main_chg_icl_percent);
+ pr_smb(PR_STATUS, "parallel usb thr: %d, 9v thr: %d\n",
+ chip->parallel.min_current_thr_ma,
+ chip->parallel.min_9v_current_thr_ma);
+ OF_PROP_READ(chip, chip->jeita_temp_hard_limit,
+ "jeita-temp-hard-limit", rc, 1);
+ OF_PROP_READ(chip, chip->aicl_rerun_period_s,
+ "aicl-rerun-period-s", rc, 1);
+ OF_PROP_READ(chip, chip->vchg_adc_channel,
+ "vchg-adc-channel-id", rc, 1);
+
+ /* read boolean configuration properties */
+ chip->use_vfloat_adjustments = of_property_read_bool(node,
+ "qcom,autoadjust-vfloat");
+ chip->bmd_algo_disabled = of_property_read_bool(node,
+ "qcom,bmd-algo-disabled");
+ chip->iterm_disabled = of_property_read_bool(node,
+ "qcom,iterm-disabled");
+ chip->soft_vfloat_comp_disabled = of_property_read_bool(node,
+ "qcom,soft-vfloat-comp-disabled");
+ chip->chg_enabled = !(of_property_read_bool(node,
+ "qcom,charging-disabled"));
+ chip->charge_unknown_battery = of_property_read_bool(node,
+ "qcom,charge-unknown-battery");
+ chip->chg_inhibit_en = of_property_read_bool(node,
+ "qcom,chg-inhibit-en");
+ chip->chg_inhibit_source_fg = of_property_read_bool(node,
+ "qcom,chg-inhibit-fg");
+ chip->low_volt_dcin = of_property_read_bool(node,
+ "qcom,low-volt-dcin");
+ chip->force_aicl_rerun = of_property_read_bool(node,
+ "qcom,force-aicl-rerun");
+ chip->skip_usb_suspend_for_fake_battery = of_property_read_bool(node,
+ "qcom,skip-usb-suspend-for-fake-battery");
+
+ /* parse the battery missing detection pin source */
+ rc = of_property_read_string(chip->spmi->dev.of_node,
+ "qcom,bmd-pin-src", &bpd);
+ if (rc) {
+ /* Select BAT_THM as default BPD scheme */
+ chip->bmd_pin_src = BPD_TYPE_DEFAULT;
+ rc = 0;
+ } else {
+ chip->bmd_pin_src = get_bpd(bpd);
+ if (chip->bmd_pin_src < 0) {
+ rc = chip->bmd_pin_src;
+ dev_err(chip->dev,
+ "failed to determine bpd scheme rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* parse the dc power supply configuration */
+ rc = of_property_read_string(node, "qcom,dc-psy-type", &dc_psy_type);
+ if (rc) {
+ chip->dc_psy_type = -EINVAL;
+ rc = 0;
+ } else {
+ if (strcmp(dc_psy_type, "Mains") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_MAINS;
+ else if (strcmp(dc_psy_type, "Wireless") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_WIRELESS;
+ else if (strcmp(dc_psy_type, "Wipower") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_WIPOWER;
+ }
+ if (chip->dc_psy_type != -EINVAL) {
+ OF_PROP_READ(chip, chip->dc_target_current_ma,
+ "dc-psy-ma", rc, 0);
+ if (rc)
+ return rc;
+ if (chip->dc_target_current_ma < DC_MA_MIN
+ || chip->dc_target_current_ma > DC_MA_MAX) {
+ dev_err(chip->dev, "Bad dc mA %d\n",
+ chip->dc_target_current_ma);
+ return -EINVAL;
+ }
+ }
+
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIPOWER)
+ smb_parse_wipower_dt(chip);
+
+ /* read the bms power supply name */
+ rc = of_property_read_string(node, "qcom,bms-psy-name",
+ &chip->bms_psy_name);
+ if (rc)
+ chip->bms_psy_name = NULL;
+
+ /* read the battery power supply name */
+ rc = of_property_read_string(node, "qcom,battery-psy-name",
+ &chip->battery_psy_name);
+ if (rc)
+ chip->battery_psy_name = "battery";
+
+ /* Get the charger led support property */
+ chip->cfg_chg_led_sw_ctrl =
+ of_property_read_bool(node, "qcom,chg-led-sw-controls");
+ chip->cfg_chg_led_support =
+ of_property_read_bool(node, "qcom,chg-led-support");
+
+ if (of_find_property(node, "qcom,thermal-mitigation",
+ &chip->thermal_levels)) {
+ chip->thermal_mitigation = devm_kzalloc(chip->dev,
+ chip->thermal_levels,
+ GFP_KERNEL);
+
+ if (chip->thermal_mitigation == NULL) {
+ dev_err(chip->dev, "thermal mitigation kzalloc() failed.\n");
+ return -ENOMEM;
+ }
+
+ chip->thermal_levels /= sizeof(int);
+ rc = of_property_read_u32_array(node,
+ "qcom,thermal-mitigation",
+ chip->thermal_mitigation, chip->thermal_levels);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't read threm limits rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ chip->skip_usb_notification
+ = of_property_read_bool(node,
+ "qcom,skip-usb-notification");
+
+ chip->otg_pinctrl = of_property_read_bool(node, "qcom,otg-pinctrl");
+
+ return 0;
+}
+
+#define SUBTYPE_REG 0x5
+#define SMBCHG_CHGR_SUBTYPE 0x1
+#define SMBCHG_OTG_SUBTYPE 0x8
+#define SMBCHG_BAT_IF_SUBTYPE 0x3
+#define SMBCHG_USB_CHGPTH_SUBTYPE 0x4
+#define SMBCHG_DC_CHGPTH_SUBTYPE 0x5
+#define SMBCHG_MISC_SUBTYPE 0x7
+#define SMBCHG_LITE_CHGR_SUBTYPE 0x51
+#define SMBCHG_LITE_OTG_SUBTYPE 0x58
+#define SMBCHG_LITE_BAT_IF_SUBTYPE 0x53
+#define SMBCHG_LITE_USB_CHGPTH_SUBTYPE 0x54
+#define SMBCHG_LITE_DC_CHGPTH_SUBTYPE 0x55
+#define SMBCHG_LITE_MISC_SUBTYPE 0x57
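+/*
+ * Look up the named IRQ within this SPMI peripheral and request a threaded
+ * handler for it; on any failure the calling function returns -ENXIO.
+ */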
+#define REQUEST_IRQ(chip, resource, irq_num, irq_name, irq_handler, flags, rc)\
+do { \
+ irq_num = spmi_get_irq_byname(chip->spmi, \
+ resource, irq_name); \
+ if (irq_num < 0) { \
+ dev_err(chip->dev, "Unable to get " irq_name " irq\n"); \
+ return -ENXIO; \
+ } \
+ rc = devm_request_threaded_irq(chip->dev, \
+ irq_num, NULL, irq_handler, flags, irq_name, \
+ chip); \
+ if (rc < 0) { \
+ dev_err(chip->dev, "Unable to request " irq_name " irq: %d\n",\
+ rc); \
+ return -ENXIO; \
+ } \
+} while (0)
+
+static int smbchg_request_irqs(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ struct resource *resource;
+ struct spmi_resource *spmi_resource;
+ u8 subtype;
+ struct spmi_device *spmi = chip->spmi;
+ unsigned long flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ | IRQF_ONESHOT;
+
+ spmi_for_each_container_dev(spmi_resource, chip->spmi) {
+ if (!spmi_resource) {
+ dev_err(chip->dev, "spmi resource absent\n");
+ return rc;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ dev_err(chip->dev, "node %s IO resource absent!\n",
+ spmi->dev.of_node->full_name);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &subtype,
+ resource->start + SUBTYPE_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBCHG_CHGR_SUBTYPE:
+ case SMBCHG_LITE_CHGR_SUBTYPE:
+ REQUEST_IRQ(chip, spmi_resource, chip->chg_error_irq,
+ "chg-error", chg_error_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->taper_irq,
+ "chg-taper-thr", taper_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT), rc);
+ disable_irq_nosync(chip->taper_irq);
+ REQUEST_IRQ(chip, spmi_resource, chip->chg_term_irq,
+ "chg-tcc-thr", chg_term_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT), rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->recharge_irq,
+ "chg-rechg-thr", recharge_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->fastchg_irq,
+ "chg-p2f-thr", fastchg_handler, flags, rc);
+ enable_irq_wake(chip->chg_term_irq);
+ enable_irq_wake(chip->chg_error_irq);
+ enable_irq_wake(chip->fastchg_irq);
+ break;
+ case SMBCHG_BAT_IF_SUBTYPE:
+ case SMBCHG_LITE_BAT_IF_SUBTYPE:
+ REQUEST_IRQ(chip, spmi_resource, chip->batt_hot_irq,
+ "batt-hot", batt_hot_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->batt_warm_irq,
+ "batt-warm", batt_warm_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->batt_cool_irq,
+ "batt-cool", batt_cool_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->batt_cold_irq,
+ "batt-cold", batt_cold_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->batt_missing_irq,
+ "batt-missing", batt_pres_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->vbat_low_irq,
+ "batt-low", vbat_low_handler, flags, rc);
+ enable_irq_wake(chip->batt_hot_irq);
+ enable_irq_wake(chip->batt_warm_irq);
+ enable_irq_wake(chip->batt_cool_irq);
+ enable_irq_wake(chip->batt_cold_irq);
+ enable_irq_wake(chip->batt_missing_irq);
+ enable_irq_wake(chip->vbat_low_irq);
+ break;
+ case SMBCHG_USB_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_USB_CHGPTH_SUBTYPE:
+ REQUEST_IRQ(chip, spmi_resource, chip->usbin_uv_irq,
+ "usbin-uv", usbin_uv_handler,
+ flags | IRQF_EARLY_RESUME, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->usbin_ov_irq,
+ "usbin-ov", usbin_ov_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->src_detect_irq,
+ "usbin-src-det",
+ src_detect_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->aicl_done_irq,
+ "aicl-done",
+ aicl_done_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT),
+ rc);
+ if (chip->schg_version != QPNP_SCHG_LITE) {
+ REQUEST_IRQ(chip, spmi_resource,
+ chip->otg_fail_irq, "otg-fail",
+ otg_fail_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource,
+ chip->otg_oc_irq, "otg-oc",
+ otg_oc_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT),
+ rc);
+ REQUEST_IRQ(chip, spmi_resource,
+ chip->usbid_change_irq, "usbid-change",
+ usbid_change_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
+ rc);
+ enable_irq_wake(chip->otg_oc_irq);
+ enable_irq_wake(chip->usbid_change_irq);
+ enable_irq_wake(chip->otg_fail_irq);
+ }
+ enable_irq_wake(chip->usbin_uv_irq);
+ enable_irq_wake(chip->usbin_ov_irq);
+ enable_irq_wake(chip->src_detect_irq);
+ if (chip->parallel.avail && chip->usb_present) {
+ rc = enable_irq_wake(chip->aicl_done_irq);
+ chip->enable_aicl_wake = true;
+ }
+ break;
+ case SMBCHG_DC_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_DC_CHGPTH_SUBTYPE:
+ REQUEST_IRQ(chip, spmi_resource, chip->dcin_uv_irq,
+ "dcin-uv", dcin_uv_handler, flags, rc);
+ enable_irq_wake(chip->dcin_uv_irq);
+ break;
+ case SMBCHG_MISC_SUBTYPE:
+ case SMBCHG_LITE_MISC_SUBTYPE:
+ REQUEST_IRQ(chip, spmi_resource, chip->power_ok_irq,
+ "power-ok", power_ok_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource, chip->chg_hot_irq,
+ "temp-shutdown", chg_hot_handler, flags, rc);
+ REQUEST_IRQ(chip, spmi_resource,
+ chip->wdog_timeout_irq,
+ "wdog-timeout",
+ wdog_timeout_handler, flags, rc);
+ enable_irq_wake(chip->chg_hot_irq);
+ enable_irq_wake(chip->wdog_timeout_irq);
+ break;
+ case SMBCHG_OTG_SUBTYPE:
+ break;
+ case SMBCHG_LITE_OTG_SUBTYPE:
+ REQUEST_IRQ(chip, spmi_resource,
+ chip->usbid_change_irq, "usbid-change",
+ usbid_change_handler,
+ (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
+ rc);
+ REQUEST_IRQ(chip, spmi_resource,
+ chip->otg_oc_irq, "otg-oc",
+ otg_oc_handler,
+ (IRQF_TRIGGER_RISING | IRQF_ONESHOT), rc);
+ REQUEST_IRQ(chip, spmi_resource,
+ chip->otg_fail_irq, "otg-fail",
+ otg_fail_handler, flags, rc);
+ enable_irq_wake(chip->usbid_change_irq);
+ enable_irq_wake(chip->otg_oc_irq);
+ enable_irq_wake(chip->otg_fail_irq);
+ break;
+ }
+ }
+
+ return rc;
+}
+
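+/* Flag -EINVAL if a required peripheral base is missing, keeping any earlier error in rc */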
+#define REQUIRE_BASE(chip, base, rc) \
+do { \
+ if (!rc && !chip->base) { \
+ dev_err(chip->dev, "Missing " #base "\n"); \
+ rc = -EINVAL; \
+ } \
+} while (0)
+
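+/* Walk the SPMI container's peripherals and record each base address by subtype */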
+static int smbchg_parse_peripherals(struct smbchg_chip *chip)
+{
+ int rc = 0;
+ struct resource *resource;
+ struct spmi_resource *spmi_resource;
+ u8 subtype;
+ struct spmi_device *spmi = chip->spmi;
+
+ spmi_for_each_container_dev(spmi_resource, chip->spmi) {
+ if (!spmi_resource) {
+ dev_err(chip->dev, "spmi resource absent\n");
+ return rc;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ dev_err(chip->dev, "node %s IO resource absent!\n",
+ spmi->dev.of_node->full_name);
+ return rc;
+ }
+
+ rc = smbchg_read(chip, &subtype,
+ resource->start + SUBTYPE_REG, 1);
+ if (rc) {
+ dev_err(chip->dev, "Peripheral subtype read failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ switch (subtype) {
+ case SMBCHG_CHGR_SUBTYPE:
+ case SMBCHG_LITE_CHGR_SUBTYPE:
+ chip->chgr_base = resource->start;
+ break;
+ case SMBCHG_BAT_IF_SUBTYPE:
+ case SMBCHG_LITE_BAT_IF_SUBTYPE:
+ chip->bat_if_base = resource->start;
+ break;
+ case SMBCHG_USB_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_USB_CHGPTH_SUBTYPE:
+ chip->usb_chgpth_base = resource->start;
+ break;
+ case SMBCHG_DC_CHGPTH_SUBTYPE:
+ case SMBCHG_LITE_DC_CHGPTH_SUBTYPE:
+ chip->dc_chgpth_base = resource->start;
+ break;
+ case SMBCHG_MISC_SUBTYPE:
+ case SMBCHG_LITE_MISC_SUBTYPE:
+ chip->misc_base = resource->start;
+ break;
+ case SMBCHG_OTG_SUBTYPE:
+ case SMBCHG_LITE_OTG_SUBTYPE:
+ chip->otg_base = resource->start;
+ break;
+ }
+ }
+
+ REQUIRE_BASE(chip, chgr_base, rc);
+ REQUIRE_BASE(chip, bat_if_base, rc);
+ REQUIRE_BASE(chip, usb_chgpth_base, rc);
+ REQUIRE_BASE(chip, dc_chgpth_base, rc);
+ REQUIRE_BASE(chip, misc_base, rc);
+
+ return rc;
+}
+
+static inline void dump_reg(struct smbchg_chip *chip, u16 addr,
+ const char *name)
+{
+ u8 reg;
+
+ smbchg_read(chip, &reg, addr, 1);
+ pr_smb(PR_DUMP, "%s - %04X = %02X\n", name, addr, reg);
+}
+
+/* dumps useful registers for debug */
+static void dump_regs(struct smbchg_chip *chip)
+{
+ u16 addr;
+
+ /* charger peripheral */
+ for (addr = 0xB; addr <= 0x10; addr++)
+ dump_reg(chip, chip->chgr_base + addr, "CHGR Status");
+ for (addr = 0xF0; addr <= 0xFF; addr++)
+ dump_reg(chip, chip->chgr_base + addr, "CHGR Config");
+ /* battery interface peripheral */
+ dump_reg(chip, chip->bat_if_base + RT_STS, "BAT_IF Status");
+ dump_reg(chip, chip->bat_if_base + CMD_CHG_REG, "BAT_IF Command");
+ for (addr = 0xF0; addr <= 0xFB; addr++)
+ dump_reg(chip, chip->bat_if_base + addr, "BAT_IF Config");
+ /* usb charge path peripheral */
+ for (addr = 0x7; addr <= 0x10; addr++)
+ dump_reg(chip, chip->usb_chgpth_base + addr, "USB Status");
+ dump_reg(chip, chip->usb_chgpth_base + CMD_IL, "USB Command");
+ for (addr = 0xF0; addr <= 0xF5; addr++)
+ dump_reg(chip, chip->usb_chgpth_base + addr, "USB Config");
+ /* dc charge path peripheral */
+ dump_reg(chip, chip->dc_chgpth_base + RT_STS, "DC Status");
+ for (addr = 0xF0; addr <= 0xF6; addr++)
+ dump_reg(chip, chip->dc_chgpth_base + addr, "DC Config");
+ /* misc peripheral */
+ dump_reg(chip, chip->misc_base + IDEV_STS, "MISC Status");
+ dump_reg(chip, chip->misc_base + RT_STS, "MISC Status");
+ for (addr = 0xF0; addr <= 0xF3; addr++)
+ dump_reg(chip, chip->misc_base + addr, "MISC CFG");
+}
+
+static int create_debugfs_entries(struct smbchg_chip *chip)
+{
+ struct dentry *ent;
+
+ chip->debug_root = debugfs_create_dir("qpnp-smbcharger", NULL);
+ if (!chip->debug_root) {
+ dev_err(chip->dev, "Couldn't create debug dir\n");
+ return -EINVAL;
+ }
+
+ ent = debugfs_create_file("force_dcin_icl_check",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root, chip,
+ &force_dcin_icl_ops);
+ if (!ent) {
+ dev_err(chip->dev,
+ "Couldn't create force dcin icl check file\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int smbchg_check_chg_version(struct smbchg_chip *chip)
+{
+ struct pmic_revid_data *pmic_rev_id;
+ struct device_node *revid_dev_node;
+ int rc;
+
+ revid_dev_node = of_parse_phandle(chip->spmi->dev.of_node,
+ "qcom,pmic-revid", 0);
+ if (!revid_dev_node) {
+ pr_err("Missing qcom,pmic-revid property - driver failed\n");
+ return -EINVAL;
+ }
+
+ pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR(pmic_rev_id)) {
+ rc = PTR_ERR(pmic_rev_id);
+ if (rc != -EPROBE_DEFER)
+ pr_err("Unable to get pmic_revid rc=%d\n", rc);
+ return rc;
+ }
+
+ switch (pmic_rev_id->pmic_subtype) {
+ case PMI8994:
+ chip->wa_flags |= SMBCHG_AICL_DEGLITCH_WA
+ | SMBCHG_BATT_OV_WA
+ | SMBCHG_CC_ESR_WA
+ | SMBCHG_RESTART_WA;
+ use_pmi8994_tables(chip);
+ chip->schg_version = QPNP_SCHG;
+ break;
+ case PMI8950:
+ case PMI8937:
+ chip->wa_flags |= SMBCHG_BATT_OV_WA;
+ if (pmic_rev_id->rev4 < 2) /* PMI8950 1.0 */ {
+ chip->wa_flags |= SMBCHG_AICL_DEGLITCH_WA;
+ } else { /* rev > PMI8950 v1.0 */
+ chip->wa_flags |= SMBCHG_HVDCP_9V_EN_WA
+ | SMBCHG_USB100_WA;
+ }
+ use_pmi8994_tables(chip);
+ chip->tables.aicl_rerun_period_table =
+ aicl_rerun_period_schg_lite;
+ chip->tables.aicl_rerun_period_len =
+ ARRAY_SIZE(aicl_rerun_period_schg_lite);
+
+ chip->schg_version = QPNP_SCHG_LITE;
+ if (pmic_rev_id->pmic_subtype == PMI8937)
+ chip->hvdcp_not_supported = true;
+ break;
+ case PMI8996:
+ chip->wa_flags |= SMBCHG_CC_ESR_WA
+ | SMBCHG_FLASH_ICL_DISABLE_WA
+ | SMBCHG_RESTART_WA
+ | SMBCHG_FLASH_BUCK_SWITCH_FREQ_WA;
+ use_pmi8996_tables(chip);
+ chip->schg_version = QPNP_SCHG;
+ break;
+ default:
+ pr_err("PMIC subtype %d not supported, WA flags not set\n",
+ pmic_rev_id->pmic_subtype);
+ }
+
+ pr_smb(PR_STATUS, "pmic=%s, wa_flags=0x%x, hvdcp_supported=%s\n",
+ pmic_rev_id->pmic_name, chip->wa_flags,
+ chip->hvdcp_not_supported ? "false" : "true");
+
+ return 0;
+}
+
+static void rerun_hvdcp_det_if_necessary(struct smbchg_chip *chip)
+{
+ enum power_supply_type usb_supply_type;
+ char *usb_type_name;
+ int rc;
+
+ if (!(chip->wa_flags & SMBCHG_RESTART_WA))
+ return;
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ if (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP
+ && !is_hvdcp_present(chip)) {
+ pr_smb(PR_STATUS, "DCP found rerunning APSD\n");
+ rc = vote(chip->usb_icl_votable,
+ CHG_SUSPEND_WORKAROUND_ICL_VOTER, true, 300);
+ if (rc < 0)
+ pr_err("Couldn't vote for 300mA for suspend wa, going ahead rc=%d\n",
+ rc);
+
+ pr_smb(PR_STATUS, "Faking Removal\n");
+ fake_insertion_removal(chip, false);
+ msleep(500);
+ pr_smb(PR_STATUS, "Faking Insertion\n");
+ fake_insertion_removal(chip, true);
+
+ read_usb_type(chip, &usb_type_name, &usb_supply_type);
+ if (usb_supply_type != POWER_SUPPLY_TYPE_USB_DCP) {
+ msleep(500);
+ pr_smb(PR_STATUS, "Fake Removal again as type!=DCP\n");
+ fake_insertion_removal(chip, false);
+ msleep(500);
+ pr_smb(PR_STATUS, "Fake Insert again as type!=DCP\n");
+ fake_insertion_removal(chip, true);
+ }
+
+ rc = vote(chip->usb_icl_votable,
+ CHG_SUSPEND_WORKAROUND_ICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote for 0 for suspend wa, going ahead rc=%d\n",
+ rc);
+ }
+}
+
+static int smbchg_probe(struct spmi_device *spmi)
+{
+ int rc;
+ struct smbchg_chip *chip;
+ struct power_supply *usb_psy, *typec_psy = NULL;
+ struct qpnp_vadc_chip *vadc_dev, *vchg_vadc_dev;
+ const char *typec_psy_name;
+
+ usb_psy = power_supply_get_by_name("usb");
+ if (!usb_psy) {
+ pr_smb(PR_STATUS, "USB supply not found, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ if (of_property_read_bool(spmi->dev.of_node, "qcom,external-typec")) {
+ /* read the type power supply name */
+ rc = of_property_read_string(spmi->dev.of_node,
+ "qcom,typec-psy-name", &typec_psy_name);
+ if (rc) {
+ pr_err("failed to get prop typec-psy-name rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ typec_psy = power_supply_get_by_name(typec_psy_name);
+ if (!typec_psy) {
+ pr_smb(PR_STATUS,
+ "Type-C supply not found, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+ }
+
+ if (of_find_property(spmi->dev.of_node, "qcom,dcin-vadc", NULL)) {
+ vadc_dev = qpnp_get_vadc(&spmi->dev, "dcin");
+ if (IS_ERR(vadc_dev)) {
+ rc = PTR_ERR(vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&spmi->dev, "Couldn't get vadc rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (of_find_property(spmi->dev.of_node, "qcom,vchg_sns-vadc", NULL)) {
+ vchg_vadc_dev = qpnp_get_vadc(&spmi->dev, "vchg_sns");
+ if (IS_ERR(vchg_vadc_dev)) {
+ rc = PTR_ERR(vchg_vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&spmi->dev, "Couldn't get vadc 'vchg' rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ chip = devm_kzalloc(&spmi->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&spmi->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ chip->fcc_votable = create_votable(&spmi->dev,
+ "SMBCHG: fcc",
+ VOTE_MIN, NUM_FCC_VOTER, 2000,
+ set_fastchg_current_vote_cb);
+ if (IS_ERR(chip->fcc_votable))
+ return PTR_ERR(chip->fcc_votable);
+
+ chip->usb_icl_votable = create_votable(&spmi->dev,
+ "SMBCHG: usb_icl",
+ VOTE_MIN, NUM_ICL_VOTER, 3000,
+ set_usb_current_limit_vote_cb);
+ if (IS_ERR(chip->usb_icl_votable))
+ return PTR_ERR(chip->usb_icl_votable);
+
+ chip->dc_icl_votable = create_votable(&spmi->dev,
+ "SMBCHG: dcl_icl",
+ VOTE_MIN, NUM_ICL_VOTER, 3000,
+ set_dc_current_limit_vote_cb);
+ if (IS_ERR(chip->dc_icl_votable))
+ return PTR_ERR(chip->dc_icl_votable);
+
+ chip->usb_suspend_votable = create_votable(&spmi->dev,
+ "SMBCHG: usb_suspend",
+ VOTE_SET_ANY, NUM_EN_VOTERS, 0,
+ usb_suspend_vote_cb);
+ if (IS_ERR(chip->usb_suspend_votable))
+ return PTR_ERR(chip->usb_suspend_votable);
+
+ chip->dc_suspend_votable = create_votable(&spmi->dev,
+ "SMBCHG: dc_suspend",
+ VOTE_SET_ANY, NUM_EN_VOTERS, 0,
+ dc_suspend_vote_cb);
+ if (IS_ERR(chip->dc_suspend_votable))
+ return PTR_ERR(chip->dc_suspend_votable);
+
+ chip->battchg_suspend_votable = create_votable(&spmi->dev,
+ "SMBCHG: battchg_suspend",
+ VOTE_SET_ANY, NUM_BATTCHG_EN_VOTERS, 0,
+ charging_suspend_vote_cb);
+ if (IS_ERR(chip->battchg_suspend_votable))
+ return PTR_ERR(chip->battchg_suspend_votable);
+
+ chip->hw_aicl_rerun_disable_votable = create_votable(&spmi->dev,
+ "SMBCHG: hwaicl_disable",
+ VOTE_SET_ANY, NUM_HW_AICL_DISABLE_VOTERS, 0,
+ smbchg_hw_aicl_rerun_disable_cb);
+ if (IS_ERR(chip->hw_aicl_rerun_disable_votable))
+ return PTR_ERR(chip->hw_aicl_rerun_disable_votable);
+
+ chip->hw_aicl_rerun_enable_indirect_votable = create_votable(&spmi->dev,
+ "SMBCHG: hwaicl_enable_indirect",
+ VOTE_SET_ANY, NUM_HW_AICL_RERUN_ENABLE_INDIRECT_VOTERS,
+ 0, smbchg_hw_aicl_rerun_enable_indirect_cb);
+ if (IS_ERR(chip->hw_aicl_rerun_enable_indirect_votable))
+ return PTR_ERR(chip->hw_aicl_rerun_enable_indirect_votable);
+
+ chip->aicl_deglitch_short_votable = create_votable(&spmi->dev,
+ "SMBCHG: hwaicl_short_deglitch",
+ VOTE_SET_ANY, NUM_HW_SHORT_DEGLITCH_VOTERS, 0,
+ smbchg_aicl_deglitch_config_cb);
+ if (IS_ERR(chip->aicl_deglitch_short_votable))
+ return PTR_ERR(chip->aicl_deglitch_short_votable);
+
+ INIT_WORK(&chip->usb_set_online_work, smbchg_usb_update_online_work);
+ INIT_DELAYED_WORK(&chip->parallel_en_work,
+ smbchg_parallel_usb_en_work);
+ INIT_DELAYED_WORK(&chip->vfloat_adjust_work, smbchg_vfloat_adjust_work);
+ INIT_DELAYED_WORK(&chip->hvdcp_det_work, smbchg_hvdcp_det_work);
+ init_completion(&chip->src_det_lowered);
+ init_completion(&chip->src_det_raised);
+ init_completion(&chip->usbin_uv_lowered);
+ init_completion(&chip->usbin_uv_raised);
+ chip->vadc_dev = vadc_dev;
+ chip->vchg_vadc_dev = vchg_vadc_dev;
+ chip->spmi = spmi;
+ chip->dev = &spmi->dev;
+ chip->usb_psy = usb_psy;
+ chip->typec_psy = typec_psy;
+ chip->fake_battery_soc = -EINVAL;
+ chip->usb_online = -EINVAL;
+ dev_set_drvdata(&spmi->dev, chip);
+
+ spin_lock_init(&chip->sec_access_lock);
+ mutex_init(&chip->therm_lvl_lock);
+ mutex_init(&chip->usb_set_online_lock);
+ mutex_init(&chip->parallel.lock);
+ mutex_init(&chip->taper_irq_lock);
+ mutex_init(&chip->pm_lock);
+ mutex_init(&chip->wipower_config);
+ mutex_init(&chip->usb_status_lock);
+ device_init_wakeup(chip->dev, true);
+
+ rc = smbchg_parse_peripherals(chip);
+ if (rc) {
+ dev_err(chip->dev, "Error parsing DT peripherals: %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_check_chg_version(chip);
+ if (rc) {
+ pr_err("Unable to check schg version rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smb_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "Unable to parse DT nodes: %d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_regulator_init(chip);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Couldn't initialize regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smbchg_hw_init(chip);
+ if (rc < 0) {
+ dev_err(&spmi->dev,
+ "Unable to intialize hardware rc = %d\n", rc);
+ goto out;
+ }
+
+ rc = determine_initial_status(chip);
+ if (rc < 0) {
+ dev_err(&spmi->dev,
+ "Unable to determine init status rc = %d\n", rc);
+ goto out;
+ }
+
+ chip->previous_soc = -EINVAL;
+ chip->batt_psy.name = chip->battery_psy_name;
+ chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->batt_psy.get_property = smbchg_battery_get_property;
+ chip->batt_psy.set_property = smbchg_battery_set_property;
+ chip->batt_psy.properties = smbchg_battery_properties;
+ chip->batt_psy.num_properties = ARRAY_SIZE(smbchg_battery_properties);
+ chip->batt_psy.external_power_changed = smbchg_external_power_changed;
+ chip->batt_psy.property_is_writeable = smbchg_battery_is_writeable;
+
+ rc = power_supply_register(chip->dev, &chip->batt_psy);
+ if (rc < 0) {
+ dev_err(&spmi->dev,
+ "Unable to register batt_psy rc = %d\n", rc);
+ goto out;
+ }
+ if (chip->dc_psy_type != -EINVAL) {
+ chip->dc_psy.name = "dc";
+ chip->dc_psy.type = chip->dc_psy_type;
+ chip->dc_psy.get_property = smbchg_dc_get_property;
+ chip->dc_psy.set_property = smbchg_dc_set_property;
+ chip->dc_psy.property_is_writeable = smbchg_dc_is_writeable;
+ chip->dc_psy.properties = smbchg_dc_properties;
+ chip->dc_psy.num_properties = ARRAY_SIZE(smbchg_dc_properties);
+ chip->dc_psy.supplied_to = smbchg_dc_supplicants;
+ chip->dc_psy.num_supplicants
+ = ARRAY_SIZE(smbchg_dc_supplicants);
+ rc = power_supply_register(chip->dev, &chip->dc_psy);
+ if (rc < 0) {
+ dev_err(&spmi->dev,
+ "Unable to register dc_psy rc = %d\n", rc);
+ goto unregister_batt_psy;
+ }
+ }
+ chip->psy_registered = true;
+
+ if (chip->cfg_chg_led_support &&
+ chip->schg_version == QPNP_SCHG_LITE) {
+ rc = smbchg_register_chg_led(chip);
+ if (rc) {
+ dev_err(chip->dev,
+ "Unable to register charger led: %d\n",
+ rc);
+ goto unregister_dc_psy;
+ }
+
+ rc = smbchg_chg_led_controls(chip);
+ if (rc) {
+ dev_err(chip->dev,
+ "Failed to set charger led controld bit: %d\n",
+ rc);
+ goto unregister_led_class;
+ }
+ }
+
+ rc = smbchg_request_irqs(chip);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "Unable to request irqs rc = %d\n", rc);
+ goto unregister_led_class;
+ }
+
+ if (!chip->skip_usb_notification) {
+ pr_smb(PR_MISC, "setting usb psy present = %d\n",
+ chip->usb_present);
+ power_supply_set_present(chip->usb_psy, chip->usb_present);
+ }
+
+ rerun_hvdcp_det_if_necessary(chip);
+
+ dump_regs(chip);
+ create_debugfs_entries(chip);
+ dev_info(chip->dev,
+ "SMBCHG successfully probe Charger version=%s Revision DIG:%d.%d ANA:%d.%d batt=%d dc=%d usb=%d\n",
+ version_str[chip->schg_version],
+ chip->revision[DIG_MAJOR], chip->revision[DIG_MINOR],
+ chip->revision[ANA_MAJOR], chip->revision[ANA_MINOR],
+ get_prop_batt_present(chip),
+ chip->dc_present, chip->usb_present);
+ return 0;
+
+unregister_led_class:
+ if (chip->cfg_chg_led_support && chip->schg_version == QPNP_SCHG_LITE)
+ led_classdev_unregister(&chip->led_cdev);
+unregister_dc_psy:
+ power_supply_unregister(&chip->dc_psy);
+unregister_batt_psy:
+ power_supply_unregister(&chip->batt_psy);
+out:
+ handle_usb_removal(chip);
+ return rc;
+}
+
+static int smbchg_remove(struct spmi_device *spmi)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(&spmi->dev);
+
+ debugfs_remove_recursive(chip->debug_root);
+
+ if (chip->dc_psy_type != -EINVAL)
+ power_supply_unregister(&chip->dc_psy);
+
+ power_supply_unregister(&chip->batt_psy);
+
+ return 0;
+}
+
+static void smbchg_shutdown(struct spmi_device *spmi)
+{
+ struct smbchg_chip *chip = dev_get_drvdata(&spmi->dev);
+ int i, rc;
+
+ if (!(chip->wa_flags & SMBCHG_RESTART_WA))
+ return;
+
+ if (!is_hvdcp_present(chip))
+ return;
+
+ pr_smb(PR_MISC, "Disable Parallel\n");
+ mutex_lock(&chip->parallel.lock);
+ smbchg_parallel_en = 0;
+ smbchg_parallel_usb_disable(chip);
+ mutex_unlock(&chip->parallel.lock);
+
+ pr_smb(PR_MISC, "Disable all interrupts\n");
+ disable_irq(chip->aicl_done_irq);
+ disable_irq(chip->batt_cold_irq);
+ disable_irq(chip->batt_cool_irq);
+ disable_irq(chip->batt_hot_irq);
+ disable_irq(chip->batt_missing_irq);
+ disable_irq(chip->batt_warm_irq);
+ disable_irq(chip->chg_error_irq);
+ disable_irq(chip->chg_hot_irq);
+ disable_irq(chip->chg_term_irq);
+ disable_irq(chip->dcin_uv_irq);
+ disable_irq(chip->fastchg_irq);
+ disable_irq(chip->otg_fail_irq);
+ disable_irq(chip->otg_oc_irq);
+ disable_irq(chip->power_ok_irq);
+ disable_irq(chip->recharge_irq);
+ disable_irq(chip->src_detect_irq);
+ disable_irq(chip->taper_irq);
+ disable_irq(chip->usbid_change_irq);
+ disable_irq(chip->usbin_ov_irq);
+ disable_irq(chip->usbin_uv_irq);
+ disable_irq(chip->vbat_low_irq);
+ disable_irq(chip->wdog_timeout_irq);
+
+ /* remove all votes for short deglitch */
+ for (i = 0; i < NUM_HW_SHORT_DEGLITCH_VOTERS; i++)
+ vote(chip->aicl_deglitch_short_votable, i, false, 0);
+
+ /* vote to ensure AICL rerun is enabled */
+ rc = vote(chip->hw_aicl_rerun_enable_indirect_votable,
+ SHUTDOWN_WORKAROUND_VOTER, true, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote to enable indirect AICL rerun\n");
+ rc = vote(chip->hw_aicl_rerun_disable_votable,
+ WEAK_CHARGER_HW_AICL_VOTER, false, 0);
+ if (rc < 0)
+ pr_err("Couldn't vote to enable AICL rerun\n");
+
+ /* switch to 5V HVDCP */
+ pr_smb(PR_MISC, "Switch to 5V HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_ADAPTER_SEL_MASK, HVDCP_5V);
+ if (rc < 0) {
+ pr_err("Couldn't configure HVDCP 5V rc=%d\n", rc);
+ return;
+ }
+
+ pr_smb(PR_MISC, "Wait 500mS to lower to 5V\n");
+ /* wait for HVDCP to lower to 5V */
+ msleep(500);
+ /*
+ * Check that the same HVDCP session is still in progress: src_det
+ * should be high and we should still be at 5V HVDCP.
+ */
+ if (!is_src_detect_high(chip)) {
+ pr_smb(PR_MISC, "src det low after 500mS sleep\n");
+ return;
+ }
+
+ /* disable HVDCP */
+ pr_smb(PR_MISC, "Disable HVDCP\n");
+ rc = smbchg_sec_masked_write(chip, chip->usb_chgpth_base + CHGPTH_CFG,
+ HVDCP_EN_BIT, 0);
+ if (rc < 0)
+ pr_err("Couldn't disable HVDCP rc=%d\n", rc);
+
+ chip->hvdcp_3_det_ignore_uv = true;
+ /* fake a removal */
+ pr_smb(PR_MISC, "Faking Removal\n");
+ rc = fake_insertion_removal(chip, false);
+ if (rc < 0)
+ pr_err("Couldn't fake removal HVDCP Removed rc=%d\n", rc);
+
+ /* fake an insertion */
+ pr_smb(PR_MISC, "Faking Insertion\n");
+ rc = fake_insertion_removal(chip, true);
+ if (rc < 0)
+ pr_err("Couldn't fake insertion rc=%d\n", rc);
+
+ pr_smb(PR_MISC, "Wait 1S to settle\n");
+ msleep(1000);
+ chip->hvdcp_3_det_ignore_uv = false;
+
+ pr_smb(PR_STATUS, "wrote power off configurations\n");
+}
+
+static const struct dev_pm_ops smbchg_pm_ops = {
+};
+
+MODULE_DEVICE_TABLE(spmi, smbchg_id);
+
+static struct spmi_driver smbchg_driver = {
+ .driver = {
+ .name = "qpnp-smbcharger",
+ .owner = THIS_MODULE,
+ .of_match_table = smbchg_match_table,
+ .pm = &smbchg_pm_ops,
+ },
+ .probe = smbchg_probe,
+ .remove = smbchg_remove,
+ .shutdown = smbchg_shutdown,
+};
+
+static int __init smbchg_init(void)
+{
+ return spmi_driver_register(&smbchg_driver);
+}
+
+static void __exit smbchg_exit(void)
+{
+ spmi_driver_unregister(&smbchg_driver);
+}
+
+module_init(smbchg_init);
+module_exit(smbchg_exit);
+
+MODULE_DESCRIPTION("QPNP SMB Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qpnp-smbcharger");
diff --git a/drivers/power/smb1351-charger.c b/drivers/power/smb1351-charger.c
new file mode 100644
index 000000000000..c0522680eb30
--- /dev/null
+++ b/drivers/power/smb1351-charger.c
@@ -0,0 +1,3268 @@
+/* Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/i2c.h>
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/qpnp/qpnp-adc.h>
+#include <linux/pinctrl/consumer.h>
+
+/* Mask/Bit helpers */
+#define _SMB1351_MASK(BITS, POS) \
+ ((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB1351_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+ _SMB1351_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+ (RIGHT_BIT_POS))
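+/* e.g. SMB1351_MASK(7, 4) == 0xF0: a mask covering bits 7..4 inclusive */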
+
+/* Configuration registers */
+#define CHG_CURRENT_CTRL_REG 0x0
+#define FAST_CHG_CURRENT_MASK SMB1351_MASK(7, 4)
+#define AC_INPUT_CURRENT_LIMIT_MASK SMB1351_MASK(3, 0)
+
+#define CHG_OTH_CURRENT_CTRL_REG 0x1
+#define PRECHG_CURRENT_MASK SMB1351_MASK(7, 5)
+#define ITERM_MASK SMB1351_MASK(4, 2)
+#define USB_2_3_MODE_SEL_BIT BIT(1)
+#define USB_2_3_MODE_SEL_BY_I2C 0
+#define USB_2_3_MODE_SEL_BY_PIN 0x2
+#define USB_5_1_CMD_POLARITY_BIT BIT(0)
+#define USB_CMD_POLARITY_500_1_100_0 0
+#define USB_CMD_POLARITY_500_0_100_1 0x1
+
+#define VARIOUS_FUNC_REG 0x2
+#define SUSPEND_MODE_CTRL_BIT BIT(7)
+#define SUSPEND_MODE_CTRL_BY_PIN 0
+#define SUSPEND_MODE_CTRL_BY_I2C 0x80
+#define BATT_TO_SYS_POWER_CTRL_BIT BIT(6)
+#define MAX_SYS_VOLTAGE BIT(5)
+#define AICL_EN_BIT BIT(4)
+#define AICL_DET_TH_BIT BIT(3)
+#define APSD_EN_BIT BIT(2)
+#define BATT_OV_BIT BIT(1)
+#define VCHG_FUNC_BIT BIT(0)
+
+#define VFLOAT_REG 0x3
+#define PRECHG_TO_FAST_VOLTAGE_CFG_MASK SMB1351_MASK(7, 6)
+#define VFLOAT_MASK SMB1351_MASK(5, 0)
+
+#define CHG_CTRL_REG 0x4
+#define AUTO_RECHG_BIT BIT(7)
+#define AUTO_RECHG_ENABLE 0
+#define AUTO_RECHG_DISABLE 0x80
+#define ITERM_EN_BIT BIT(6)
+#define ITERM_ENABLE 0
+#define ITERM_DISABLE 0x40
+#define MAPPED_AC_INPUT_CURRENT_LIMIT_MASK SMB1351_MASK(5, 4)
+#define AUTO_RECHG_TH_BIT BIT(3)
+#define AUTO_RECHG_TH_50MV 0
+#define AUTO_RECHG_TH_100MV 0x8
+#define AFCV_MASK SMB1351_MASK(2, 0)
+
+#define CHG_STAT_TIMERS_CTRL_REG 0x5
+#define STAT_OUTPUT_POLARITY_BIT BIT(7)
+#define STAT_OUTPUT_MODE_BIT BIT(6)
+#define STAT_OUTPUT_CTRL_BIT BIT(5)
+#define OTH_CHG_IL_BIT BIT(4)
+#define COMPLETE_CHG_TIMEOUT_MASK SMB1351_MASK(3, 2)
+#define PRECHG_TIMEOUT_MASK SMB1351_MASK(1, 0)
+
+#define CHG_PIN_EN_CTRL_REG 0x6
+#define LED_BLINK_FUNC_BIT BIT(7)
+#define EN_PIN_CTRL_MASK SMB1351_MASK(6, 5)
+#define EN_BY_I2C_0_DISABLE 0
+#define EN_BY_I2C_0_ENABLE 0x20
+#define EN_BY_PIN_HIGH_ENABLE 0x40
+#define EN_BY_PIN_LOW_ENABLE 0x60
+#define USBCS_CTRL_BIT BIT(4)
+#define USBCS_CTRL_BY_I2C 0
+#define USBCS_CTRL_BY_PIN 0x10
+#define USBCS_INPUT_STATE_BIT BIT(3)
+#define CHG_ERR_BIT BIT(2)
+#define APSD_DONE_BIT BIT(1)
+#define USB_FAIL_BIT BIT(0)
+
+#define THERM_A_CTRL_REG 0x7
+#define MIN_SYS_VOLTAGE_MASK SMB1351_MASK(7, 6)
+#define LOAD_BATT_10MA_FVC_BIT BIT(5)
+#define THERM_MONITOR_BIT BIT(4)
+#define THERM_MONITOR_EN 0
+#define SOFT_COLD_TEMP_LIMIT_MASK SMB1351_MASK(3, 2)
+#define SOFT_HOT_TEMP_LIMIT_MASK SMB1351_MASK(1, 0)
+
+#define WDOG_SAFETY_TIMER_CTRL_REG 0x8
+#define AICL_FAIL_OPTION_BIT BIT(7)
+#define AICL_FAIL_TO_SUSPEND 0
+#define AICL_FAIL_TO_150_MA 0x80
+#define WDOG_TIMEOUT_MASK SMB1351_MASK(6, 5)
+#define WDOG_IRQ_SAFETY_TIMER_MASK SMB1351_MASK(4, 3)
+#define WDOG_IRQ_SAFETY_TIMER_EN_BIT BIT(2)
+#define WDOG_OPTION_BIT BIT(1)
+#define WDOG_TIMER_EN_BIT BIT(0)
+
+#define OTG_USBIN_AICL_CTRL_REG 0x9
+#define OTG_ID_PIN_CTRL_MASK SMB1351_MASK(7, 6)
+#define OTG_PIN_POLARITY_BIT BIT(5)
+#define DCIN_IC_GLITCH_FILTER_HV_ADAPTER_MASK SMB1351_MASK(4, 3)
+#define DCIN_IC_GLITCH_FILTER_LV_ADAPTER_BIT BIT(2)
+#define USBIN_AICL_CFG1_BIT BIT(1)
+#define USBIN_AICL_CFG0_BIT BIT(0)
+
+#define OTG_TLIM_CTRL_REG 0xA
+#define SWITCH_FREQ_MASK SMB1351_MASK(7, 6)
+#define THERM_LOOP_TEMP_SEL_MASK SMB1351_MASK(5, 4)
+#define OTG_OC_LIMIT_MASK SMB1351_MASK(3, 2)
+#define OTG_BATT_UVLO_TH_MASK SMB1351_MASK(1, 0)
+
+#define HARD_SOFT_LIMIT_CELL_TEMP_REG 0xB
+#define HARD_LIMIT_COLD_TEMP_ALARM_TRIP_MASK SMB1351_MASK(7, 6)
+#define HARD_LIMIT_HOT_TEMP_ALARM_TRIP_MASK SMB1351_MASK(5, 4)
+#define SOFT_LIMIT_COLD_TEMP_ALARM_TRIP_MASK SMB1351_MASK(3, 2)
+#define SOFT_LIMIT_HOT_TEMP_ALARM_TRIP_MASK SMB1351_MASK(1, 0)
+
+#define FAULT_INT_REG 0xC
+#define HOT_COLD_HARD_LIMIT_BIT BIT(7)
+#define HOT_COLD_SOFT_LIMIT_BIT BIT(6)
+#define BATT_UVLO_IN_OTG_BIT BIT(5)
+#define OTG_OC_BIT BIT(4)
+#define INPUT_OVLO_BIT BIT(3)
+#define INPUT_UVLO_BIT BIT(2)
+#define AICL_DONE_FAIL_BIT BIT(1)
+#define INTERNAL_OVER_TEMP_BIT BIT(0)
+
+#define STATUS_INT_REG 0xD
+#define CHG_OR_PRECHG_TIMEOUT_BIT BIT(7)
+#define RID_CHANGE_BIT BIT(6)
+#define BATT_OVP_BIT BIT(5)
+#define FAST_TERM_TAPER_RECHG_INHIBIT_BIT BIT(4)
+#define WDOG_TIMER_BIT BIT(3)
+#define POK_BIT BIT(2)
+#define BATT_MISSING_BIT BIT(1)
+#define BATT_LOW_BIT BIT(0)
+
+#define VARIOUS_FUNC_2_REG 0xE
+#define CHG_HOLD_OFF_TIMER_AFTER_PLUGIN_BIT BIT(7)
+#define CHG_INHIBIT_BIT BIT(6)
+#define FAST_CHG_CC_IN_BATT_SOFT_LIMIT_MODE_BIT BIT(5)
+#define FVCL_IN_BATT_SOFT_LIMIT_MODE_MASK SMB1351_MASK(4, 3)
+#define HARD_TEMP_LIMIT_BEHAVIOR_BIT BIT(2)
+#define PRECHG_TO_FASTCHG_BIT BIT(1)
+#define STAT_PIN_CONFIG_BIT BIT(0)
+
+#define FLEXCHARGER_REG 0x10
+#define AFVC_IRQ_BIT BIT(7)
+#define CHG_CONFIG_MASK SMB1351_MASK(6, 4)
+#define LOW_BATT_VOLTAGE_DET_TH_MASK SMB1351_MASK(3, 0)
+
+#define VARIOUS_FUNC_3_REG 0x11
+#define SAFETY_TIMER_EN_MASK SMB1351_MASK(7, 6)
+#define BLOCK_SUSPEND_DURING_VBATT_LOW_BIT BIT(5)
+#define TIMEOUT_SEL_FOR_APSD_BIT BIT(4)
+#define SDP_SUSPEND_BIT BIT(3)
+#define QC_2P1_AUTO_INCREMENT_MODE_BIT BIT(2)
+#define QC_2P1_AUTH_ALGO_BIT BIT(1)
+#define DCD_EN_BIT BIT(0)
+
+#define HVDCP_BATT_MISSING_CTRL_REG 0x12
+#define HVDCP_ADAPTER_SEL_MASK SMB1351_MASK(7, 6)
+#define HVDCP_EN_BIT BIT(5)
+#define HVDCP_AUTO_INCREMENT_LIMIT_BIT BIT(4)
+#define BATT_MISSING_ON_INPUT_PLUGIN_BIT BIT(3)
+#define BATT_MISSING_2P6S_POLLER_BIT BIT(2)
+#define BATT_MISSING_ALGO_BIT BIT(1)
+#define BATT_MISSING_THERM_PIN_SOURCE_BIT BIT(0)
+
+#define PON_OPTIONS_REG 0x13
+#define SYSOK_INOK_POLARITY_BIT BIT(7)
+#define SYSOK_OPTIONS_MASK SMB1351_MASK(6, 4)
+#define INPUT_MISSING_POLLER_CONFIG_BIT BIT(3)
+#define VBATT_LOW_DISABLED_OR_RESET_STATE_BIT BIT(2)
+#define QC_2P1_AUTH_ALGO_IRQ_EN_BIT BIT(0)
+
+#define OTG_MODE_POWER_OPTIONS_REG 0x14
+#define ADAPTER_CONFIG_MASK SMB1351_MASK(7, 6)
+#define MAP_HVDCP_BIT BIT(5)
+#define SDP_LOW_BATT_FORCE_USB5_OVER_USB1_BIT BIT(4)
+#define OTG_HICCUP_MODE_BIT BIT(2)
+#define INPUT_CURRENT_LIMIT_MASK SMB1351_MASK(1, 0)
+
+#define CHARGER_I2C_CTRL_REG 0x15
+#define FULLON_MODE_EN_BIT BIT(7)
+#define I2C_HS_MODE_EN_BIT BIT(6)
+#define SYSON_LDO_OUTPUT_SEL_BIT BIT(5)
+#define VBATT_TRACKING_VOLTAGE_DIFF_BIT BIT(4)
+#define DISABLE_AFVC_WHEN_ENTER_TAPER_BIT BIT(3)
+#define VCHG_IINV_BIT BIT(2)
+#define AFVC_OVERRIDE_BIT BIT(1)
+#define SYSOK_PIN_CONFIG_BIT BIT(0)
+
+#define VERSION_REG 0x2E
+#define VERSION_MASK BIT(1)
+
+/* Command registers */
+#define CMD_I2C_REG 0x30
+#define CMD_RELOAD_BIT BIT(7)
+#define CMD_BQ_CFG_ACCESS_BIT BIT(6)
+
+#define CMD_INPUT_LIMIT_REG 0x31
+#define CMD_OVERRIDE_BIT BIT(7)
+#define CMD_SUSPEND_MODE_BIT BIT(6)
+#define CMD_INPUT_CURRENT_MODE_BIT BIT(3)
+#define CMD_INPUT_CURRENT_MODE_APSD 0
+#define CMD_INPUT_CURRENT_MODE_CMD 0x08
+#define CMD_USB_2_3_SEL_BIT BIT(2)
+#define CMD_USB_2_MODE 0
+#define CMD_USB_3_MODE 0x4
+#define CMD_USB_1_5_AC_CTRL_MASK SMB1351_MASK(1, 0)
+#define CMD_USB_100_MODE 0
+#define CMD_USB_500_MODE 0x2
+#define CMD_USB_AC_MODE 0x1
+
+#define CMD_CHG_REG 0x32
+#define CMD_DISABLE_THERM_MONITOR_BIT BIT(4)
+#define CMD_TURN_OFF_STAT_PIN_BIT BIT(3)
+#define CMD_PRE_TO_FAST_EN_BIT BIT(2)
+#define CMD_CHG_EN_BIT BIT(1)
+#define CMD_CHG_DISABLE 0
+#define CMD_CHG_ENABLE 0x2
+#define CMD_OTG_EN_BIT BIT(0)
+
+#define CMD_DEAD_BATT_REG 0x33
+#define CMD_STOP_DEAD_BATT_TIMER_MASK SMB1351_MASK(7, 0)
+
+#define CMD_HVDCP_REG 0x34
+#define CMD_APSD_RE_RUN_BIT BIT(7)
+#define CMD_FORCE_HVDCP_2P0_BIT BIT(5)
+#define CMD_HVDCP_MODE_MASK SMB1351_MASK(5, 0)
+
+/* Status registers */
+#define STATUS_0_REG 0x36
+#define STATUS_AICL_BIT BIT(7)
+#define STATUS_INPUT_CURRENT_LIMIT_MASK SMB1351_MASK(6, 5)
+#define STATUS_DCIN_INPUT_CURRENT_LIMIT_MASK SMB1351_MASK(4, 0)
+
+#define STATUS_1_REG 0x37
+#define STATUS_INPUT_RANGE_MASK SMB1351_MASK(7, 4)
+#define STATUS_INPUT_USB_BIT BIT(0)
+
+#define STATUS_2_REG 0x38
+#define STATUS_FAST_CHG_BIT BIT(7)
+#define STATUS_HARD_LIMIT_BIT BIT(6)
+#define STATUS_FLOAT_VOLTAGE_MASK SMB1351_MASK(5, 0)
+
+#define STATUS_3_REG 0x39
+#define STATUS_CHG_BIT BIT(7)
+#define STATUS_PRECHG_CURRENT_MASK SMB1351_MASK(6, 4)
+#define STATUS_FAST_CHG_CURRENT_MASK SMB1351_MASK(3, 0)
+
+#define STATUS_4_REG 0x3A
+#define STATUS_OTG_BIT BIT(7)
+#define STATUS_AFVC_BIT BIT(6)
+#define STATUS_DONE_BIT BIT(5)
+#define STATUS_BATT_LESS_THAN_2V_BIT BIT(4)
+#define STATUS_HOLD_OFF_BIT BIT(3)
+#define STATUS_CHG_MASK SMB1351_MASK(2, 1)
+#define STATUS_NO_CHARGING 0
+#define STATUS_FAST_CHARGING 0x4
+#define STATUS_PRE_CHARGING 0x2
+#define STATUS_TAPER_CHARGING 0x6
+#define STATUS_CHG_EN_STATUS_BIT BIT(0)
+
+#define STATUS_5_REG 0x3B
+#define STATUS_SOURCE_DETECTED_MASK SMB1351_MASK(7, 0)
+#define STATUS_PORT_CDP 0x80
+#define STATUS_PORT_DCP 0x40
+#define STATUS_PORT_OTHER 0x20
+#define STATUS_PORT_SDP 0x10
+#define STATUS_PORT_ACA_A 0x8
+#define STATUS_PORT_ACA_B 0x4
+#define STATUS_PORT_ACA_C 0x2
+#define STATUS_PORT_ACA_DOCK 0x1
+
+#define STATUS_6_REG 0x3C
+#define STATUS_DCD_TIMEOUT_BIT BIT(7)
+#define STATUS_DCD_GOOD_DG_BIT BIT(6)
+#define STATUS_OCD_GOOD_DG_BIT BIT(5)
+#define STATUS_RID_ABD_DG_BIT BIT(4)
+#define STATUS_RID_FLOAT_STATE_MACHINE_BIT BIT(3)
+#define STATUS_RID_A_STATE_MACHINE_BIT BIT(2)
+#define STATUS_RID_B_STATE_MACHINE_BIT BIT(1)
+#define STATUS_RID_C_STATE_MACHINE_BIT BIT(0)
+
+#define STATUS_7_REG 0x3D
+#define STATUS_HVDCP_MASK SMB1351_MASK(7, 0)
+
+#define STATUS_8_REG 0x3E
+#define STATUS_USNIN_HV_INPUT_SEL_BIT BIT(5)
+#define STATUS_USBIN_LV_UNDER_INPUT_SEL_BIT BIT(4)
+#define STATUS_USBIN_LV_INPUT_SEL_BIT BIT(3)
+
+/* Revision register */
+#define CHG_REVISION_REG 0x3F
+#define GUI_REVISION_MASK SMB1351_MASK(7, 4)
+#define DEVICE_REVISION_MASK SMB1351_MASK(3, 0)
+
+/* IRQ status registers */
+#define IRQ_A_REG 0x40
+#define IRQ_HOT_HARD_BIT BIT(6)
+#define IRQ_COLD_HARD_BIT BIT(4)
+#define IRQ_HOT_SOFT_BIT BIT(2)
+#define IRQ_COLD_SOFT_BIT BIT(0)
+
+#define IRQ_B_REG 0x41
+#define IRQ_BATT_TERMINAL_REMOVED_BIT BIT(6)
+#define IRQ_BATT_MISSING_BIT BIT(4)
+#define IRQ_LOW_BATT_VOLTAGE_BIT BIT(2)
+#define IRQ_INTERNAL_TEMP_LIMIT_BIT BIT(0)
+
+#define IRQ_C_REG 0x42
+#define IRQ_PRE_TO_FAST_VOLTAGE_BIT BIT(6)
+#define IRQ_RECHG_BIT BIT(4)
+#define IRQ_TAPER_BIT BIT(2)
+#define IRQ_TERM_BIT BIT(0)
+
+#define IRQ_D_REG 0x43
+#define IRQ_BATT_OV_BIT BIT(6)
+#define IRQ_CHG_ERROR_BIT BIT(4)
+#define IRQ_CHG_TIMEOUT_BIT BIT(2)
+#define IRQ_PRECHG_TIMEOUT_BIT BIT(0)
+
+#define IRQ_E_REG 0x44
+#define IRQ_USBIN_OV_BIT BIT(6)
+#define IRQ_USBIN_UV_BIT BIT(4)
+#define IRQ_AFVC_BIT BIT(2)
+#define IRQ_POWER_OK_BIT BIT(0)
+
+#define IRQ_F_REG 0x45
+#define IRQ_OTG_OVER_CURRENT_BIT BIT(6)
+#define IRQ_OTG_FAIL_BIT BIT(4)
+#define IRQ_RID_BIT BIT(2)
+#define IRQ_OTG_OC_RETRY_BIT BIT(0)
+
+#define IRQ_G_REG 0x46
+#define IRQ_SOURCE_DET_BIT BIT(6)
+#define IRQ_AICL_DONE_BIT BIT(4)
+#define IRQ_AICL_FAIL_BIT BIT(2)
+#define IRQ_CHG_INHIBIT_BIT BIT(0)
+
+#define IRQ_H_REG 0x47
+#define IRQ_IC_LIMIT_STATUS_BIT BIT(5)
+#define IRQ_HVDCP_2P1_STATUS_BIT BIT(4)
+#define IRQ_HVDCP_AUTH_DONE_BIT BIT(2)
+#define IRQ_WDOG_TIMEOUT_BIT BIT(0)
+
+/* constants */
+#define USB2_MIN_CURRENT_MA 100
+#define USB2_MAX_CURRENT_MA 500
+#define USB3_MIN_CURRENT_MA 150
+#define USB3_MAX_CURRENT_MA 900
+#define SMB1351_IRQ_REG_COUNT 8
+#define SMB1351_CHG_PRE_MIN_MA 100
+#define SMB1351_CHG_FAST_MIN_MA 1000
+#define SMB1351_CHG_FAST_MAX_MA 4500
+#define SMB1351_CHG_PRE_SHIFT 5
+#define SMB1351_CHG_FAST_SHIFT 4
+#define DEFAULT_BATT_CAPACITY 50
+#define DEFAULT_BATT_TEMP 250
+#define SUSPEND_CURRENT_MA 2
+
+#define CHG_ITERM_200MA 0x0
+#define CHG_ITERM_300MA 0x04
+#define CHG_ITERM_400MA 0x08
+#define CHG_ITERM_500MA 0x0C
+#define CHG_ITERM_600MA 0x10
+#define CHG_ITERM_700MA 0x14
+
+#define ADC_TM_WARM_COOL_THR_ENABLE ADC_TM_HIGH_LOW_THR_ENABLE
+
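+/* Reason bits OR'd together to track why USB input is suspended or charging is disabled */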
+enum reason {
+ USER = BIT(0),
+ THERMAL = BIT(1),
+ CURRENT = BIT(2),
+ SOC = BIT(3),
+};
+
+static char *pm_batt_supplied_to[] = {
+ "bms",
+};
+
+struct smb1351_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+};
+
+enum chip_version {
+ SMB_UNKNOWN = 0,
+ SMB1350,
+ SMB1351,
+ SMB_MAX_TYPE,
+};
+
+static const char *smb1351_version_str[SMB_MAX_TYPE] = {
+ [SMB_UNKNOWN] = "Unknown",
+ [SMB1350] = "SMB1350",
+ [SMB1351] = "SMB1351",
+};
+
+struct smb1351_charger {
+ struct i2c_client *client;
+ struct device *dev;
+
+ bool recharge_disabled;
+ int recharge_mv;
+ bool iterm_disabled;
+ int iterm_ma;
+ int vfloat_mv;
+ int chg_present;
+ int fake_battery_soc;
+ bool chg_autonomous_mode;
+ bool disable_apsd;
+ bool using_pmic_therm;
+ bool jeita_supported;
+ bool battery_missing;
+ const char *bms_psy_name;
+ bool resume_completed;
+ bool irq_waiting;
+ struct delayed_work chg_remove_work;
+ struct delayed_work hvdcp_det_work;
+
+ /* status tracking */
+ bool batt_full;
+ bool batt_hot;
+ bool batt_cold;
+ bool batt_warm;
+ bool batt_cool;
+
+ int battchg_disabled_status;
+ int usb_suspended_status;
+ int target_fastchg_current_max_ma;
+ int fastchg_current_max_ma;
+ int workaround_flags;
+
+ int parallel_pin_polarity_setting;
+ bool parallel_charger;
+ bool parallel_charger_present;
+ bool bms_controlled_charging;
+ bool apsd_rerun;
+ bool usbin_ov;
+ bool chg_remove_work_scheduled;
+ bool force_hvdcp_2p0;
+ enum chip_version version;
+
+ /* psy */
+ struct power_supply *usb_psy;
+ int usb_psy_ma;
+ struct power_supply *bms_psy;
+ struct power_supply batt_psy;
+ struct power_supply parallel_psy;
+
+ struct smb1351_regulator otg_vreg;
+ struct mutex irq_complete;
+
+ struct dentry *debug_root;
+ u32 peek_poke_address;
+
+ /* adc_tm parameters */
+ struct qpnp_vadc_chip *vadc_dev;
+ struct qpnp_adc_tm_chip *adc_tm_dev;
+ struct qpnp_adc_tm_btm_param adc_param;
+
+ /* jeita parameters */
+ int batt_hot_decidegc;
+ int batt_cold_decidegc;
+ int batt_warm_decidegc;
+ int batt_cool_decidegc;
+ int batt_missing_decidegc;
+ unsigned int batt_warm_ma;
+ unsigned int batt_warm_mv;
+ unsigned int batt_cool_ma;
+ unsigned int batt_cool_mv;
+
+ /* pinctrl parameters */
+ const char *pinctrl_state_name;
+ struct pinctrl *smb_pinctrl;
+};
+
+struct smb_irq_info {
+ const char *name;
+ int (*smb_irq)(struct smb1351_charger *chip, u8 rt_stat);
+ int high;
+ int low;
+};
+
+struct irq_handler_info {
+ u8 stat_reg;
+ u8 val;
+ u8 prev_val;
+ struct smb_irq_info irq_info[4];
+};
+
+/* USB input charge current */
+static int usb_chg_current[] = {
+ 500, 685, 1000, 1100, 1200, 1300, 1500, 1600,
+ 1700, 1800, 2000, 2200, 2500, 3000,
+};
+
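+/* Fast charge current steps in mA */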
+static int fast_chg_current[] = {
+ 1000, 1200, 1400, 1600, 1800, 2000, 2200,
+ 2400, 2600, 2800, 3000, 3400, 3600, 3800,
+ 4000, 4640,
+};
+
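+/* Pre-charge current steps in mA */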
+static int pre_chg_current[] = {
+ 200, 300, 400, 500, 600, 700,
+};
+
+struct battery_status {
+ bool batt_hot;
+ bool batt_warm;
+ bool batt_cool;
+ bool batt_cold;
+ bool batt_present;
+};
+
+enum {
+ BATT_HOT = 0,
+ BATT_WARM,
+ BATT_NORMAL,
+ BATT_COOL,
+ BATT_COLD,
+ BATT_MISSING,
+ BATT_STATUS_MAX,
+};
+
+static struct battery_status batt_s[] = {
+ [BATT_HOT] = {1, 0, 0, 0, 1},
+ [BATT_WARM] = {0, 1, 0, 0, 1},
+ [BATT_NORMAL] = {0, 0, 0, 0, 1},
+ [BATT_COOL] = {0, 0, 1, 0, 1},
+ [BATT_COLD] = {0, 0, 0, 1, 1},
+ [BATT_MISSING] = {0, 0, 0, 1, 0},
+};
+
+static int smb1351_read_reg(struct smb1351_charger *chip, int reg, u8 *val)
+{
+ s32 ret;
+
+ pm_stay_awake(chip->dev);
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ if (ret < 0) {
+ pr_err("i2c read fail: can't read from %02x: %d\n", reg, ret);
+ pm_relax(chip->dev);
+ return ret;
+ } else {
+ *val = ret;
+ }
+ pm_relax(chip->dev);
+ pr_debug("Reading 0x%02x=0x%02x\n", reg, *val);
+ return 0;
+}
+
+static int smb1351_write_reg(struct smb1351_charger *chip, int reg, u8 val)
+{
+ s32 ret;
+
+ pm_stay_awake(chip->dev);
+ ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+ if (ret < 0) {
+ pr_err("i2c write fail: can't write %02x to %02x: %d\n",
+ val, reg, ret);
+ pm_relax(chip->dev);
+ return ret;
+ }
+ pm_relax(chip->dev);
+ pr_debug("Writing 0x%02x=0x%02x\n", reg, val);
+ return 0;
+}
+
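+/* Read-modify-write: update only the bits selected by mask, leaving the rest untouched */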
+static int smb1351_masked_write(struct smb1351_charger *chip, int reg,
+ u8 mask, u8 val)
+{
+ s32 rc;
+ u8 temp;
+
+ rc = smb1351_read_reg(chip, reg, &temp);
+ if (rc) {
+ pr_err("read failed: reg=%03X, rc=%d\n", reg, rc);
+ return rc;
+ }
+ temp &= ~mask;
+ temp |= val & mask;
+ rc = smb1351_write_reg(chip, reg, temp);
+ if (rc) {
+ pr_err("write failed: reg=%03X, rc=%d\n", reg, rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int smb1351_enable_volatile_writes(struct smb1351_charger *chip)
+{
+ int rc;
+
+ rc = smb1351_masked_write(chip, CMD_I2C_REG, CMD_BQ_CFG_ACCESS_BIT,
+ CMD_BQ_CFG_ACCESS_BIT);
+ if (rc)
+ pr_err("Couldn't write CMD_BQ_CFG_ACCESS_BIT rc=%d\n", rc);
+
+ return rc;
+}
+
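+/*
+ * Each caller contributes a reason bit; USB input stays suspended as long as
+ * at least one reason bit remains set.
+ */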
+static int smb1351_usb_suspend(struct smb1351_charger *chip, int reason,
+ bool suspend)
+{
+ int rc = 0;
+ int suspended;
+
+ suspended = chip->usb_suspended_status;
+
+ pr_debug("reason = %d requested_suspend = %d suspended_status = %d\n",
+ reason, suspend, suspended);
+
+ if (suspend == false)
+ suspended &= ~reason;
+ else
+ suspended |= reason;
+
+ pr_debug("new suspended_status = %d\n", suspended);
+
+ rc = smb1351_masked_write(chip, CMD_INPUT_LIMIT_REG,
+ CMD_SUSPEND_MODE_BIT,
+ suspended ? CMD_SUSPEND_MODE_BIT : 0);
+ if (rc)
+ pr_err("Couldn't suspend rc = %d\n", rc);
+ else
+ chip->usb_suspended_status = suspended;
+
+ return rc;
+}
+
+static int smb1351_battchg_disable(struct smb1351_charger *chip,
+ int reason, int disable)
+{
+ int rc = 0;
+ int disabled;
+
+ if (chip->chg_autonomous_mode) {
+ pr_debug("Charger in autonomous mode\n");
+ return 0;
+ }
+
+ disabled = chip->battchg_disabled_status;
+
+ pr_debug("reason = %d requested_disable = %d disabled_status = %d\n",
+ reason, disable, disabled);
+ if (disable == true)
+ disabled |= reason;
+ else
+ disabled &= ~reason;
+
+ pr_debug("new disabled_status = %d\n", disabled);
+
+ rc = smb1351_masked_write(chip, CMD_CHG_REG, CMD_CHG_EN_BIT,
+ disabled ? 0 : CMD_CHG_ENABLE);
+ if (rc)
+ pr_err("Couldn't %s charging rc=%d\n",
+ disable ? "disable" : "enable", rc);
+ else
+ chip->battchg_disabled_status = disabled;
+
+ return rc;
+}
+
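+/*
+ * Program the largest supported current that does not exceed the request.
+ * Requests below SMB1351_CHG_FAST_MIN_MA are mapped onto the pre-charge
+ * current range instead (used for parallel charging).
+ */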
+static int smb1351_fastchg_current_set(struct smb1351_charger *chip,
+ unsigned int fastchg_current)
+{
+ int i, rc;
+ bool is_pre_chg = false;
+
+
+ if ((fastchg_current < SMB1351_CHG_PRE_MIN_MA) ||
+ (fastchg_current > SMB1351_CHG_FAST_MAX_MA)) {
+ pr_err("bad pre_fastchg current mA=%d asked to set\n",
+ fastchg_current);
+ return -EINVAL;
+ }
+
+ /*
+ * The fast charge current cannot be set below 1000 mA, so use the
+ * pre-charge current range instead for parallel charging.
+ */
+ if (fastchg_current < SMB1351_CHG_FAST_MIN_MA) {
+ is_pre_chg = true;
+ pr_debug("is_pre_chg true, current is %d\n", fastchg_current);
+ }
+
+ if (is_pre_chg) {
+ /* set prechg current */
+ for (i = ARRAY_SIZE(pre_chg_current) - 1; i >= 0; i--) {
+ if (pre_chg_current[i] <= fastchg_current)
+ break;
+ }
+ if (i < 0)
+ i = 0;
+ chip->fastchg_current_max_ma = pre_chg_current[i];
+ pr_debug("prechg setting %02x\n", i);
+
+ i = i << SMB1351_CHG_PRE_SHIFT;
+
+ rc = smb1351_masked_write(chip, CHG_OTH_CURRENT_CTRL_REG,
+ PRECHG_CURRENT_MASK, i);
+ if (rc)
+ pr_err("Couldn't write CHG_OTH_CURRENT_CTRL_REG rc=%d\n",
+ rc);
+
+ return smb1351_masked_write(chip, VARIOUS_FUNC_2_REG,
+ PRECHG_TO_FASTCHG_BIT, PRECHG_TO_FASTCHG_BIT);
+ } else {
+ if (chip->version == SMB_UNKNOWN)
+ return -EINVAL;
+
+ /* SMB1350 supports FCC up to 2600 mA */
+ if (chip->version == SMB1350 && fastchg_current > 2600)
+ fastchg_current = 2600;
+
+ /* set fastchg current */
+ for (i = ARRAY_SIZE(fast_chg_current) - 1; i >= 0; i--) {
+ if (fast_chg_current[i] <= fastchg_current)
+ break;
+ }
+ if (i < 0)
+ i = 0;
+ chip->fastchg_current_max_ma = fast_chg_current[i];
+
+ i = i << SMB1351_CHG_FAST_SHIFT;
+ pr_debug("fastchg limit=%d setting %02x\n",
+ chip->fastchg_current_max_ma, i);
+
+ /* make sure pre chg mode is disabled */
+ rc = smb1351_masked_write(chip, VARIOUS_FUNC_2_REG,
+ PRECHG_TO_FASTCHG_BIT, 0);
+ if (rc)
+ pr_err("Couldn't write VARIOUS_FUNC_2_REG rc=%d\n", rc);
+
+ return smb1351_masked_write(chip, CHG_CURRENT_CTRL_REG,
+ FAST_CHG_CURRENT_MASK, i);
+ }
+}
+
+#define MIN_FLOAT_MV 3500
+#define MAX_FLOAT_MV 4500
+#define VFLOAT_STEP_MV 20
+
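+/* The float voltage is programmed in 20 mV steps starting from 3500 mV */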
+static int smb1351_float_voltage_set(struct smb1351_charger *chip,
+ int vfloat_mv)
+{
+ u8 temp;
+
+ if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+ pr_err("bad float voltage mv =%d asked to set\n", vfloat_mv);
+ return -EINVAL;
+ }
+
+ temp = (vfloat_mv - MIN_FLOAT_MV) / VFLOAT_STEP_MV;
+
+ return smb1351_masked_write(chip, VFLOAT_REG, VFLOAT_MASK, temp);
+}
+
+static int smb1351_iterm_set(struct smb1351_charger *chip, int iterm_ma)
+{
+ int rc;
+ u8 reg;
+
+ if (iterm_ma <= 200)
+ reg = CHG_ITERM_200MA;
+ else if (iterm_ma <= 300)
+ reg = CHG_ITERM_300MA;
+ else if (iterm_ma <= 400)
+ reg = CHG_ITERM_400MA;
+ else if (iterm_ma <= 500)
+ reg = CHG_ITERM_500MA;
+ else if (iterm_ma <= 600)
+ reg = CHG_ITERM_600MA;
+ else
+ reg = CHG_ITERM_700MA;
+
+ rc = smb1351_masked_write(chip, CHG_OTH_CURRENT_CTRL_REG,
+ ITERM_MASK, reg);
+ if (rc) {
+ pr_err("Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ /* enable the iterm */
+ rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+ ITERM_EN_BIT, ITERM_ENABLE);
+ if (rc) {
+ pr_err("Couldn't enable iterm rc = %d\n", rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int smb1351_chg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smb1351_charger *chip = rdev_get_drvdata(rdev);
+
+ rc = smb1351_masked_write(chip, CMD_CHG_REG, CMD_OTG_EN_BIT,
+ CMD_OTG_EN_BIT);
+ if (rc)
+ pr_err("Couldn't enable OTG mode rc=%d\n", rc);
+ return rc;
+}
+
+static int smb1351_chg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smb1351_charger *chip = rdev_get_drvdata(rdev);
+
+ rc = smb1351_masked_write(chip, CMD_CHG_REG, CMD_OTG_EN_BIT, 0);
+ if (rc)
+ pr_err("Couldn't disable OTG mode rc=%d\n", rc);
+ return rc;
+}
+
+static int smb1351_chg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ u8 reg = 0;
+ struct smb1351_charger *chip = rdev_get_drvdata(rdev);
+
+ rc = smb1351_read_reg(chip, CMD_CHG_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read OTG enable bit rc=%d\n", rc);
+ return rc;
+ }
+
+ return (reg & CMD_OTG_EN_BIT) ? 1 : 0;
+}
+
+struct regulator_ops smb1351_chg_otg_reg_ops = {
+ .enable = smb1351_chg_otg_regulator_enable,
+ .disable = smb1351_chg_otg_regulator_disable,
+ .is_enabled = smb1351_chg_otg_regulator_is_enable,
+};
+
+static int smb1351_regulator_init(struct smb1351_charger *chip)
+{
+ int rc = 0;
+ struct regulator_init_data *init_data;
+ struct regulator_config cfg = {};
+
+ init_data = of_get_regulator_init_data(chip->dev, chip->dev->of_node);
+ if (!init_data) {
+ pr_err("Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ if (init_data->constraints.name) {
+ chip->otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->otg_vreg.rdesc.ops = &smb1351_chg_otg_reg_ops;
+ chip->otg_vreg.rdesc.name = init_data->constraints.name;
+
+ cfg.dev = chip->dev;
+ cfg.init_data = init_data;
+ cfg.driver_data = chip;
+ cfg.of_node = chip->dev->of_node;
+
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+
+ chip->otg_vreg.rdev = regulator_register(
+ &chip->otg_vreg.rdesc, &cfg);
+ if (IS_ERR(chip->otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->otg_vreg.rdev);
+ chip->otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ pr_err("OTG reg failed, rc=%d\n", rc);
+ }
+ }
+
+ return rc;
+}
+
+static int smb_chip_get_version(struct smb1351_charger *chip)
+{
+ u8 ver;
+ int rc = 0;
+
+ if (chip->version == SMB_UNKNOWN) {
+ rc = smb1351_read_reg(chip, VERSION_REG, &ver);
+ if (rc) {
+ pr_err("Couldn't read version rc=%d\n", rc);
+ return rc;
+ }
+
+ /* If bit 1 is set, it is SMB1350 */
+ if (ver & VERSION_MASK)
+ chip->version = SMB1350;
+ else
+ chip->version = SMB1351;
+ }
+
+ return rc;
+}
+
+static int smb1351_hw_init(struct smb1351_charger *chip)
+{
+ int rc;
+ u8 reg = 0, mask = 0;
+
+ /* configure smb_pinctrl to enable irqs */
+ if (chip->pinctrl_state_name) {
+ chip->smb_pinctrl = pinctrl_get_select(chip->dev,
+ chip->pinctrl_state_name);
+ if (IS_ERR(chip->smb_pinctrl)) {
+ pr_err("Could not get/set %s pinctrl state rc = %ld\n",
+ chip->pinctrl_state_name,
+ PTR_ERR(chip->smb_pinctrl));
+ return PTR_ERR(chip->smb_pinctrl);
+ }
+ }
+
+ /*
+ * If the charger is pre-configured for autonomous operation,
+ * do not apply additional settings
+ */
+ if (chip->chg_autonomous_mode) {
+ pr_debug("Charger configured for autonomous mode\n");
+ return 0;
+ }
+
+ rc = smb_chip_get_version(chip);
+ if (rc) {
+ pr_err("Couldn't get version rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smb1351_enable_volatile_writes(chip);
+ if (rc) {
+ pr_err("Couldn't configure volatile writes rc=%d\n", rc);
+ return rc;
+ }
+
+ /* setup battery missing source */
+ reg = BATT_MISSING_THERM_PIN_SOURCE_BIT;
+ mask = BATT_MISSING_THERM_PIN_SOURCE_BIT;
+ rc = smb1351_masked_write(chip, HVDCP_BATT_MISSING_CTRL_REG,
+ mask, reg);
+ if (rc) {
+ pr_err("Couldn't set HVDCP_BATT_MISSING_CTRL_REG rc=%d\n", rc);
+ return rc;
+ }
+ /* setup defaults for CHG_PIN_EN_CTRL_REG */
+ reg = EN_BY_I2C_0_DISABLE | USBCS_CTRL_BY_I2C | CHG_ERR_BIT |
+ APSD_DONE_BIT | LED_BLINK_FUNC_BIT;
+ mask = EN_PIN_CTRL_MASK | USBCS_CTRL_BIT | CHG_ERR_BIT |
+ APSD_DONE_BIT | LED_BLINK_FUNC_BIT;
+ rc = smb1351_masked_write(chip, CHG_PIN_EN_CTRL_REG, mask, reg);
+ if (rc) {
+ pr_err("Couldn't set CHG_PIN_EN_CTRL_REG rc=%d\n", rc);
+ return rc;
+ }
+ /* setup USB 2.0/3.0 detection and USB 500/100 command polarity */
+ reg = USB_2_3_MODE_SEL_BY_I2C | USB_CMD_POLARITY_500_1_100_0;
+ mask = USB_2_3_MODE_SEL_BIT | USB_5_1_CMD_POLARITY_BIT;
+ rc = smb1351_masked_write(chip, CHG_OTH_CURRENT_CTRL_REG, mask, reg);
+ if (rc) {
+ pr_err("Couldn't set CHG_OTH_CURRENT_CTRL_REG rc=%d\n", rc);
+ return rc;
+ }
+ /* setup USB suspend, AICL and APSD */
+ reg = SUSPEND_MODE_CTRL_BY_I2C | AICL_EN_BIT;
+ if (!chip->disable_apsd)
+ reg |= APSD_EN_BIT;
+ mask = SUSPEND_MODE_CTRL_BIT | AICL_EN_BIT | APSD_EN_BIT;
+ rc = smb1351_masked_write(chip, VARIOUS_FUNC_REG, mask, reg);
+ if (rc) {
+ pr_err("Couldn't set VARIOUS_FUNC_REG rc=%d\n", rc);
+ return rc;
+ }
+ /* Fault and Status IRQ configuration */
+ reg = HOT_COLD_HARD_LIMIT_BIT | HOT_COLD_SOFT_LIMIT_BIT
+ | INPUT_OVLO_BIT | INPUT_UVLO_BIT | AICL_DONE_FAIL_BIT;
+ rc = smb1351_write_reg(chip, FAULT_INT_REG, reg);
+ if (rc) {
+ pr_err("Couldn't set FAULT_INT_REG rc=%d\n", rc);
+ return rc;
+ }
+ reg = CHG_OR_PRECHG_TIMEOUT_BIT | BATT_OVP_BIT |
+ FAST_TERM_TAPER_RECHG_INHIBIT_BIT |
+ BATT_MISSING_BIT | BATT_LOW_BIT;
+ rc = smb1351_write_reg(chip, STATUS_INT_REG, reg);
+ if (rc) {
+ pr_err("Couldn't set STATUS_INT_REG rc=%d\n", rc);
+ return rc;
+ }
+ /* setup THERM Monitor */
+ if (!chip->using_pmic_therm) {
+ rc = smb1351_masked_write(chip, THERM_A_CTRL_REG,
+ THERM_MONITOR_BIT, THERM_MONITOR_EN);
+ if (rc) {
+ pr_err("Couldn't set THERM_A_CTRL_REG rc=%d\n", rc);
+ return rc;
+ }
+ }
+ /* set the fast charge current limit */
+ rc = smb1351_fastchg_current_set(chip,
+ chip->target_fastchg_current_max_ma);
+ if (rc) {
+ pr_err("Couldn't set fastchg current rc=%d\n", rc);
+ return rc;
+ }
+
+ /* set the float voltage */
+ if (chip->vfloat_mv != -EINVAL) {
+ rc = smb1351_float_voltage_set(chip, chip->vfloat_mv);
+ if (rc) {
+ pr_err("Couldn't set float voltage rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* set iterm */
+ if (chip->iterm_ma != -EINVAL) {
+ if (chip->iterm_disabled) {
+ pr_err("Error: Both iterm_disabled and iterm_ma set\n");
+ return -EINVAL;
+ } else {
+ rc = smb1351_iterm_set(chip, chip->iterm_ma);
+ if (rc) {
+ pr_err("Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ }
+ } else if (chip->iterm_disabled) {
+ rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+ ITERM_EN_BIT, ITERM_DISABLE);
+ if (rc) {
+ pr_err("Couldn't set iterm rc = %d\n", rc);
+ return rc;
+ }
+ }
+
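+ /*
+ * The chip exposes only two auto-recharge threshold settings, 50 mV
+ * and 100 mV (presumably relative to the float voltage), so any
+ * recharge_mv above 50 is mapped to the 100 mV setting below.
+ */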
+ /* set recharge-threshold */
+ if (chip->recharge_mv != -EINVAL) {
+ if (chip->recharge_disabled) {
+ pr_err("Error: Both recharge_disabled and recharge_mv set\n");
+ return -EINVAL;
+ } else {
+ reg = AUTO_RECHG_ENABLE;
+ if (chip->recharge_mv > 50)
+ reg |= AUTO_RECHG_TH_100MV;
+ else
+ reg |= AUTO_RECHG_TH_50MV;
+
+ rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+ AUTO_RECHG_BIT |
+ AUTO_RECHG_TH_BIT, reg);
+ if (rc) {
+ pr_err("Couldn't set rechg-cfg rc = %d\n", rc);
+ return rc;
+ }
+ }
+ } else if (chip->recharge_disabled) {
+ rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+ AUTO_RECHG_BIT,
+ AUTO_RECHG_DISABLE);
+ if (rc) {
+ pr_err("Couldn't disable auto-rechg rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* enable/disable charging by suspending usb */
+ rc = smb1351_usb_suspend(chip, USER, chip->usb_suspended_status);
+ if (rc) {
+ pr_err("Unable to %s battery charging. rc=%d\n",
+ chip->usb_suspended_status ? "disable" : "enable",
+ rc);
+ }
+
+ return rc;
+}
+
+static enum power_supply_property smb1351_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+};
+
+static int smb1351_get_prop_batt_status(struct smb1351_charger *chip)
+{
+ int rc;
+ u8 reg = 0;
+
+ if (chip->batt_full)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ rc = smb1351_read_reg(chip, STATUS_4_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read STATUS_4 rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ pr_debug("STATUS_4_REG(0x3A)=%x\n", reg);
+
+ if (reg & STATUS_HOLD_OFF_BIT)
+ return POWER_SUPPLY_STATUS_NOT_CHARGING;
+
+ if (reg & STATUS_CHG_MASK)
+ return POWER_SUPPLY_STATUS_CHARGING;
+
+ return POWER_SUPPLY_STATUS_DISCHARGING;
+}
+
+static int smb1351_get_prop_batt_present(struct smb1351_charger *chip)
+{
+ return !chip->battery_missing;
+}
+
+static int smb1351_get_prop_batt_capacity(struct smb1351_charger *chip)
+{
+ union power_supply_propval ret = {0, };
+
+ if (chip->fake_battery_soc >= 0)
+ return chip->fake_battery_soc;
+
+ if (chip->bms_psy) {
+ chip->bms_psy->get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_CAPACITY, &ret);
+ return ret.intval;
+ }
+ pr_debug("return DEFAULT_BATT_CAPACITY\n");
+ return DEFAULT_BATT_CAPACITY;
+}
+
+static int smb1351_get_prop_batt_temp(struct smb1351_charger *chip)
+{
+ union power_supply_propval ret = {0, };
+ int rc = 0;
+ struct qpnp_vadc_result results;
+
+ if (chip->bms_psy) {
+ chip->bms_psy->get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_TEMP, &ret);
+ return ret.intval;
+ }
+ if (chip->vadc_dev) {
+ rc = qpnp_vadc_read(chip->vadc_dev,
+ LR_MUX1_BATT_THERM, &results);
+ if (rc)
+ pr_debug("Unable to read adc batt temp rc=%d\n", rc);
+ else
+ return (int)results.physical;
+ }
+
+ pr_debug("return default temperature\n");
+ return DEFAULT_BATT_TEMP;
+}
+
+static int smb1351_get_prop_charge_type(struct smb1351_charger *chip)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = smb1351_read_reg(chip, STATUS_4_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read STATUS_4 rc = %d\n", rc);
+ return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ }
+
+ pr_debug("STATUS_4_REG(0x3A)=%x\n", reg);
+
+ reg &= STATUS_CHG_MASK;
+
+ if (reg == STATUS_FAST_CHARGING)
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else if (reg == STATUS_TAPER_CHARGING)
+ return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+ else if (reg == STATUS_PRE_CHARGING)
+ return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ else
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+static int smb1351_get_prop_batt_health(struct smb1351_charger *chip)
+{
+ union power_supply_propval ret = {0, };
+
+ if (chip->batt_hot)
+ ret.intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (chip->batt_cold)
+ ret.intval = POWER_SUPPLY_HEALTH_COLD;
+ else if (chip->batt_warm)
+ ret.intval = POWER_SUPPLY_HEALTH_WARM;
+ else if (chip->batt_cool)
+ ret.intval = POWER_SUPPLY_HEALTH_COOL;
+ else
+ ret.intval = POWER_SUPPLY_HEALTH_GOOD;
+
+ return ret.intval;
+}
+
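+/*
+ * Map the requested input current onto a charger mode: requests at or below
+ * SUSPEND_CURRENT_MA suspend the USB input, the standard USB setpoints
+ * (100/150/500/900 mA) select the fixed USB 2.0/3.0 command modes, and other
+ * requests above 500 mA use high-current (AC) mode with the largest
+ * usb_chg_current[] entry not exceeding the request as the input limit.
+ */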
+static int smb1351_set_usb_chg_current(struct smb1351_charger *chip,
+ int current_ma)
+{
+ int i, rc = 0;
+ u8 reg = 0, mask = 0;
+
+ pr_debug("USB current_ma = %d\n", current_ma);
+
+ if (chip->chg_autonomous_mode) {
+ pr_debug("Charger in autonomous mode\n");
+ return 0;
+ }
+
+ /* set suspend bit when current_ma <= 2 */
+ if (current_ma <= SUSPEND_CURRENT_MA) {
+ smb1351_usb_suspend(chip, CURRENT, true);
+ pr_debug("USB suspend\n");
+ return 0;
+ }
+
+ if (current_ma > SUSPEND_CURRENT_MA &&
+ current_ma < USB2_MIN_CURRENT_MA)
+ current_ma = USB2_MIN_CURRENT_MA;
+
+ if (current_ma == USB2_MIN_CURRENT_MA) {
+ /* USB 2.0 - 100mA */
+ reg = CMD_USB_2_MODE | CMD_USB_100_MODE;
+ } else if (current_ma == USB3_MIN_CURRENT_MA) {
+ /* USB 3.0 - 150mA */
+ reg = CMD_USB_3_MODE | CMD_USB_100_MODE;
+ } else if (current_ma == USB2_MAX_CURRENT_MA) {
+ /* USB 2.0 - 500mA */
+ reg = CMD_USB_2_MODE | CMD_USB_500_MODE;
+ } else if (current_ma == USB3_MAX_CURRENT_MA) {
+ /* USB 3.0 - 900mA */
+ reg = CMD_USB_3_MODE | CMD_USB_500_MODE;
+ } else if (current_ma > USB2_MAX_CURRENT_MA) {
+ /* HC mode - if none of the above */
+ reg = CMD_USB_AC_MODE;
+
+ for (i = ARRAY_SIZE(usb_chg_current) - 1; i >= 0; i--) {
+ if (usb_chg_current[i] <= current_ma)
+ break;
+ }
+ if (i < 0)
+ i = 0;
+ rc = smb1351_masked_write(chip, CHG_CURRENT_CTRL_REG,
+ AC_INPUT_CURRENT_LIMIT_MASK, i);
+ if (rc) {
+ pr_err("Couldn't set input mA rc=%d\n", rc);
+ return rc;
+ }
+ }
+ /* control input current mode by command */
+ reg |= CMD_INPUT_CURRENT_MODE_CMD;
+ mask = CMD_INPUT_CURRENT_MODE_BIT | CMD_USB_2_3_SEL_BIT |
+ CMD_USB_1_5_AC_CTRL_MASK;
+ rc = smb1351_masked_write(chip, CMD_INPUT_LIMIT_REG, mask, reg);
+ if (rc) {
+ pr_err("Couldn't set charging mode rc = %d\n", rc);
+ return rc;
+ }
+
+ /* unset the suspend bit here */
+ smb1351_usb_suspend(chip, CURRENT, false);
+
+ return rc;
+}
+
+static int smb1351_batt_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CAPACITY:
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int smb1351_battery_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc;
+ struct smb1351_charger *chip = container_of(psy,
+ struct smb1351_charger, batt_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (!chip->bms_controlled_charging)
+ return -EINVAL;
+ switch (val->intval) {
+ case POWER_SUPPLY_STATUS_FULL:
+ rc = smb1351_battchg_disable(chip, SOC, true);
+ if (rc) {
+ pr_err("Couldn't disable charging rc = %d\n",
+ rc);
+ } else {
+ chip->batt_full = true;
+ pr_debug("status = FULL, batt_full = %d\n",
+ chip->batt_full);
+ }
+ break;
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ chip->batt_full = false;
+ power_supply_changed(&chip->batt_psy);
+ pr_debug("status = DISCHARGING, batt_full = %d\n",
+ chip->batt_full);
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ rc = smb1351_battchg_disable(chip, SOC, false);
+ if (rc) {
+ pr_err("Couldn't enable charging rc = %d\n",
+ rc);
+ } else {
+ chip->batt_full = false;
+ pr_debug("status = CHARGING, batt_full = %d\n",
+ chip->batt_full);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ smb1351_usb_suspend(chip, USER, !val->intval);
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ smb1351_battchg_disable(chip, USER, !val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ chip->fake_battery_soc = val->intval;
+ power_supply_changed(&chip->batt_psy);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smb1351_battery_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smb1351_charger *chip = container_of(psy,
+ struct smb1351_charger, batt_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = smb1351_get_prop_batt_status(chip);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = smb1351_get_prop_batt_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = smb1351_get_prop_batt_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = !chip->usb_suspended_status;
+ break;
+ case POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED:
+ val->intval = !chip->battchg_disabled_status;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = smb1351_get_prop_charge_type(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = smb1351_get_prop_batt_health(chip);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = smb1351_get_prop_batt_temp(chip);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = "smb1351";
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property smb1351_parallel_properties[] = {
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+};
+
+static int smb1351_parallel_set_chg_present(struct smb1351_charger *chip,
+ int present)
+{
+ int rc;
+ u8 reg, mask = 0;
+
+ if (present == chip->parallel_charger_present) {
+ pr_debug("present %d -> %d, skipping\n",
+ chip->parallel_charger_present, present);
+ return 0;
+ }
+
+ if (present) {
+ /* Check if SMB1351 is present */
+ rc = smb1351_read_reg(chip, CHG_REVISION_REG, &reg);
+ if (rc) {
+ pr_debug("Failed to detect smb1351-parallel-charger, may be absent\n");
+ return -ENODEV;
+ }
+
+ rc = smb_chip_get_version(chip);
+ if (rc) {
+ pr_err("Couldn't get version rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smb1351_enable_volatile_writes(chip);
+ if (rc) {
+ pr_err("Couldn't configure for volatile rc = %d\n", rc);
+ return rc;
+ }
+
+ /* set the float voltage */
+ if (chip->vfloat_mv != -EINVAL) {
+ rc = smb1351_float_voltage_set(chip, chip->vfloat_mv);
+ if (rc) {
+ pr_err("Couldn't set float voltage rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* set recharge-threshold and enable auto recharge */
+ if (chip->recharge_mv != -EINVAL) {
+ reg = AUTO_RECHG_ENABLE;
+ if (chip->recharge_mv > 50)
+ reg |= AUTO_RECHG_TH_100MV;
+ else
+ reg |= AUTO_RECHG_TH_50MV;
+
+ rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+ AUTO_RECHG_BIT |
+ AUTO_RECHG_TH_BIT, reg);
+ if (rc) {
+ pr_err("Couldn't set rechg-cfg rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ /* set chg en by pin active low */
+ reg = chip->parallel_pin_polarity_setting | USBCS_CTRL_BY_I2C;
+ rc = smb1351_masked_write(chip, CHG_PIN_EN_CTRL_REG,
+ EN_PIN_CTRL_MASK | USBCS_CTRL_BIT, reg);
+ if (rc) {
+ pr_err("Couldn't set en pin rc=%d\n", rc);
+ return rc;
+ }
+
+ /* control USB suspend via command bits */
+ rc = smb1351_masked_write(chip, VARIOUS_FUNC_REG,
+ SUSPEND_MODE_CTRL_BIT,
+ SUSPEND_MODE_CTRL_BY_I2C);
+ if (rc) {
+ pr_err("Couldn't set USB suspend rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * setup USB 2.0/3.0 detection and USB 500/100
+ * command polarity
+ */
+ reg = USB_2_3_MODE_SEL_BY_I2C | USB_CMD_POLARITY_500_1_100_0;
+ mask = USB_2_3_MODE_SEL_BIT | USB_5_1_CMD_POLARITY_BIT;
+ rc = smb1351_masked_write(chip,
+ CHG_OTH_CURRENT_CTRL_REG, mask, reg);
+ if (rc) {
+ pr_err("Couldn't set CHG_OTH_CURRENT_CTRL_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* set fast charging current limit */
+ chip->target_fastchg_current_max_ma = SMB1351_CHG_FAST_MIN_MA;
+ rc = smb1351_fastchg_current_set(chip,
+ chip->target_fastchg_current_max_ma);
+ if (rc) {
+ pr_err("Couldn't set fastchg current rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ chip->parallel_charger_present = present;
+ /*
+ * When present is set, force USB suspend and start charging
+ * only after POWER_SUPPLY_PROP_CURRENT_MAX is set.
+ */
+ chip->usb_psy_ma = SUSPEND_CURRENT_MA;
+ smb1351_usb_suspend(chip, CURRENT, true);
+
+ return 0;
+}
+
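+/*
+ * Return the index of the usb_chg_current[] entry closest to the requested
+ * current: find the largest setpoint not exceeding val, then pick whichever
+ * of that entry or the next higher one is nearer to val.
+ */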
+static int smb1351_get_closest_usb_setpoint(int val)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(usb_chg_current) - 1; i >= 0; i--) {
+ if (usb_chg_current[i] <= val)
+ break;
+ }
+ if (i < 0)
+ i = 0;
+
+ if (i >= ARRAY_SIZE(usb_chg_current) - 1)
+ return ARRAY_SIZE(usb_chg_current) - 1;
+
+ /* check what is closer, i or i + 1 */
+ if (abs(usb_chg_current[i] - val) < abs(usb_chg_current[i + 1] - val))
+ return i;
+ else
+ return i + 1;
+}
+
+static bool smb1351_is_input_current_limited(struct smb1351_charger *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smb1351_read_reg(chip, IRQ_H_REG, &reg);
+ if (rc) {
+ pr_err("Failed to read IRQ_H_REG for ICL status: %d\n", rc);
+ return false;
+ }
+
+ return !!(reg & IRQ_IC_LIMIT_STATUS_BIT);
+}
+
+static int smb1351_parallel_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0, index;
+ struct smb1351_charger *chip = container_of(psy,
+ struct smb1351_charger, parallel_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ /*
+ * CHG EN is controlled by the pin in parallel charging.
+ * Use USB suspend to disable charging by command.
+ */
+ if (chip->parallel_charger_present)
+ rc = smb1351_usb_suspend(chip, USER, !val->intval);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ rc = smb1351_parallel_set_chg_present(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ if (chip->parallel_charger_present) {
+ chip->target_fastchg_current_max_ma =
+ val->intval / 1000;
+ rc = smb1351_fastchg_current_set(chip,
+ chip->target_fastchg_current_max_ma);
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if (chip->parallel_charger_present) {
+ index = smb1351_get_closest_usb_setpoint(
+ val->intval / 1000);
+ chip->usb_psy_ma = usb_chg_current[index];
+ rc = smb1351_set_usb_chg_current(chip,
+ chip->usb_psy_ma);
+ }
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ if (chip->parallel_charger_present &&
+ (chip->vfloat_mv != val->intval)) {
+ rc = smb1351_float_voltage_set(chip, val->intval);
+ if (!rc)
+ chip->vfloat_mv = val->intval;
+ } else {
+ chip->vfloat_mv = val->intval;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static int smb1351_parallel_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int smb1351_parallel_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smb1351_charger *chip = container_of(psy,
+ struct smb1351_charger, parallel_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = !chip->usb_suspended_status;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if (chip->parallel_charger_present)
+ val->intval = chip->usb_psy_ma * 1000;
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = chip->vfloat_mv;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = chip->parallel_charger_present;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ if (chip->parallel_charger_present)
+ val->intval = chip->fastchg_current_max_ma * 1000;
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (chip->parallel_charger_present)
+ val->intval = smb1351_get_prop_batt_status(chip);
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+ if (chip->parallel_charger_present)
+ val->intval =
+ smb1351_is_input_current_limited(chip) ? 1 : 0;
+ else
+ val->intval = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void smb1351_chg_set_appropriate_battery_current(
+ struct smb1351_charger *chip)
+{
+ int rc;
+ unsigned int current_max = chip->target_fastchg_current_max_ma;
+
+ if (chip->batt_cool)
+ current_max = min(current_max, chip->batt_cool_ma);
+ if (chip->batt_warm)
+ current_max = min(current_max, chip->batt_warm_ma);
+
+ pr_debug("setting %dmA", current_max);
+
+ rc = smb1351_fastchg_current_set(chip, current_max);
+ if (rc)
+ pr_err("Couldn't set charging current rc = %d\n", rc);
+}
+
+static void smb1351_chg_set_appropriate_vddmax(struct smb1351_charger *chip)
+{
+ int rc;
+ unsigned int vddmax = chip->vfloat_mv;
+
+ if (chip->batt_cool)
+ vddmax = min(vddmax, chip->batt_cool_mv);
+ if (chip->batt_warm)
+ vddmax = min(vddmax, chip->batt_warm_mv);
+
+ pr_debug("setting %dmV\n", vddmax);
+
+ rc = smb1351_float_voltage_set(chip, vddmax);
+ if (rc)
+ pr_err("Couldn't set float voltage rc = %d\n", rc);
+}
+
+static void smb1351_chg_ctrl_in_jeita(struct smb1351_charger *chip)
+{
+ union power_supply_propval ret = {0, };
+ int rc;
+
+ /* enable the iterm to prevent the reverse boost */
+ if (chip->iterm_disabled) {
+ if (chip->batt_cool || chip->batt_warm) {
+ rc = smb1351_iterm_set(chip, 100);
+ pr_debug("set the iterm due to JEITA\n");
+ } else {
+ rc = smb1351_masked_write(chip, CHG_CTRL_REG,
+ ITERM_EN_BIT, ITERM_DISABLE);
+ pr_debug("disable the iterm when exits warm/cool\n");
+ }
+ if (rc) {
+ pr_err("Couldn't set iterm rc = %d\n", rc);
+ return;
+ }
+ }
+ /*
+ * When JEITA returns to normal, charging may already be disabled
+ * due to current termination. So re-enable charging if the soc is
+ * less than 100 in normal mode. A 200ms delay is required between
+ * the disable and enable operations.
+ */
+ if (chip->bms_psy) {
+ rc = chip->bms_psy->get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_CAPACITY, &ret);
+ if (rc) {
+ pr_err("Couldn't read the bms capacity rc = %d\n",
+ rc);
+ return;
+ }
+ if (!chip->batt_cool && !chip->batt_warm
+ && !chip->batt_cold && !chip->batt_hot
+ && ret.intval < 100) {
+ rc = smb1351_battchg_disable(chip, THERMAL, true);
+ if (rc) {
+ pr_err("Couldn't disable charging rc = %d\n",
+ rc);
+ return;
+ }
+ /* delay for resetting the charging */
+ msleep(200);
+ rc = smb1351_battchg_disable(chip, THERMAL, false);
+ if (rc) {
+ pr_err("Couldn't enable charging rc = %d\n",
+ rc);
+ return;
+ } else {
+ chip->batt_full = false;
+ pr_debug("re-enable charging, batt_full = %d\n",
+ chip->batt_full);
+ }
+ pr_debug("batt psy changed\n");
+ power_supply_changed(&chip->batt_psy);
+ }
+ }
+}
+
+#define HYSTERESIS_DECIDEGC 20
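+/*
+ * Each time a JEITA threshold trips, the re-arm point in the opposite
+ * direction is offset by 2 degC (20 decidegC) of hysteresis, presumably to
+ * avoid bouncing between adjacent battery states around a single threshold.
+ */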
+static void smb1351_chg_adc_notification(enum qpnp_tm_state state, void *ctx)
+{
+ struct smb1351_charger *chip = ctx;
+ struct battery_status *cur;
+ int temp;
+
+ if (state >= ADC_TM_STATE_NUM) {
+ pr_err("invalid state parameter %d\n", state);
+ return;
+ }
+
+ temp = smb1351_get_prop_batt_temp(chip);
+
+ pr_debug("temp = %d state = %s\n", temp,
+ state == ADC_TM_WARM_STATE ? "hot" : "cold");
+
+ /* reset the adc status request */
+ chip->adc_param.state_request = ADC_TM_WARM_COOL_THR_ENABLE;
+
+ /* temp from low to high */
+ if (state == ADC_TM_WARM_STATE) {
+ /* WARM -> HOT */
+ if (temp >= chip->batt_hot_decidegc) {
+ cur = &batt_s[BATT_HOT];
+ chip->adc_param.low_temp =
+ chip->batt_hot_decidegc - HYSTERESIS_DECIDEGC;
+ chip->adc_param.state_request = ADC_TM_COOL_THR_ENABLE;
+ /* NORMAL -> WARM */
+ } else if (temp >= chip->batt_warm_decidegc &&
+ chip->jeita_supported) {
+ cur = &batt_s[BATT_WARM];
+ chip->adc_param.low_temp =
+ chip->batt_warm_decidegc - HYSTERESIS_DECIDEGC;
+ chip->adc_param.high_temp = chip->batt_hot_decidegc;
+ /* COOL -> NORMAL */
+ } else if (temp >= chip->batt_cool_decidegc &&
+ chip->jeita_supported) {
+ cur = &batt_s[BATT_NORMAL];
+ chip->adc_param.low_temp =
+ chip->batt_cool_decidegc - HYSTERESIS_DECIDEGC;
+ chip->adc_param.high_temp = chip->batt_warm_decidegc;
+ /* COLD -> COOL */
+ } else if (temp >= chip->batt_cold_decidegc) {
+ cur = &batt_s[BATT_COOL];
+ chip->adc_param.low_temp =
+ chip->batt_cold_decidegc - HYSTERESIS_DECIDEGC;
+ if (chip->jeita_supported)
+ chip->adc_param.high_temp =
+ chip->batt_cool_decidegc;
+ else
+ chip->adc_param.high_temp =
+ chip->batt_hot_decidegc;
+ /* MISSING -> COLD */
+ } else if (temp >= chip->batt_missing_decidegc) {
+ cur = &batt_s[BATT_COLD];
+ chip->adc_param.high_temp = chip->batt_cold_decidegc;
+ chip->adc_param.low_temp = chip->batt_missing_decidegc
+ - HYSTERESIS_DECIDEGC;
+
+ }
+ /* temp from high to low */
+ } else {
+ /* COLD -> MISSING */
+ if (temp <= chip->batt_missing_decidegc) {
+ cur = &batt_s[BATT_MISSING];
+ chip->adc_param.high_temp = chip->batt_missing_decidegc
+ + HYSTERESIS_DECIDEGC;
+ chip->adc_param.state_request = ADC_TM_WARM_THR_ENABLE;
+ /* COOL -> COLD */
+ } else if (temp <= chip->batt_cold_decidegc) {
+ cur = &batt_s[BATT_COLD];
+ chip->adc_param.high_temp =
+ chip->batt_cold_decidegc + HYSTERESIS_DECIDEGC;
+ /* add low_temp to enable batt present check */
+ chip->adc_param.low_temp = chip->batt_missing_decidegc;
+ /* NORMAL -> COOL */
+ } else if (temp <= chip->batt_cool_decidegc &&
+ chip->jeita_supported) {
+ cur = &batt_s[BATT_COOL];
+ chip->adc_param.high_temp =
+ chip->batt_cool_decidegc + HYSTERESIS_DECIDEGC;
+ chip->adc_param.low_temp = chip->batt_cold_decidegc;
+ /* WARM -> NORMAL */
+ } else if (temp <= chip->batt_warm_decidegc &&
+ chip->jeita_supported) {
+ cur = &batt_s[BATT_NORMAL];
+ chip->adc_param.high_temp =
+ chip->batt_warm_decidegc + HYSTERESIS_DECIDEGC;
+ chip->adc_param.low_temp = chip->batt_cool_decidegc;
+ /* HOT -> WARM */
+ } else if (temp <= chip->batt_hot_decidegc) {
+ cur = &batt_s[BATT_WARM];
+ if (chip->jeita_supported)
+ chip->adc_param.low_temp =
+ chip->batt_warm_decidegc;
+ else
+ chip->adc_param.low_temp =
+ chip->batt_cold_decidegc;
+ chip->adc_param.high_temp =
+ chip->batt_hot_decidegc + HYSTERESIS_DECIDEGC;
+ }
+ }
+
+ if (cur->batt_present)
+ chip->battery_missing = false;
+ else
+ chip->battery_missing = true;
+
+ if (cur->batt_hot ^ chip->batt_hot ||
+ cur->batt_cold ^ chip->batt_cold) {
+ chip->batt_hot = cur->batt_hot;
+ chip->batt_cold = cur->batt_cold;
+ /* stop charging explicitly since we use PMIC thermal pin */
+ if (cur->batt_hot || cur->batt_cold ||
+ chip->battery_missing)
+ smb1351_battchg_disable(chip, THERMAL, 1);
+ else
+ smb1351_battchg_disable(chip, THERMAL, 0);
+ }
+
+ if ((chip->batt_warm ^ cur->batt_warm ||
+ chip->batt_cool ^ cur->batt_cool)
+ && chip->jeita_supported) {
+ chip->batt_warm = cur->batt_warm;
+ chip->batt_cool = cur->batt_cool;
+ smb1351_chg_set_appropriate_battery_current(chip);
+ smb1351_chg_set_appropriate_vddmax(chip);
+ smb1351_chg_ctrl_in_jeita(chip);
+ }
+
+ pr_debug("hot %d, cold %d, warm %d, cool %d, soft jeita supported %d, missing %d, low = %d deciDegC, high = %d deciDegC\n",
+ chip->batt_hot, chip->batt_cold, chip->batt_warm,
+ chip->batt_cool, chip->jeita_supported,
+ chip->battery_missing, chip->adc_param.low_temp,
+ chip->adc_param.high_temp);
+ if (qpnp_adc_tm_channel_measure(chip->adc_tm_dev, &chip->adc_param))
+ pr_err("request ADC error\n");
+}
+
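+/*
+ * Kick off a fresh APSD cycle by setting the re-run command bit. The APSD
+ * complete handler uses this once per insertion when an SDP is detected, so
+ * the port type can be re-detected after D+/D- are floated.
+ */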
+static int rerun_apsd(struct smb1351_charger *chip)
+{
+ int rc;
+
+ pr_debug("Reruning APSD\nDisabling APSD\n");
+
+ rc = smb1351_masked_write(chip, CMD_HVDCP_REG, CMD_APSD_RE_RUN_BIT,
+ CMD_APSD_RE_RUN_BIT);
+ if (rc)
+ pr_err("Couldn't re-run APSD algo\n");
+
+ return 0;
+}
+
+static void smb1351_hvdcp_det_work(struct work_struct *work)
+{
+ int rc;
+ u8 reg;
+ struct smb1351_charger *chip = container_of(work,
+ struct smb1351_charger,
+ hvdcp_det_work.work);
+
+ rc = smb1351_read_reg(chip, STATUS_7_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read STATUS_7_REG rc == %d\n", rc);
+ goto end;
+ }
+ pr_debug("STATUS_7_REG = 0x%02X\n", reg);
+
+ if (reg) {
+ pr_debug("HVDCP detected; notifying USB PSY\n");
+ power_supply_set_supply_type(chip->usb_psy,
+ POWER_SUPPLY_TYPE_USB_HVDCP);
+ }
+end:
+ pm_relax(chip->dev);
+}
+
+#define HVDCP_NOTIFY_MS 2500
+static int smb1351_apsd_complete_handler(struct smb1351_charger *chip,
+ u8 status)
+{
+ int rc;
+ u8 reg = 0;
+ union power_supply_propval prop = {0, };
+ enum power_supply_type type = POWER_SUPPLY_TYPE_UNKNOWN;
+
+ /*
+ * If apsd is disabled, charger detection is done by
+ * USB phy driver.
+ */
+ if (chip->disable_apsd || chip->usbin_ov) {
+ pr_debug("APSD %s, status = %d\n",
+ chip->disable_apsd ? "disabled" : "enabled", !!status);
+ pr_debug("USBIN ov, status = %d\n", chip->usbin_ov);
+ return 0;
+ }
+
+ rc = smb1351_read_reg(chip, STATUS_5_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read STATUS_5 rc = %d\n", rc);
+ return rc;
+ }
+
+ pr_debug("STATUS_5_REG(0x3B)=%x\n", reg);
+
+ switch (reg) {
+ case STATUS_PORT_ACA_DOCK:
+ case STATUS_PORT_ACA_C:
+ case STATUS_PORT_ACA_B:
+ case STATUS_PORT_ACA_A:
+ type = POWER_SUPPLY_TYPE_USB_ACA;
+ break;
+ case STATUS_PORT_CDP:
+ type = POWER_SUPPLY_TYPE_USB_CDP;
+ break;
+ case STATUS_PORT_DCP:
+ type = POWER_SUPPLY_TYPE_USB_DCP;
+ break;
+ case STATUS_PORT_SDP:
+ type = POWER_SUPPLY_TYPE_USB;
+ break;
+ case STATUS_PORT_OTHER:
+ type = POWER_SUPPLY_TYPE_USB_DCP;
+ break;
+ default:
+ type = POWER_SUPPLY_TYPE_USB;
+ break;
+ }
+
+ if (status) {
+ chip->chg_present = true;
+ pr_debug("APSD complete. USB type detected=%d chg_present=%d\n",
+ type, chip->chg_present);
+ if (!chip->battery_missing && !chip->apsd_rerun) {
+ if (type == POWER_SUPPLY_TYPE_USB) {
+ pr_debug("Setting usb psy dp=f dm=f SDP and rerun\n");
+ power_supply_set_dp_dm(chip->usb_psy,
+ POWER_SUPPLY_DP_DM_DPF_DMF);
+ chip->apsd_rerun = true;
+ rerun_apsd(chip);
+ return 0;
+ }
+ pr_debug("Set usb psy dp=f dm=f DCP and no rerun\n");
+ power_supply_set_dp_dm(chip->usb_psy,
+ POWER_SUPPLY_DP_DM_DPF_DMF);
+ }
+ /*
+ * If defined force hvdcp 2p0 property,
+ * we force to hvdcp 2p0 in the APSD handler.
+ */
+ if (chip->force_hvdcp_2p0) {
+ pr_debug("Force set to HVDCP 2.0 mode\n");
+ smb1351_masked_write(chip, VARIOUS_FUNC_3_REG,
+ QC_2P1_AUTH_ALGO_BIT, 0);
+ smb1351_masked_write(chip, CMD_HVDCP_REG,
+ CMD_FORCE_HVDCP_2P0_BIT,
+ CMD_FORCE_HVDCP_2P0_BIT);
+ type = POWER_SUPPLY_TYPE_USB_HVDCP;
+ } else if (type == POWER_SUPPLY_TYPE_USB_DCP) {
+ pr_debug("schedule hvdcp detection worker\n");
+ pm_stay_awake(chip->dev);
+ schedule_delayed_work(&chip->hvdcp_det_work,
+ msecs_to_jiffies(HVDCP_NOTIFY_MS));
+ }
+
+ power_supply_set_supply_type(chip->usb_psy, type);
+ /*
+ * SMB is now done sampling the D+/D- lines,
+ * indicate USB driver
+ */
+ pr_debug("updating usb_psy present=%d\n", chip->chg_present);
+ power_supply_set_present(chip->usb_psy, chip->chg_present);
+ chip->apsd_rerun = false;
+ } else if (!chip->apsd_rerun) {
+ /* Handle Charger removal */
+ chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_TYPE, &prop);
+ chip->chg_present = false;
+ power_supply_set_supply_type(chip->usb_psy,
+ POWER_SUPPLY_TYPE_UNKNOWN);
+ power_supply_set_present(chip->usb_psy,
+ chip->chg_present);
+ pr_debug("Set usb psy dm=r df=r\n");
+ power_supply_set_dp_dm(chip->usb_psy,
+ POWER_SUPPLY_DP_DM_DPR_DMR);
+ }
+
+ return 0;
+}
+
+/*
+ * As the source detect interrupt is not triggered on the falling edge,
+ * schedule a delayed work to check the source detect status after the
+ * charger UV interrupt has fired.
+ */
+#define FIRST_CHECK_DELAY 100
+#define SECOND_CHECK_DELAY 1000
+static void smb1351_chg_remove_work(struct work_struct *work)
+{
+ int rc;
+ u8 reg;
+ struct smb1351_charger *chip = container_of(work,
+ struct smb1351_charger, chg_remove_work.work);
+
+ rc = smb1351_read_reg(chip, IRQ_G_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read IRQ_G_REG rc = %d\n", rc);
+ goto end;
+ }
+
+ if (!(reg & IRQ_SOURCE_DET_BIT)) {
+ pr_debug("chg removed\n");
+ smb1351_apsd_complete_handler(chip, 0);
+ } else if (!chip->chg_remove_work_scheduled) {
+ chip->chg_remove_work_scheduled = true;
+ goto reschedule;
+ } else {
+ pr_debug("charger is present\n");
+ }
+end:
+ chip->chg_remove_work_scheduled = false;
+ pm_relax(chip->dev);
+ return;
+
+reschedule:
+ pr_debug("reschedule after 1s\n");
+ schedule_delayed_work(&chip->chg_remove_work,
+ msecs_to_jiffies(SECOND_CHECK_DELAY));
+}
+
+static int smb1351_usbin_uv_handler(struct smb1351_charger *chip, u8 status)
+{
+ /* use this to detect USB insertion only if !apsd */
+ if (chip->disable_apsd) {
+ /*
+ * If APSD is disabled, src det interrupt won't trigger.
+ * Hence use usbin_uv for removal and insertion notification
+ */
+ if (status == 0) {
+ chip->chg_present = true;
+ pr_debug("updating usb_psy present=%d\n",
+ chip->chg_present);
+ power_supply_set_supply_type(chip->usb_psy,
+ POWER_SUPPLY_TYPE_USB);
+ power_supply_set_present(chip->usb_psy,
+ chip->chg_present);
+ } else {
+ chip->chg_present = false;
+ power_supply_set_supply_type(chip->usb_psy,
+ POWER_SUPPLY_TYPE_UNKNOWN);
+ power_supply_set_present(chip->usb_psy, chip->
+ chg_present);
+ pr_debug("updating usb_psy present=%d\n",
+ chip->chg_present);
+ }
+ return 0;
+ }
+
+ if (status) {
+ cancel_delayed_work_sync(&chip->hvdcp_det_work);
+ pm_relax(chip->dev);
+ pr_debug("schedule charger remove worker\n");
+ schedule_delayed_work(&chip->chg_remove_work,
+ msecs_to_jiffies(FIRST_CHECK_DELAY));
+ pm_stay_awake(chip->dev);
+ }
+
+ pr_debug("chip->chg_present = %d\n", chip->chg_present);
+
+ return 0;
+}
+
+static int smb1351_usbin_ov_handler(struct smb1351_charger *chip, u8 status)
+{
+ int health;
+ int rc;
+ u8 reg;
+
+ rc = smb1351_read_reg(chip, IRQ_E_REG, &reg);
+ if (rc)
+ pr_err("Couldn't read IRQ_E rc = %d\n", rc);
+
+ if (status != 0) {
+ chip->chg_present = false;
+ chip->usbin_ov = true;
+ power_supply_set_supply_type(chip->usb_psy,
+ POWER_SUPPLY_TYPE_UNKNOWN);
+ power_supply_set_present(chip->usb_psy, chip->chg_present);
+ } else {
+ chip->usbin_ov = false;
+ if (reg & IRQ_USBIN_UV_BIT)
+ pr_debug("Charger unplugged from OV\n");
+ else
+ smb1351_apsd_complete_handler(chip, 1);
+ }
+
+ if (chip->usb_psy) {
+ health = status ? POWER_SUPPLY_HEALTH_OVERVOLTAGE
+ : POWER_SUPPLY_HEALTH_GOOD;
+ power_supply_set_health_state(chip->usb_psy, health);
+ pr_debug("chip ov status is %d\n", health);
+ }
+ pr_debug("chip->chg_present = %d\n", chip->chg_present);
+
+ return 0;
+}
+
+static int smb1351_fast_chg_handler(struct smb1351_charger *chip, u8 status)
+{
+ pr_debug("enter\n");
+ return 0;
+}
+
+static int smb1351_chg_term_handler(struct smb1351_charger *chip, u8 status)
+{
+ pr_debug("enter\n");
+ if (!chip->bms_controlled_charging)
+ chip->batt_full = !!status;
+ return 0;
+}
+
+static int smb1351_safety_timeout_handler(struct smb1351_charger *chip,
+ u8 status)
+{
+ pr_debug("safety_timeout triggered\n");
+ return 0;
+}
+
+static int smb1351_aicl_done_handler(struct smb1351_charger *chip, u8 status)
+{
+ pr_debug("aicl_done triggered\n");
+ return 0;
+}
+
+static int smb1351_hot_hard_handler(struct smb1351_charger *chip, u8 status)
+{
+ pr_debug("status = 0x%02x\n", status);
+ chip->batt_hot = !!status;
+ return 0;
+}
+static int smb1351_cold_hard_handler(struct smb1351_charger *chip, u8 status)
+{
+ pr_debug("status = 0x%02x\n", status);
+ chip->batt_cold = !!status;
+ return 0;
+}
+static int smb1351_hot_soft_handler(struct smb1351_charger *chip, u8 status)
+{
+ pr_debug("status = 0x%02x\n", status);
+ chip->batt_warm = !!status;
+ return 0;
+}
+static int smb1351_cold_soft_handler(struct smb1351_charger *chip, u8 status)
+{
+ pr_debug("status = 0x%02x\n", status);
+ chip->batt_cool = !!status;
+ return 0;
+}
+
+static int smb1351_battery_missing_handler(struct smb1351_charger *chip,
+ u8 status)
+{
+ if (status)
+ chip->battery_missing = true;
+ else
+ chip->battery_missing = false;
+
+ return 0;
+}
+
+static struct irq_handler_info handlers[] = {
+ [0] = {
+ .stat_reg = IRQ_A_REG,
+ .val = 0,
+ .prev_val = 0,
+ .irq_info = {
+ { .name = "cold_soft",
+ .smb_irq = smb1351_cold_soft_handler,
+ },
+ { .name = "hot_soft",
+ .smb_irq = smb1351_hot_soft_handler,
+ },
+ { .name = "cold_hard",
+ .smb_irq = smb1351_cold_hard_handler,
+ },
+ { .name = "hot_hard",
+ .smb_irq = smb1351_hot_hard_handler,
+ },
+ },
+ },
+ [1] = {
+ .stat_reg = IRQ_B_REG,
+ .val = 0,
+ .prev_val = 0,
+ .irq_info = {
+ { .name = "internal_temp_limit",
+ },
+ { .name = "vbatt_low",
+ },
+ { .name = "battery_missing",
+ .smb_irq = smb1351_battery_missing_handler,
+ },
+ { .name = "batt_therm_removed",
+ },
+ },
+ },
+ [2] = {
+ .stat_reg = IRQ_C_REG,
+ .val = 0,
+ .prev_val = 0,
+ .irq_info = {
+ { .name = "chg_term",
+ .smb_irq = smb1351_chg_term_handler,
+ },
+ { .name = "taper",
+ },
+ { .name = "recharge",
+ },
+ { .name = "fast_chg",
+ .smb_irq = smb1351_fast_chg_handler,
+ },
+ },
+ },
+ [3] = {
+ .stat_reg = IRQ_D_REG,
+ .val = 0,
+ .prev_val = 0,
+ .irq_info = {
+ { .name = "prechg_timeout",
+ },
+ { .name = "safety_timeout",
+ .smb_irq = smb1351_safety_timeout_handler,
+ },
+ { .name = "chg_error",
+ },
+ { .name = "batt_ov",
+ },
+ },
+ },
+ [4] = {
+ .stat_reg = IRQ_E_REG,
+ .val = 0,
+ .prev_val = 0,
+ .irq_info = {
+ { .name = "power_ok",
+ },
+ { .name = "afvc",
+ },
+ { .name = "usbin_uv",
+ .smb_irq = smb1351_usbin_uv_handler,
+ },
+ { .name = "usbin_ov",
+ .smb_irq = smb1351_usbin_ov_handler,
+ },
+ },
+ },
+ [5] = {
+ .stat_reg = IRQ_F_REG,
+ .val = 0,
+ .prev_val = 0,
+ .irq_info = {
+ { .name = "otg_oc_retry",
+ },
+ { .name = "rid",
+ },
+ { .name = "otg_fail",
+ },
+ { .name = "otg_oc",
+ },
+ },
+ },
+ [6] = {
+ .stat_reg = IRQ_G_REG,
+ .val = 0,
+ .prev_val = 0,
+ .irq_info = {
+ { .name = "chg_inhibit",
+ },
+ { .name = "aicl_fail",
+ },
+ { .name = "aicl_done",
+ .smb_irq = smb1351_aicl_done_handler,
+ },
+ { .name = "apsd_complete",
+ .smb_irq = smb1351_apsd_complete_handler,
+ },
+ },
+ },
+ [7] = {
+ .stat_reg = IRQ_H_REG,
+ .val = 0,
+ .prev_val = 0,
+ .irq_info = {
+ { .name = "wdog_timeout",
+ },
+ { .name = "hvdcp_auth_done",
+ },
+ },
+ },
+};
+
+#define IRQ_LATCHED_MASK 0x02
+#define IRQ_STATUS_MASK 0x01
+#define BITS_PER_IRQ 2
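+/*
+ * Each 8-bit IRQ status register packs four interrupt sources, two bits per
+ * source: bit 0 of the pair is the real-time status and bit 1 the latched
+ * trigger. The handler below walks every status register and dispatches the
+ * per-source handler whenever a source has latched or its status changed.
+ */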
+static irqreturn_t smb1351_chg_stat_handler(int irq, void *dev_id)
+{
+ struct smb1351_charger *chip = dev_id;
+ int i, j;
+ u8 triggered;
+ u8 changed;
+ u8 rt_stat, prev_rt_stat;
+ int rc;
+ int handler_count = 0;
+
+ mutex_lock(&chip->irq_complete);
+
+ chip->irq_waiting = true;
+ if (!chip->resume_completed) {
+ pr_debug("IRQ triggered before device-resume\n");
+ disable_irq_nosync(irq);
+ mutex_unlock(&chip->irq_complete);
+ return IRQ_HANDLED;
+ }
+ chip->irq_waiting = false;
+
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ rc = smb1351_read_reg(chip, handlers[i].stat_reg,
+ &handlers[i].val);
+ if (rc) {
+ pr_err("Couldn't read %d rc = %d\n",
+ handlers[i].stat_reg, rc);
+ continue;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(handlers[i].irq_info); j++) {
+ triggered = handlers[i].val
+ & (IRQ_LATCHED_MASK << (j * BITS_PER_IRQ));
+ rt_stat = handlers[i].val
+ & (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+ prev_rt_stat = handlers[i].prev_val
+ & (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+ changed = prev_rt_stat ^ rt_stat;
+
+ if (triggered || changed)
+ rt_stat ? handlers[i].irq_info[j].high++ :
+ handlers[i].irq_info[j].low++;
+
+ if ((triggered || changed)
+ && handlers[i].irq_info[j].smb_irq != NULL) {
+ handler_count++;
+ rc = handlers[i].irq_info[j].smb_irq(chip,
+ rt_stat);
+ if (rc)
+ pr_err("Couldn't handle %d irq for reg 0x%02x rc = %d\n",
+ j, handlers[i].stat_reg, rc);
+ }
+ }
+ handlers[i].prev_val = handlers[i].val;
+ }
+
+ pr_debug("handler count = %d\n", handler_count);
+ if (handler_count) {
+ pr_debug("batt psy changed\n");
+ power_supply_changed(&chip->batt_psy);
+ }
+
+ mutex_unlock(&chip->irq_complete);
+
+ return IRQ_HANDLED;
+}
+
+static void smb1351_external_power_changed(struct power_supply *psy)
+{
+ struct smb1351_charger *chip = container_of(psy,
+ struct smb1351_charger, batt_psy);
+ union power_supply_propval prop = {0,};
+ int rc, current_limit = 0, online = 0;
+
+ if (chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+
+ rc = chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc)
+ pr_err("Couldn't read USB online property, rc=%d\n", rc);
+ else
+ online = prop.intval;
+
+ rc = chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+ if (rc)
+ pr_err("Couldn't read USB current_max property, rc=%d\n", rc);
+ else
+ current_limit = prop.intval / 1000;
+
+ pr_debug("online = %d, current_limit = %d\n", online, current_limit);
+
+ smb1351_enable_volatile_writes(chip);
+ smb1351_set_usb_chg_current(chip, current_limit);
+
+ pr_debug("updating batt psy\n");
+}
+
+#define LAST_CNFG_REG 0x16
+static int show_cnfg_regs(struct seq_file *m, void *data)
+{
+ struct smb1351_charger *chip = m->private;
+ int rc;
+ u8 reg;
+ u8 addr;
+
+ for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+ rc = smb1351_read_reg(chip, addr, &reg);
+ if (!rc)
+ seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ return 0;
+}
+
+static int cnfg_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct smb1351_charger *chip = inode->i_private;
+
+ return single_open(file, show_cnfg_regs, chip);
+}
+
+static const struct file_operations cnfg_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = cnfg_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define FIRST_CMD_REG 0x30
+#define LAST_CMD_REG 0x34
+static int show_cmd_regs(struct seq_file *m, void *data)
+{
+ struct smb1351_charger *chip = m->private;
+ int rc;
+ u8 reg;
+ u8 addr;
+
+ for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+ rc = smb1351_read_reg(chip, addr, &reg);
+ if (!rc)
+ seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ return 0;
+}
+
+static int cmd_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct smb1351_charger *chip = inode->i_private;
+
+ return single_open(file, show_cmd_regs, chip);
+}
+
+static const struct file_operations cmd_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = cmd_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define FIRST_STATUS_REG 0x36
+#define LAST_STATUS_REG 0x3F
+static int show_status_regs(struct seq_file *m, void *data)
+{
+ struct smb1351_charger *chip = m->private;
+ int rc;
+ u8 reg;
+ u8 addr;
+
+ for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+ rc = smb1351_read_reg(chip, addr, &reg);
+ if (!rc)
+ seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ return 0;
+}
+
+static int status_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct smb1351_charger *chip = inode->i_private;
+
+ return single_open(file, show_status_regs, chip);
+}
+
+static const struct file_operations status_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = status_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int show_irq_count(struct seq_file *m, void *data)
+{
+ int i, j, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(handlers); i++)
+ for (j = 0; j < 4; j++) {
+ seq_printf(m, "%s=%d\t(high=%d low=%d)\n",
+ handlers[i].irq_info[j].name,
+ handlers[i].irq_info[j].high
+ + handlers[i].irq_info[j].low,
+ handlers[i].irq_info[j].high,
+ handlers[i].irq_info[j].low);
+ total += (handlers[i].irq_info[j].high
+ + handlers[i].irq_info[j].low);
+ }
+
+ seq_printf(m, "\n\tTotal = %d\n", total);
+
+ return 0;
+}
+
+static int irq_count_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct smb1351_charger *chip = inode->i_private;
+
+ return single_open(file, show_irq_count, chip);
+}
+
+static const struct file_operations irq_count_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = irq_count_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int get_reg(void *data, u64 *val)
+{
+ struct smb1351_charger *chip = data;
+ int rc;
+ u8 temp;
+
+ rc = smb1351_read_reg(chip, chip->peek_poke_address, &temp);
+ if (rc) {
+ pr_err("Couldn't read reg %x rc = %d\n",
+ chip->peek_poke_address, rc);
+ return -EAGAIN;
+ }
+ *val = temp;
+ return 0;
+}
+
+static int set_reg(void *data, u64 val)
+{
+ struct smb1351_charger *chip = data;
+ int rc;
+ u8 temp;
+
+ temp = (u8) val;
+ rc = smb1351_write_reg(chip, chip->peek_poke_address, temp);
+ if (rc) {
+ pr_err("Couldn't write 0x%02x to 0x%02x rc= %d\n",
+ temp, chip->peek_poke_address, rc);
+ return -EAGAIN;
+ }
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(poke_poke_debug_ops, get_reg, set_reg, "0x%02llx\n");
+
+static int force_irq_set(void *data, u64 val)
+{
+ struct smb1351_charger *chip = data;
+
+ smb1351_chg_stat_handler(chip->client->irq, data);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_irq_ops, NULL, force_irq_set, "0x%02llx\n");
+
+#ifdef DEBUG
+static void dump_regs(struct smb1351_charger *chip)
+{
+ int rc;
+ u8 reg;
+ u8 addr;
+
+ for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+ rc = smb1351_read_reg(chip, addr, &reg);
+ if (rc)
+ pr_err("Couldn't read 0x%02x rc = %d\n", addr, rc);
+ else
+ pr_debug("0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+ rc = smb1351_read_reg(chip, addr, &reg);
+ if (rc)
+ pr_err("Couldn't read 0x%02x rc = %d\n", addr, rc);
+ else
+ pr_debug("0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+ rc = smb1351_read_reg(chip, addr, &reg);
+ if (rc)
+ pr_err("Couldn't read 0x%02x rc = %d\n", addr, rc);
+ else
+ pr_debug("0x%02x = 0x%02x\n", addr, reg);
+ }
+}
+#else
+static void dump_regs(struct smb1351_charger *chip)
+{
+}
+#endif
+
+static int smb1351_parse_dt(struct smb1351_charger *chip)
+{
+ int rc;
+ struct device_node *node = chip->dev->of_node;
+
+ if (!node) {
+ pr_err("device tree info. missing\n");
+ return -EINVAL;
+ }
+
+ chip->usb_suspended_status = of_property_read_bool(node,
+ "qcom,charging-disabled");
+
+ chip->chg_autonomous_mode = of_property_read_bool(node,
+ "qcom,chg-autonomous-mode");
+
+ chip->disable_apsd = of_property_read_bool(node, "qcom,disable-apsd");
+
+ chip->using_pmic_therm = of_property_read_bool(node,
+ "qcom,using-pmic-therm");
+ chip->bms_controlled_charging = of_property_read_bool(node,
+ "qcom,bms-controlled-charging");
+ chip->force_hvdcp_2p0 = of_property_read_bool(node,
+ "qcom,force-hvdcp-2p0");
+
+ rc = of_property_read_string(node, "qcom,bms-psy-name",
+ &chip->bms_psy_name);
+ if (rc)
+ chip->bms_psy_name = NULL;
+
+ rc = of_property_read_u32(node, "qcom,fastchg-current-max-ma",
+ &chip->target_fastchg_current_max_ma);
+ if (rc)
+ chip->target_fastchg_current_max_ma = SMB1351_CHG_FAST_MAX_MA;
+
+ chip->iterm_disabled = of_property_read_bool(node,
+ "qcom,iterm-disabled");
+
+ rc = of_property_read_u32(node, "qcom,iterm-ma", &chip->iterm_ma);
+ if (rc)
+ chip->iterm_ma = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+ &chip->vfloat_mv);
+ if (rc)
+ chip->vfloat_mv = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,recharge-mv",
+ &chip->recharge_mv);
+ if (rc)
+ chip->recharge_mv = -EINVAL;
+
+ chip->recharge_disabled = of_property_read_bool(node,
+ "qcom,recharge-disabled");
+
+ /* thermal and jeita support */
+ rc = of_property_read_u32(node, "qcom,batt-cold-decidegc",
+ &chip->batt_cold_decidegc);
+ if (rc < 0)
+ chip->batt_cold_decidegc = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,batt-hot-decidegc",
+ &chip->batt_hot_decidegc);
+ if (rc < 0)
+ chip->batt_hot_decidegc = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,batt-warm-decidegc",
+ &chip->batt_warm_decidegc);
+
+ rc |= of_property_read_u32(node, "qcom,batt-cool-decidegc",
+ &chip->batt_cool_decidegc);
+
+ if (!rc) {
+ rc = of_property_read_u32(node, "qcom,batt-cool-mv",
+ &chip->batt_cool_mv);
+
+ rc |= of_property_read_u32(node, "qcom,batt-warm-mv",
+ &chip->batt_warm_mv);
+
+ rc |= of_property_read_u32(node, "qcom,batt-cool-ma",
+ &chip->batt_cool_ma);
+
+ rc |= of_property_read_u32(node, "qcom,batt-warm-ma",
+ &chip->batt_warm_ma);
+ if (rc)
+ chip->jeita_supported = false;
+ else
+ chip->jeita_supported = true;
+ }
+
+ pr_debug("jeita_supported = %d\n", chip->jeita_supported);
+
+ rc = of_property_read_u32(node, "qcom,batt-missing-decidegc",
+ &chip->batt_missing_decidegc);
+
+ chip->pinctrl_state_name = of_get_property(node, "pinctrl-names", NULL);
+
+ return 0;
+}
+
+static int smb1351_determine_initial_state(struct smb1351_charger *chip)
+{
+ int rc;
+ u8 reg = 0;
+
+ /*
+ * It is okay to read the interrupt status here since
+ * interrupts aren't requested. Reading interrupt status
+ * clears the interrupt so be careful to read interrupt
+ * status only in interrupt handling code
+ */
+
+ rc = smb1351_read_reg(chip, IRQ_B_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read IRQ_B rc = %d\n", rc);
+ goto fail_init_status;
+ }
+
+ chip->battery_missing = (reg & IRQ_BATT_MISSING_BIT) ? true : false;
+
+ rc = smb1351_read_reg(chip, IRQ_C_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read IRQ_C rc = %d\n", rc);
+ goto fail_init_status;
+ }
+ chip->batt_full = (reg & IRQ_TERM_BIT) ? true : false;
+
+ rc = smb1351_read_reg(chip, IRQ_A_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read irq A rc = %d\n", rc);
+ return rc;
+ }
+
+ if (reg & IRQ_HOT_HARD_BIT)
+ chip->batt_hot = true;
+ if (reg & IRQ_COLD_HARD_BIT)
+ chip->batt_cold = true;
+ if (reg & IRQ_HOT_SOFT_BIT)
+ chip->batt_warm = true;
+ if (reg & IRQ_COLD_SOFT_BIT)
+ chip->batt_cool = true;
+
+ rc = smb1351_read_reg(chip, IRQ_E_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read IRQ_E rc = %d\n", rc);
+ goto fail_init_status;
+ }
+
+ if (reg & IRQ_USBIN_UV_BIT) {
+ smb1351_usbin_uv_handler(chip, 1);
+ } else {
+ smb1351_usbin_uv_handler(chip, 0);
+ smb1351_apsd_complete_handler(chip, 1);
+ }
+
+ rc = smb1351_read_reg(chip, IRQ_G_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read IRQ_G rc = %d\n", rc);
+ goto fail_init_status;
+ }
+
+ if (reg & IRQ_SOURCE_DET_BIT)
+ smb1351_apsd_complete_handler(chip, 1);
+
+ return 0;
+
+fail_init_status:
+ pr_err("Couldn't determine initial status\n");
+ return rc;
+}
+
+static int is_parallel_charger(struct i2c_client *client)
+{
+ struct device_node *node = client->dev.of_node;
+
+ return of_property_read_bool(node, "qcom,parallel-charger");
+}
+
+static int create_debugfs_entries(struct smb1351_charger *chip)
+{
+ struct dentry *ent;
+
+ chip->debug_root = debugfs_create_dir("smb1351", NULL);
+ if (!chip->debug_root) {
+ pr_err("Couldn't create debug dir\n");
+ } else {
+ ent = debugfs_create_file("config_registers", S_IFREG | S_IRUGO,
+ chip->debug_root, chip,
+ &cnfg_debugfs_ops);
+ if (!ent)
+ pr_err("Couldn't create cnfg debug file\n");
+
+ ent = debugfs_create_file("status_registers", S_IFREG | S_IRUGO,
+ chip->debug_root, chip,
+ &status_debugfs_ops);
+ if (!ent)
+ pr_err("Couldn't create status debug file\n");
+
+ ent = debugfs_create_file("cmd_registers", S_IFREG | S_IRUGO,
+ chip->debug_root, chip,
+ &cmd_debugfs_ops);
+ if (!ent)
+ pr_err("Couldn't create cmd debug file\n");
+
+ ent = debugfs_create_x32("address", S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root,
+ &(chip->peek_poke_address));
+ if (!ent)
+ pr_err("Couldn't create address debug file\n");
+
+ ent = debugfs_create_file("data", S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root, chip,
+ &poke_poke_debug_ops);
+ if (!ent)
+ pr_err("Couldn't create data debug file\n");
+
+ ent = debugfs_create_file("force_irq",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root, chip,
+ &force_irq_ops);
+ if (!ent)
+ pr_err("Couldn't create data debug file\n");
+
+ ent = debugfs_create_file("irq_count", S_IFREG | S_IRUGO,
+ chip->debug_root, chip,
+ &irq_count_debugfs_ops);
+ if (!ent)
+ pr_err("Couldn't create count debug file\n");
+ }
+ return 0;
+}
+
+static int smb1351_main_charger_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct smb1351_charger *chip;
+ struct power_supply *usb_psy;
+ u8 reg = 0;
+
+ usb_psy = power_supply_get_by_name("usb");
+ if (!usb_psy) {
+ pr_debug("USB psy not found; deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ pr_err("Couldn't allocate memory\n");
+ return -ENOMEM;
+ }
+
+ chip->client = client;
+ chip->dev = &client->dev;
+ chip->usb_psy = usb_psy;
+ chip->fake_battery_soc = -EINVAL;
+ INIT_DELAYED_WORK(&chip->chg_remove_work, smb1351_chg_remove_work);
+ INIT_DELAYED_WORK(&chip->hvdcp_det_work, smb1351_hvdcp_det_work);
+ device_init_wakeup(chip->dev, true);
+
+ /* probe the device to check if it's actually connected */
+ rc = smb1351_read_reg(chip, CHG_REVISION_REG, &reg);
+ if (rc) {
+ pr_err("Failed to detect smb1351, device may be absent\n");
+ return -ENODEV;
+ }
+ pr_debug("smb1351 chip revision is %d\n", reg);
+
+ rc = smb1351_parse_dt(chip);
+ if (rc) {
+ pr_err("Couldn't parse DT nodes rc=%d\n", rc);
+ return rc;
+ }
+
+ /* using vadc and adc_tm for implementing pmic therm */
+ if (chip->using_pmic_therm) {
+ chip->vadc_dev = qpnp_get_vadc(chip->dev, "chg");
+ if (IS_ERR(chip->vadc_dev)) {
+ rc = PTR_ERR(chip->vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ pr_err("vadc property missing\n");
+ return rc;
+ }
+ chip->adc_tm_dev = qpnp_get_adc_tm(chip->dev, "chg");
+ if (IS_ERR(chip->adc_tm_dev)) {
+ rc = PTR_ERR(chip->adc_tm_dev);
+ if (rc != -EPROBE_DEFER)
+ pr_err("adc_tm property missing\n");
+ return rc;
+ }
+ }
+
+ i2c_set_clientdata(client, chip);
+
+ chip->batt_psy.name = "battery";
+ chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->batt_psy.get_property = smb1351_battery_get_property;
+ chip->batt_psy.set_property = smb1351_battery_set_property;
+ chip->batt_psy.property_is_writeable =
+ smb1351_batt_property_is_writeable;
+ chip->batt_psy.properties = smb1351_battery_properties;
+ chip->batt_psy.num_properties =
+ ARRAY_SIZE(smb1351_battery_properties);
+ chip->batt_psy.external_power_changed =
+ smb1351_external_power_changed;
+ chip->batt_psy.supplied_to = pm_batt_supplied_to;
+ chip->batt_psy.num_supplicants = ARRAY_SIZE(pm_batt_supplied_to);
+
+ chip->resume_completed = true;
+ mutex_init(&chip->irq_complete);
+
+ rc = power_supply_register(chip->dev, &chip->batt_psy);
+ if (rc) {
+ pr_err("Couldn't register batt psy rc=%d\n", rc);
+ return rc;
+ }
+
+ dump_regs(chip);
+
+ rc = smb1351_regulator_init(chip);
+ if (rc) {
+ pr_err("Couldn't initialize smb1351 ragulator rc=%d\n", rc);
+ goto fail_smb1351_regulator_init;
+ }
+
+ rc = smb1351_hw_init(chip);
+ if (rc) {
+ pr_err("Couldn't intialize hardware rc=%d\n", rc);
+ goto fail_smb1351_hw_init;
+ }
+
+ rc = smb1351_determine_initial_state(chip);
+ if (rc) {
+ pr_err("Couldn't determine initial state rc=%d\n", rc);
+ goto fail_smb1351_hw_init;
+ }
+
+ /* STAT irq configuration */
+ if (client->irq) {
+ rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ smb1351_chg_stat_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "smb1351_chg_stat_irq", chip);
+ if (rc) {
+ pr_err("Failed STAT irq=%d request rc = %d\n",
+ client->irq, rc);
+ goto fail_smb1351_hw_init;
+ }
+ enable_irq_wake(client->irq);
+ }
+
+ if (chip->using_pmic_therm) {
+ if (!chip->jeita_supported) {
+ /* add hot/cold temperature monitor */
+ chip->adc_param.low_temp = chip->batt_cold_decidegc;
+ chip->adc_param.high_temp = chip->batt_hot_decidegc;
+ } else {
+ chip->adc_param.low_temp = chip->batt_cool_decidegc;
+ chip->adc_param.high_temp = chip->batt_warm_decidegc;
+ }
+ chip->adc_param.timer_interval = ADC_MEAS2_INTERVAL_1S;
+ chip->adc_param.state_request = ADC_TM_WARM_COOL_THR_ENABLE;
+ chip->adc_param.btm_ctx = chip;
+ chip->adc_param.threshold_notification =
+ smb1351_chg_adc_notification;
+ chip->adc_param.channel = LR_MUX1_BATT_THERM;
+
+ rc = qpnp_adc_tm_channel_measure(chip->adc_tm_dev,
+ &chip->adc_param);
+ if (rc) {
+ pr_err("requesting ADC error %d\n", rc);
+ goto fail_smb1351_hw_init;
+ }
+ }
+
+ create_debugfs_entries(chip);
+
+ dump_regs(chip);
+
+ pr_info("smb1351 successfully probed. charger=%d, batt=%d version=%s\n",
+ chip->chg_present,
+ smb1351_get_prop_batt_present(chip),
+ smb1351_version_str[chip->version]);
+ return 0;
+
+fail_smb1351_hw_init:
+ regulator_unregister(chip->otg_vreg.rdev);
+fail_smb1351_regulator_init:
+ power_supply_unregister(&chip->batt_psy);
+ return rc;
+}
+
+static int smb1351_parallel_charger_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct smb1351_charger *chip;
+ struct device_node *node = client->dev.of_node;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ pr_err("Couldn't allocate memory\n");
+ return -ENOMEM;
+ }
+
+ chip->client = client;
+ chip->dev = &client->dev;
+ chip->parallel_charger = true;
+
+ chip->usb_suspended_status = of_property_read_bool(node,
+ "qcom,charging-disabled");
+ rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+ &chip->vfloat_mv);
+ if (rc)
+ chip->vfloat_mv = -EINVAL;
+ rc = of_property_read_u32(node, "qcom,recharge-mv",
+ &chip->recharge_mv);
+ if (rc)
+ chip->recharge_mv = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,parallel-en-pin-polarity",
+ &chip->parallel_pin_polarity_setting);
+ if (rc)
+ chip->parallel_pin_polarity_setting = EN_BY_PIN_LOW_ENABLE;
+ else
+ chip->parallel_pin_polarity_setting =
+ chip->parallel_pin_polarity_setting ?
+ EN_BY_PIN_HIGH_ENABLE : EN_BY_PIN_LOW_ENABLE;
+
+ i2c_set_clientdata(client, chip);
+
+ chip->parallel_psy.name = "usb-parallel";
+ chip->parallel_psy.type = POWER_SUPPLY_TYPE_USB_PARALLEL;
+ chip->parallel_psy.get_property = smb1351_parallel_get_property;
+ chip->parallel_psy.set_property = smb1351_parallel_set_property;
+ chip->parallel_psy.properties = smb1351_parallel_properties;
+ chip->parallel_psy.property_is_writeable
+ = smb1351_parallel_is_writeable;
+ chip->parallel_psy.num_properties
+ = ARRAY_SIZE(smb1351_parallel_properties);
+
+ rc = power_supply_register(chip->dev, &chip->parallel_psy);
+ if (rc) {
+ pr_err("Couldn't register parallel psy rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->resume_completed = true;
+ mutex_init(&chip->irq_complete);
+
+ create_debugfs_entries(chip);
+
+ pr_info("smb1351 parallel successfully probed.\n");
+
+ return 0;
+}
+
+static int smb1351_charger_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ if (is_parallel_charger(client))
+ return smb1351_parallel_charger_probe(client, id);
+ else
+ return smb1351_main_charger_probe(client, id);
+}
+
+static int smb1351_charger_remove(struct i2c_client *client)
+{
+ struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+ cancel_delayed_work_sync(&chip->chg_remove_work);
+ power_supply_unregister(&chip->batt_psy);
+
+ mutex_destroy(&chip->irq_complete);
+ debugfs_remove_recursive(chip->debug_root);
+ return 0;
+}
+
+static int smb1351_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+ /* no suspend resume activities for parallel charger */
+ if (chip->parallel_charger)
+ return 0;
+
+ mutex_lock(&chip->irq_complete);
+ chip->resume_completed = false;
+ mutex_unlock(&chip->irq_complete);
+
+ return 0;
+}
+
+static int smb1351_suspend_noirq(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+ /* no suspend resume activities for parallel charger */
+ if (chip->parallel_charger)
+ return 0;
+
+ if (chip->irq_waiting) {
+ pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int smb1351_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smb1351_charger *chip = i2c_get_clientdata(client);
+
+ /* no suspend resume activities for parallel charger */
+ if (chip->parallel_charger)
+ return 0;
+
+ mutex_lock(&chip->irq_complete);
+ chip->resume_completed = true;
+ if (chip->irq_waiting) {
+ mutex_unlock(&chip->irq_complete);
+ smb1351_chg_stat_handler(client->irq, chip);
+ enable_irq(client->irq);
+ } else {
+ mutex_unlock(&chip->irq_complete);
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops smb1351_pm_ops = {
+ .suspend = smb1351_suspend,
+ .suspend_noirq = smb1351_suspend_noirq,
+ .resume = smb1351_resume,
+};
+
+static const struct of_device_id smb1351_match_table[] = {
+ { .compatible = "qcom,smb1351-charger",},
+ { },
+};
+
+static const struct i2c_device_id smb1351_charger_id[] = {
+ {"smb1351-charger", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, smb1351_charger_id);
+
+static struct i2c_driver smb1351_charger_driver = {
+ .driver = {
+ .name = "smb1351-charger",
+ .owner = THIS_MODULE,
+ .of_match_table = smb1351_match_table,
+ .pm = &smb1351_pm_ops,
+ },
+ .probe = smb1351_charger_probe,
+ .remove = smb1351_charger_remove,
+ .id_table = smb1351_charger_id,
+};
+
+module_i2c_driver(smb1351_charger_driver);
+
+MODULE_DESCRIPTION("smb1351 Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:smb1351-charger");
diff --git a/drivers/power/smb135x-charger.c b/drivers/power/smb135x-charger.c
new file mode 100644
index 000000000000..4d5a229762cc
--- /dev/null
+++ b/drivers/power/smb135x-charger.c
@@ -0,0 +1,4516 @@
+/* Copyright (c) 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/i2c.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
+#include <linux/pinctrl/consumer.h>
+
+#define SMB135X_BITS_PER_REG 8
+
+/* Mask/Bit helpers */
+#define _SMB135X_MASK(BITS, POS) \
+ ((unsigned char)(((1 << (BITS)) - 1) << (POS)))
+#define SMB135X_MASK(LEFT_BIT_POS, RIGHT_BIT_POS) \
+ _SMB135X_MASK((LEFT_BIT_POS) - (RIGHT_BIT_POS) + 1, \
+ (RIGHT_BIT_POS))
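+/*
+ * Example: SMB135X_MASK(5, 3) expands to _SMB135X_MASK(3, 3)
+ * = ((1 << 3) - 1) << 3 = 0x38, i.e. a mask covering bits 5..3.
+ */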
+
+/* Config registers */
+#define CFG_3_REG 0x03
+#define CHG_ITERM_50MA 0x08
+#define CHG_ITERM_100MA 0x10
+#define CHG_ITERM_150MA 0x18
+#define CHG_ITERM_200MA 0x20
+#define CHG_ITERM_250MA 0x28
+#define CHG_ITERM_300MA 0x00
+#define CHG_ITERM_500MA 0x30
+#define CHG_ITERM_600MA 0x38
+#define CHG_ITERM_MASK SMB135X_MASK(5, 3)
+
+#define CFG_4_REG 0x04
+#define CHG_INHIBIT_MASK SMB135X_MASK(7, 6)
+#define CHG_INHIBIT_50MV_VAL 0x00
+#define CHG_INHIBIT_100MV_VAL 0x40
+#define CHG_INHIBIT_200MV_VAL 0x80
+#define CHG_INHIBIT_300MV_VAL 0xC0
+
+#define CFG_5_REG 0x05
+#define RECHARGE_200MV_BIT BIT(2)
+#define USB_2_3_BIT BIT(5)
+
+#define CFG_A_REG 0x0A
+#define DCIN_INPUT_MASK SMB135X_MASK(4, 0)
+
+#define CFG_C_REG 0x0C
+#define USBIN_INPUT_MASK SMB135X_MASK(4, 0)
+#define USBIN_ADAPTER_ALLOWANCE_MASK SMB135X_MASK(7, 5)
+#define ALLOW_5V_ONLY 0x00
+#define ALLOW_5V_OR_9V 0x20
+#define ALLOW_5V_TO_9V 0x40
+#define ALLOW_9V_ONLY 0x60
+
+#define CFG_D_REG 0x0D
+
+#define CFG_E_REG 0x0E
+#define POLARITY_100_500_BIT BIT(2)
+#define USB_CTRL_BY_PIN_BIT BIT(1)
+#define HVDCP_5_9_BIT BIT(4)
+
+#define CFG_11_REG 0x11
+#define PRIORITY_BIT BIT(7)
+#define AUTO_SRC_DET_EN_BIT BIT(0)
+
+#define USBIN_DCIN_CFG_REG 0x12
+#define USBIN_SUSPEND_VIA_COMMAND_BIT BIT(6)
+
+#define CFG_14_REG 0x14
+#define CHG_EN_BY_PIN_BIT BIT(7)
+#define CHG_EN_ACTIVE_LOW_BIT BIT(6)
+#define PRE_TO_FAST_REQ_CMD_BIT BIT(5)
+#define DISABLE_CURRENT_TERM_BIT BIT(3)
+#define DISABLE_AUTO_RECHARGE_BIT BIT(2)
+#define EN_CHG_INHIBIT_BIT BIT(0)
+
+#define CFG_16_REG 0x16
+#define SAFETY_TIME_EN_BIT BIT(5)
+#define SAFETY_TIME_EN_SHIFT 5
+#define SAFETY_TIME_MINUTES_MASK SMB135X_MASK(3, 2)
+#define SAFETY_TIME_MINUTES_SHIFT 2
+
+#define CFG_17_REG 0x17
+#define CHG_STAT_DISABLE_BIT BIT(0)
+#define CHG_STAT_ACTIVE_HIGH_BIT BIT(1)
+#define CHG_STAT_IRQ_ONLY_BIT BIT(4)
+
+#define CFG_19_REG 0x19
+#define BATT_MISSING_ALGO_BIT BIT(2)
+#define BATT_MISSING_THERM_BIT BIT(1)
+
+#define CFG_1A_REG 0x1A
+#define HOT_SOFT_VFLOAT_COMP_EN_BIT BIT(3)
+#define COLD_SOFT_VFLOAT_COMP_EN_BIT BIT(2)
+#define HOT_SOFT_CURRENT_COMP_EN_BIT BIT(1)
+#define COLD_SOFT_CURRENT_COMP_EN_BIT BIT(0)
+
+#define CFG_1B_REG 0x1B
+#define COLD_HARD_MASK SMB135X_MASK(7, 6)
+#define COLD_HARD_SHIFT 6
+#define HOT_HARD_MASK SMB135X_MASK(5, 4)
+#define HOT_HARD_SHIFT 4
+#define COLD_SOFT_MASK SMB135X_MASK(3, 2)
+#define COLD_SOFT_SHIFT 2
+#define HOT_SOFT_MASK SMB135X_MASK(1, 0)
+#define HOT_SOFT_SHIFT 0
+
+#define VFLOAT_REG 0x1E
+
+#define VERSION1_REG 0x2A
+#define VERSION1_MASK SMB135X_MASK(7, 6)
+#define VERSION1_SHIFT 6
+#define VERSION2_REG 0x32
+#define VERSION2_MASK SMB135X_MASK(1, 0)
+#define VERSION3_REG 0x34
+
+/* Irq Config registers */
+#define IRQ_CFG_REG 0x07
+#define IRQ_BAT_HOT_COLD_HARD_BIT BIT(7)
+#define IRQ_BAT_HOT_COLD_SOFT_BIT BIT(6)
+#define IRQ_OTG_OVER_CURRENT_BIT BIT(4)
+#define IRQ_USBIN_UV_BIT BIT(2)
+#define IRQ_INTERNAL_TEMPERATURE_BIT BIT(0)
+
+#define IRQ2_CFG_REG 0x08
+#define IRQ2_SAFETY_TIMER_BIT BIT(7)
+#define IRQ2_CHG_ERR_BIT BIT(6)
+#define IRQ2_CHG_PHASE_CHANGE_BIT BIT(4)
+#define IRQ2_CHG_INHIBIT_BIT BIT(3)
+#define IRQ2_POWER_OK_BIT BIT(2)
+#define IRQ2_BATT_MISSING_BIT BIT(1)
+#define IRQ2_VBAT_LOW_BIT BIT(0)
+
+#define IRQ3_CFG_REG 0x09
+#define IRQ3_RID_DETECT_BIT BIT(4)
+#define IRQ3_SRC_DETECT_BIT BIT(2)
+#define IRQ3_DCIN_UV_BIT BIT(0)
+
+#define USBIN_OTG_REG 0x0F
+#define OTG_CNFG_MASK SMB135X_MASK(3, 2)
+#define OTG_CNFG_PIN_CTRL 0x04
+#define OTG_CNFG_COMMAND_CTRL 0x08
+#define OTG_CNFG_AUTO_CTRL 0x0C
+
+/* Command Registers */
+#define CMD_I2C_REG 0x40
+#define ALLOW_VOLATILE_BIT BIT(6)
+
+#define CMD_INPUT_LIMIT 0x41
+#define USB_SHUTDOWN_BIT BIT(6)
+#define DC_SHUTDOWN_BIT BIT(5)
+#define USE_REGISTER_FOR_CURRENT BIT(2)
+#define USB_100_500_AC_MASK SMB135X_MASK(1, 0)
+#define USB_100_VAL 0x02
+#define USB_500_VAL 0x00
+#define USB_AC_VAL 0x01
+
+#define CMD_CHG_REG 0x42
+#define CMD_CHG_EN BIT(1)
+#define OTG_EN BIT(0)
+
+/* Status registers */
+#define STATUS_1_REG 0x47
+#define USING_USB_BIT BIT(1)
+#define USING_DC_BIT BIT(0)
+
+#define STATUS_4_REG 0x4A
+#define BATT_NET_CHG_CURRENT_BIT BIT(7)
+#define BATT_LESS_THAN_2V BIT(4)
+#define CHG_HOLD_OFF_BIT BIT(3)
+#define CHG_TYPE_MASK SMB135X_MASK(2, 1)
+#define CHG_TYPE_SHIFT 1
+#define BATT_NOT_CHG_VAL 0x0
+#define BATT_PRE_CHG_VAL 0x1
+#define BATT_FAST_CHG_VAL 0x2
+#define BATT_TAPER_CHG_VAL 0x3
+#define CHG_EN_BIT BIT(0)
+
+#define STATUS_5_REG 0x4B
+#define CDP_BIT BIT(7)
+#define DCP_BIT BIT(6)
+#define OTHER_BIT BIT(5)
+#define SDP_BIT BIT(4)
+#define ACA_A_BIT BIT(3)
+#define ACA_B_BIT BIT(2)
+#define ACA_C_BIT BIT(1)
+#define ACA_DOCK_BIT BIT(0)
+
+#define STATUS_6_REG 0x4C
+#define RID_FLOAT_BIT BIT(3)
+#define RID_A_BIT BIT(2)
+#define RID_B_BIT BIT(1)
+#define RID_C_BIT BIT(0)
+
+#define STATUS_7_REG 0x4D
+
+#define STATUS_8_REG 0x4E
+#define USBIN_9V BIT(5)
+#define USBIN_UNREG BIT(4)
+#define USBIN_LV BIT(3)
+#define DCIN_9V BIT(2)
+#define DCIN_UNREG BIT(1)
+#define DCIN_LV BIT(0)
+
+#define STATUS_9_REG 0x4F
+#define REV_MASK SMB135X_MASK(3, 0)
+
+/* Irq Status registers */
+#define IRQ_A_REG 0x50
+#define IRQ_A_HOT_HARD_BIT BIT(6)
+#define IRQ_A_COLD_HARD_BIT BIT(4)
+#define IRQ_A_HOT_SOFT_BIT BIT(2)
+#define IRQ_A_COLD_SOFT_BIT BIT(0)
+
+#define IRQ_B_REG 0x51
+#define IRQ_B_BATT_TERMINAL_BIT BIT(6)
+#define IRQ_B_BATT_MISSING_BIT BIT(4)
+#define IRQ_B_VBAT_LOW_BIT BIT(2)
+#define IRQ_B_TEMPERATURE_BIT BIT(0)
+
+#define IRQ_C_REG 0x52
+#define IRQ_C_TERM_BIT BIT(0)
+#define IRQ_C_FASTCHG_BIT BIT(6)
+
+#define IRQ_D_REG 0x53
+#define IRQ_D_TIMEOUT_BIT BIT(2)
+
+#define IRQ_E_REG 0x54
+#define IRQ_E_DC_OV_BIT BIT(6)
+#define IRQ_E_DC_UV_BIT BIT(4)
+#define IRQ_E_USB_OV_BIT BIT(2)
+#define IRQ_E_USB_UV_BIT BIT(0)
+
+#define IRQ_F_REG 0x55
+#define IRQ_F_POWER_OK_BIT BIT(0)
+
+#define IRQ_G_REG 0x56
+#define IRQ_G_SRC_DETECT_BIT BIT(6)
+
+enum {
+ WRKARND_USB100_BIT = BIT(0),
+ WRKARND_APSD_FAIL = BIT(1),
+};
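+/*
+ * WRKARND_USB100_BIT is set when is_usb100_broken() reports the trim
+ * register issue; WRKARND_APSD_FAIL is set for revisions <= 1.1, whose
+ * APSD charger type detection is unreliable (see
+ * smb135x_chip_version_and_revision() below).
+ */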
+
+enum {
+ REV_1 = 1, /* Rev 1.0 */
+ REV_1_1 = 2, /* Rev 1.1 */
+ REV_2 = 3, /* Rev 2 */
+ REV_2_1 = 5, /* Rev 2.1 */
+ REV_MAX,
+};
+
+static char *revision_str[] = {
+ [REV_1] = "rev1",
+ [REV_1_1] = "rev1.1",
+ [REV_2] = "rev2",
+ [REV_2_1] = "rev2.1",
+};
+
+enum {
+ V_SMB1356,
+ V_SMB1357,
+ V_SMB1358,
+ V_SMB1359,
+ V_MAX,
+};
+
+static int version_data[] = {
+ [V_SMB1356] = V_SMB1356,
+ [V_SMB1357] = V_SMB1357,
+ [V_SMB1358] = V_SMB1358,
+ [V_SMB1359] = V_SMB1359,
+};
+
+static char *version_str[] = {
+ [V_SMB1356] = "smb1356",
+ [V_SMB1357] = "smb1357",
+ [V_SMB1358] = "smb1358",
+ [V_SMB1359] = "smb1359",
+};
+
+enum {
+ USER = BIT(0),
+ THERMAL = BIT(1),
+ CURRENT = BIT(2),
+};
+
+enum path_type {
+ USB,
+ DC,
+};
+
+static int chg_time[] = {
+ 192,
+ 384,
+ 768,
+ 1536,
+};
+
+static char *pm_batt_supplied_to[] = {
+ "bms",
+};
+
+struct smb135x_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+};
+
+struct smb135x_chg {
+ struct i2c_client *client;
+ struct device *dev;
+ struct mutex read_write_lock;
+
+ u8 revision;
+ int version;
+
+ bool chg_enabled;
+ bool chg_disabled_permanently;
+
+ bool usb_present;
+ bool dc_present;
+ bool usb_slave_present;
+ bool dc_ov;
+
+ bool bmd_algo_disabled;
+ bool iterm_disabled;
+ int iterm_ma;
+ int vfloat_mv;
+ int safety_time;
+ int resume_delta_mv;
+ int fake_battery_soc;
+ struct dentry *debug_root;
+ int usb_current_arr_size;
+ int *usb_current_table;
+ int dc_current_arr_size;
+ int *dc_current_table;
+ bool inhibit_disabled;
+ int fastchg_current_arr_size;
+ int *fastchg_current_table;
+ int fastchg_ma;
+ u8 irq_cfg_mask[3];
+ int otg_oc_count;
+ struct delayed_work reset_otg_oc_count_work;
+ struct mutex otg_oc_count_lock;
+ struct delayed_work hvdcp_det_work;
+
+ bool parallel_charger;
+ bool parallel_charger_present;
+ bool bms_controlled_charging;
+
+ /* psy */
+ struct power_supply *usb_psy;
+ int usb_psy_ma;
+ int real_usb_psy_ma;
+ struct power_supply batt_psy;
+ struct power_supply dc_psy;
+ struct power_supply parallel_psy;
+ struct power_supply *bms_psy;
+ int dc_psy_type;
+ int dc_psy_ma;
+ const char *bms_psy_name;
+
+ /* status tracking */
+ bool chg_done_batt_full;
+ bool batt_present;
+ bool batt_hot;
+ bool batt_cold;
+ bool batt_warm;
+ bool batt_cool;
+
+ bool resume_completed;
+ bool irq_waiting;
+ u32 usb_suspended;
+ u32 dc_suspended;
+ struct mutex path_suspend_lock;
+
+ u32 peek_poke_address;
+ struct smb135x_regulator otg_vreg;
+ int skip_writes;
+ int skip_reads;
+ u32 workaround_flags;
+ bool soft_vfloat_comp_disabled;
+ bool soft_current_comp_disabled;
+ struct mutex irq_complete;
+ struct regulator *therm_bias_vreg;
+ struct regulator *usb_pullup_vreg;
+ struct delayed_work wireless_insertion_work;
+
+ unsigned int thermal_levels;
+ unsigned int therm_lvl_sel;
+ unsigned int *thermal_mitigation;
+ unsigned int gamma_setting_num;
+ unsigned int *gamma_setting;
+ struct mutex current_change_lock;
+
+ const char *pinctrl_state_name;
+ struct pinctrl *smb_pinctrl;
+
+ bool apsd_rerun;
+ bool id_line_not_connected;
+};
+
+#define RETRY_COUNT 5
+static const int retry_sleep_ms[RETRY_COUNT] = {
+ 10, 20, 30, 40, 50
+};
+
+static int __smb135x_read(struct smb135x_chg *chip, int reg,
+ u8 *val)
+{
+ s32 ret;
+ int retry_count = 0;
+
+retry:
+ ret = i2c_smbus_read_byte_data(chip->client, reg);
+ if (ret < 0 && retry_count < RETRY_COUNT) {
+ /* sleep for few ms before retrying */
+ msleep(retry_sleep_ms[retry_count++]);
+ goto retry;
+ }
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "i2c read fail: can't read from %02x: %d\n", reg, ret);
+ return ret;
+ } else {
+ *val = ret;
+ }
+
+ return 0;
+}
+
+static int __smb135x_write(struct smb135x_chg *chip, int reg,
+ u8 val)
+{
+ s32 ret;
+ int retry_count = 0;
+
+retry:
+ ret = i2c_smbus_write_byte_data(chip->client, reg, val);
+ if (ret < 0 && retry_count < RETRY_COUNT) {
+ /* sleep for few ms before retrying */
+ msleep(retry_sleep_ms[retry_count++]);
+ goto retry;
+ }
+ if (ret < 0) {
+ dev_err(chip->dev,
+ "i2c write fail: can't write %02x to %02x: %d\n",
+ val, reg, ret);
+ return ret;
+ }
+ pr_debug("Writing 0x%02x=0x%02x\n", reg, val);
+ return 0;
+}
+
+static int smb135x_read(struct smb135x_chg *chip, int reg,
+ u8 *val)
+{
+ int rc;
+
+ if (chip->skip_reads) {
+ *val = 0;
+ return 0;
+ }
+ mutex_lock(&chip->read_write_lock);
+ pm_stay_awake(chip->dev);
+ rc = __smb135x_read(chip, reg, val);
+ pm_relax(chip->dev);
+ mutex_unlock(&chip->read_write_lock);
+
+ return rc;
+}
+
+static int smb135x_write(struct smb135x_chg *chip, int reg,
+ u8 val)
+{
+ int rc;
+
+ if (chip->skip_writes)
+ return 0;
+
+ mutex_lock(&chip->read_write_lock);
+ pm_stay_awake(chip->dev);
+ rc = __smb135x_write(chip, reg, val);
+ pm_relax(chip->dev);
+ mutex_unlock(&chip->read_write_lock);
+
+ return rc;
+}
+
+static int smb135x_masked_write(struct smb135x_chg *chip, int reg,
+ u8 mask, u8 val)
+{
+ s32 rc;
+ u8 temp;
+
+ if (chip->skip_writes || chip->skip_reads)
+ return 0;
+
+ mutex_lock(&chip->read_write_lock);
+ rc = __smb135x_read(chip, reg, &temp);
+ if (rc < 0) {
+ dev_err(chip->dev, "read failed: reg=%03X, rc=%d\n", reg, rc);
+ goto out;
+ }
+ temp &= ~mask;
+ temp |= val & mask;
+ rc = __smb135x_write(chip, reg, temp);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "write failed: reg=%03X, rc=%d\n", reg, rc);
+ }
+out:
+ mutex_unlock(&chip->read_write_lock);
+ return rc;
+}
+
+static int read_revision(struct smb135x_chg *chip, u8 *revision)
+{
+ int rc;
+ u8 reg;
+
+ rc = smb135x_read(chip, STATUS_9_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 9 rc = %d\n", rc);
+ return rc;
+ }
+ *revision = (reg & REV_MASK);
+ return 0;
+}
+
+static int read_version1(struct smb135x_chg *chip, u8 *version)
+{
+ int rc;
+ u8 reg;
+
+ rc = smb135x_read(chip, VERSION1_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read version 1 rc = %d\n", rc);
+ return rc;
+ }
+ *version = (reg & VERSION1_MASK) >> VERSION1_SHIFT;
+ return 0;
+}
+
+static int read_version2(struct smb135x_chg *chip, u8 *version)
+{
+ int rc;
+ u8 reg;
+
+ rc = smb135x_read(chip, VERSION2_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read version 2 rc = %d\n", rc);
+ return rc;
+ }
+ *version = (reg & VERSION2_MASK);
+ return 0;
+}
+
+static int read_version3(struct smb135x_chg *chip, u8 *version)
+{
+ int rc;
+ u8 reg;
+
+ rc = smb135x_read(chip, VERSION3_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read version 3 rc = %d\n", rc);
+ return rc;
+ }
+ *version = reg;
+ return 0;
+}
+
+#define TRIM_23_REG 0x23
+#define CHECK_USB100_GOOD_BIT BIT(1)
+static bool is_usb100_broken(struct smb135x_chg *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smb135x_read(chip, TRIM_23_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 9 rc = %d\n", rc);
+ return rc;
+ }
+ return !!(reg & CHECK_USB100_GOOD_BIT);
+}
+
+static bool is_usb_slave_present(struct smb135x_chg *chip)
+{
+ bool usb_slave_present;
+ u8 reg;
+ int rc;
+
+ if (chip->id_line_not_connected)
+ return false;
+
+ rc = smb135x_read(chip, STATUS_6_REG, &reg);
+ if (rc < 0) {
+ pr_err("Couldn't read stat 6 rc = %d\n", rc);
+ return false;
+ }
+
+ if ((reg & (RID_FLOAT_BIT | RID_A_BIT | RID_B_BIT | RID_C_BIT)) == 0)
+ usb_slave_present = 1;
+ else
+ usb_slave_present = 0;
+
+ pr_debug("stat6= 0x%02x slave_present = %d\n", reg, usb_slave_present);
+ return usb_slave_present;
+}
+
+static char *usb_type_str[] = {
+ "ACA_DOCK", /* bit 0 */
+ "ACA_C", /* bit 1 */
+ "ACA_B", /* bit 2 */
+ "ACA_A", /* bit 3 */
+ "SDP", /* bit 4 */
+ "OTHER", /* bit 5 */
+ "DCP", /* bit 6 */
+ "CDP", /* bit 7 */
+ "NONE", /* bit 8 error case */
+};
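+/*
+ * STATUS_5 reports the APSD result as a one-hot bit; find_first_bit()
+ * maps it to the entries above. When no bit is set, find_first_bit()
+ * returns SMB135X_BITS_PER_REG (8), which indexes the "NONE" error entry.
+ */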
+
+/* helper to return the string of USB type */
+static char *get_usb_type_name(u8 stat_5)
+{
+ unsigned long stat = stat_5;
+
+ return usb_type_str[find_first_bit(&stat, SMB135X_BITS_PER_REG)];
+}
+
+static enum power_supply_type usb_type_enum[] = {
+ POWER_SUPPLY_TYPE_USB_ACA, /* bit 0 */
+ POWER_SUPPLY_TYPE_USB_ACA, /* bit 1 */
+ POWER_SUPPLY_TYPE_USB_ACA, /* bit 2 */
+ POWER_SUPPLY_TYPE_USB_ACA, /* bit 3 */
+ POWER_SUPPLY_TYPE_USB, /* bit 4 */
+ POWER_SUPPLY_TYPE_UNKNOWN, /* bit 5 */
+ POWER_SUPPLY_TYPE_USB_DCP, /* bit 6 */
+ POWER_SUPPLY_TYPE_USB_CDP, /* bit 7 */
+ POWER_SUPPLY_TYPE_UNKNOWN, /* bit 8 error case, report UNKNOWN */
+};
+
+/* helper to return enum power_supply_type of USB type */
+static enum power_supply_type get_usb_supply_type(u8 stat_5)
+{
+ unsigned long stat = stat_5;
+
+ return usb_type_enum[find_first_bit(&stat, SMB135X_BITS_PER_REG)];
+}
+
+static enum power_supply_property smb135x_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+};
+
+static int smb135x_get_prop_batt_status(struct smb135x_chg *chip)
+{
+ int rc;
+ int status = POWER_SUPPLY_STATUS_DISCHARGING;
+ u8 reg = 0;
+ u8 chg_type;
+
+ if (chip->chg_done_batt_full)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ rc = smb135x_read(chip, STATUS_4_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Unable to read STATUS_4_REG rc = %d\n", rc);
+ return POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+
+ if (reg & CHG_HOLD_OFF_BIT) {
+ /*
+ * when chg hold off happens the battery is
+ * not charging
+ */
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ goto out;
+ }
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+
+ if (chg_type == BATT_NOT_CHG_VAL)
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_CHARGING;
+out:
+ pr_debug("STATUS_4_REG=%x\n", reg);
+ return status;
+}
+
+static int smb135x_get_prop_batt_present(struct smb135x_chg *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = smb135x_read(chip, STATUS_4_REG, &reg);
+ if (rc < 0)
+ return 0;
+
+ /* treat battery gone if less than 2V */
+ if (reg & BATT_LESS_THAN_2V)
+ return 0;
+
+ return chip->batt_present;
+}
+
+static int smb135x_get_prop_charge_type(struct smb135x_chg *chip)
+{
+ int rc;
+ u8 reg;
+ u8 chg_type;
+
+ rc = smb135x_read(chip, STATUS_4_REG, &reg);
+ if (rc < 0)
+ return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+
+ chg_type = (reg & CHG_TYPE_MASK) >> CHG_TYPE_SHIFT;
+ if (chg_type == BATT_NOT_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+ else if (chg_type == BATT_FAST_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else if (chg_type == BATT_PRE_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ else if (chg_type == BATT_TAPER_CHG_VAL)
+ return POWER_SUPPLY_CHARGE_TYPE_TAPER;
+
+ return POWER_SUPPLY_CHARGE_TYPE_NONE;
+}
+
+#define DEFAULT_BATT_CAPACITY 50
+static int smb135x_get_prop_batt_capacity(struct smb135x_chg *chip)
+{
+ union power_supply_propval ret = {0, };
+
+ if (chip->fake_battery_soc >= 0)
+ return chip->fake_battery_soc;
+ if (chip->bms_psy) {
+ chip->bms_psy->get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_CAPACITY, &ret);
+ return ret.intval;
+ }
+
+ return DEFAULT_BATT_CAPACITY;
+}
+
+static int smb135x_get_prop_batt_health(struct smb135x_chg *chip)
+{
+ union power_supply_propval ret = {0, };
+
+ if (chip->batt_hot)
+ ret.intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (chip->batt_cold)
+ ret.intval = POWER_SUPPLY_HEALTH_COLD;
+ else if (chip->batt_warm)
+ ret.intval = POWER_SUPPLY_HEALTH_WARM;
+ else if (chip->batt_cool)
+ ret.intval = POWER_SUPPLY_HEALTH_COOL;
+ else
+ ret.intval = POWER_SUPPLY_HEALTH_GOOD;
+
+ return ret.intval;
+}
+
+static int smb135x_enable_volatile_writes(struct smb135x_chg *chip)
+{
+ int rc;
+
+ rc = smb135x_masked_write(chip, CMD_I2C_REG,
+ ALLOW_VOLATILE_BIT, ALLOW_VOLATILE_BIT);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "Couldn't set VOLATILE_W_PERM_BIT rc=%d\n", rc);
+
+ return rc;
+}
+
+static int usb_current_table_smb1356[] = {
+ 180,
+ 240,
+ 270,
+ 285,
+ 300,
+ 330,
+ 360,
+ 390,
+ 420,
+ 540,
+ 570,
+ 600,
+ 660,
+ 720,
+ 840,
+ 900,
+ 960,
+ 1080,
+ 1110,
+ 1128,
+ 1146,
+ 1170,
+ 1182,
+ 1200,
+ 1230,
+ 1260,
+ 1380,
+ 1440,
+ 1560,
+ 1620,
+ 1680,
+ 1800
+};
+
+static int fastchg_current_table[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 2700,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 2800,
+ 1950,
+ 1970,
+ 2000,
+ 2050,
+ 2100,
+ 2300,
+ 2400,
+ 2500,
+ 3000
+};
+
+static int usb_current_table_smb1357_smb1358[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+ 2050,
+ 2100,
+ 2300,
+ 2400,
+ 2500,
+ 3000
+};
+
+static int usb_current_table_smb1359[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+ 2050,
+ 2100,
+ 2300,
+ 2400,
+ 2500
+};
+
+static int dc_current_table_smb1356[] = {
+ 180,
+ 240,
+ 270,
+ 285,
+ 300,
+ 330,
+ 360,
+ 390,
+ 420,
+ 540,
+ 570,
+ 600,
+ 660,
+ 720,
+ 840,
+ 870,
+ 900,
+ 960,
+ 1080,
+ 1110,
+ 1128,
+ 1146,
+ 1158,
+ 1170,
+ 1182,
+ 1200,
+};
+
+static int dc_current_table[] = {
+ 300,
+ 400,
+ 450,
+ 475,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 900,
+ 950,
+ 1000,
+ 1100,
+ 1200,
+ 1400,
+ 1450,
+ 1500,
+ 1600,
+ 1800,
+ 1850,
+ 1880,
+ 1910,
+ 1930,
+ 1950,
+ 1970,
+ 2000,
+};
+
+#define CURRENT_100_MA 100
+#define CURRENT_150_MA 150
+#define CURRENT_500_MA 500
+#define CURRENT_900_MA 900
+#define SUSPEND_CURRENT_MA 2
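+/*
+ * SUSPEND_CURRENT_MA (2mA) acts as a sentinel: requesting it through
+ * smb135x_set_usb_chg_current() forces the USB input path into suspend,
+ * and smb135x_get_usb_chg_current() reports it while the path is
+ * suspended.
+ */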
+
+static int __smb135x_usb_suspend(struct smb135x_chg *chip, bool suspend)
+{
+ int rc;
+
+ rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+ USB_SHUTDOWN_BIT, suspend ? USB_SHUTDOWN_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set cfg 11 rc = %d\n", rc);
+ return rc;
+}
+
+static int __smb135x_dc_suspend(struct smb135x_chg *chip, bool suspend)
+{
+ int rc = 0;
+
+ rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+ DC_SHUTDOWN_BIT, suspend ? DC_SHUTDOWN_BIT : 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set cfg 11 rc = %d\n", rc);
+ return rc;
+}
+
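+/*
+ * Each input path keeps a bitmask of suspend reasons (USER, THERMAL,
+ * CURRENT). The hardware suspend bit is only toggled when the mask
+ * transitions between zero and non-zero, so independent callers can
+ * suspend and unsuspend a path without clobbering each other.
+ */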
+static int smb135x_path_suspend(struct smb135x_chg *chip, enum path_type path,
+ int reason, bool suspend)
+{
+ int rc = 0;
+ int suspended;
+ int *path_suspended;
+ int (*func)(struct smb135x_chg *chip, bool suspend);
+
+ mutex_lock(&chip->path_suspend_lock);
+ if (path == USB) {
+ suspended = chip->usb_suspended;
+ path_suspended = &chip->usb_suspended;
+ func = __smb135x_usb_suspend;
+ } else {
+ suspended = chip->dc_suspended;
+ path_suspended = &chip->dc_suspended;
+ func = __smb135x_dc_suspend;
+ }
+
+ if (suspend == false)
+ suspended &= ~reason;
+ else
+ suspended |= reason;
+
+ if (*path_suspended && !suspended)
+ rc = func(chip, 0);
+ if (!(*path_suspended) && suspended)
+ rc = func(chip, 1);
+
+ if (rc)
+ dev_err(chip->dev, "Couldn't set/unset suspend for %s path rc = %d\n",
+ path == USB ? "usb" : "dc",
+ rc);
+ else
+ *path_suspended = suspended;
+
+ mutex_unlock(&chip->path_suspend_lock);
+ return rc;
+}
+
+static int smb135x_get_usb_chg_current(struct smb135x_chg *chip)
+{
+ if (chip->usb_suspended)
+ return SUSPEND_CURRENT_MA;
+ else
+ return chip->real_usb_psy_ma;
+}
+#define FCC_MASK SMB135X_MASK(4, 0)
+#define CFG_1C_REG 0x1C
+static int smb135x_get_fastchg_current(struct smb135x_chg *chip)
+{
+ u8 reg;
+ int rc;
+
+ rc = smb135x_read(chip, CFG_1C_REG, &reg);
+ if (rc < 0) {
+ pr_debug("cannot read 1c rc = %d\n", rc);
+ return 0;
+ }
+ reg &= FCC_MASK;
+ if (chip->fastchg_current_arr_size == 0
+ || reg >= chip->fastchg_current_arr_size) {
+ dev_err(chip->dev, "Current table out of range\n");
+ return -EINVAL;
+ }
+ return chip->fastchg_current_table[reg];
+}
+
+static int smb135x_set_fastchg_current(struct smb135x_chg *chip,
+ int current_ma)
+{
+ int i, rc, diff, best, best_diff;
+ u8 reg;
+
+ /*
+ * if there is no array loaded or if the smallest current limit is
+ * above the requested current, then do nothing
+ */
+ if (chip->fastchg_current_arr_size == 0) {
+ dev_err(chip->dev, "no table loaded\n");
+ return -EINVAL;
+ } else if ((current_ma - chip->fastchg_current_table[0]) < 0) {
+ dev_err(chip->dev, "invalid current requested\n");
+ return -EINVAL;
+ }
+
+ /* use the closest setting under the requested current */
+ best = 0;
+ best_diff = current_ma - chip->fastchg_current_table[best];
+
+ for (i = 1; i < chip->fastchg_current_arr_size; i++) {
+ diff = current_ma - chip->fastchg_current_table[i];
+ if (diff >= 0 && diff < best_diff) {
+ best_diff = diff;
+ best = i;
+ }
+ }
+ i = best;
+
+ reg = i & FCC_MASK;
+ rc = smb135x_masked_write(chip, CFG_1C_REG, FCC_MASK, reg);
+ if (rc < 0)
+ dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+ pr_debug("fastchg current set to %dma\n",
+ chip->fastchg_current_table[i]);
+ return rc;
+}
+
+static int smb135x_set_high_usb_chg_current(struct smb135x_chg *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 usb_cur_val;
+
+ for (i = chip->usb_current_arr_size - 1; i >= 0; i--) {
+ if (current_ma >= chip->usb_current_table[i])
+ break;
+ }
+ if (i < 0) {
+ dev_err(chip->dev,
+ "Cannot find %dma current_table using %d\n",
+ current_ma, CURRENT_150_MA);
+ rc = smb135x_masked_write(chip, CFG_5_REG,
+ USB_2_3_BIT, USB_2_3_BIT);
+ rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+ USB_100_500_AC_MASK, USB_100_VAL);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %dmA rc=%d\n",
+ CURRENT_150_MA, rc);
+ else
+ chip->real_usb_psy_ma = CURRENT_150_MA;
+ return rc;
+ }
+
+ usb_cur_val = i & USBIN_INPUT_MASK;
+ rc = smb135x_masked_write(chip, CFG_C_REG,
+ USBIN_INPUT_MASK, usb_cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "cannot write to config c rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+ USB_100_500_AC_MASK, USB_AC_VAL);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't write cfg 5 rc = %d\n", rc);
+ else
+ chip->real_usb_psy_ma = chip->usb_current_table[i];
+ return rc;
+}
+
+#define MAX_VERSION 0xF
+#define USB_100_PROBLEM_VERSION 0x2
+/*
+ * If APSD results are used:
+ *   if SDP is detected, the charger looks at the 500mA setting;
+ *     if set it will draw 500mA, if unset it will draw 100mA.
+ *   if CDP/DCP is detected, it looks at the 0x0C setting,
+ *     i.e. the values in 0x41[1:0] do not matter.
+ */
+static int smb135x_set_usb_chg_current(struct smb135x_chg *chip,
+ int current_ma)
+{
+ int rc;
+
+ pr_debug("USB current_ma = %d\n", current_ma);
+
+ if (chip->workaround_flags & WRKARND_USB100_BIT) {
+ pr_info("USB requested = %dmA using %dmA\n", current_ma,
+ CURRENT_500_MA);
+ current_ma = CURRENT_500_MA;
+ }
+
+ if (current_ma == 0)
+ /* choose the lowest available value of 100mA */
+ current_ma = CURRENT_100_MA;
+
+ if (current_ma == SUSPEND_CURRENT_MA) {
+ /* force suspend bit */
+ rc = smb135x_path_suspend(chip, USB, CURRENT, true);
+ chip->real_usb_psy_ma = SUSPEND_CURRENT_MA;
+ goto out;
+ }
+ if (current_ma < CURRENT_150_MA) {
+ /* force 100mA */
+ rc = smb135x_masked_write(chip, CFG_5_REG, USB_2_3_BIT, 0);
+ rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+ USB_100_500_AC_MASK, USB_100_VAL);
+ rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+ chip->real_usb_psy_ma = CURRENT_100_MA;
+ goto out;
+ }
+ /* specific current values */
+ if (current_ma == CURRENT_150_MA) {
+ rc = smb135x_masked_write(chip, CFG_5_REG,
+ USB_2_3_BIT, USB_2_3_BIT);
+ rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+ USB_100_500_AC_MASK, USB_100_VAL);
+ rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+ chip->real_usb_psy_ma = CURRENT_150_MA;
+ goto out;
+ }
+ if (current_ma == CURRENT_500_MA) {
+ rc = smb135x_masked_write(chip, CFG_5_REG, USB_2_3_BIT, 0);
+ rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+ USB_100_500_AC_MASK, USB_500_VAL);
+ rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+ chip->real_usb_psy_ma = CURRENT_500_MA;
+ goto out;
+ }
+ if (current_ma == CURRENT_900_MA) {
+ rc = smb135x_masked_write(chip, CFG_5_REG,
+ USB_2_3_BIT, USB_2_3_BIT);
+ rc |= smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+ USB_100_500_AC_MASK, USB_500_VAL);
+ rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+ chip->real_usb_psy_ma = CURRENT_900_MA;
+ goto out;
+ }
+
+ rc = smb135x_set_high_usb_chg_current(chip, current_ma);
+ rc |= smb135x_path_suspend(chip, USB, CURRENT, false);
+out:
+ if (rc < 0)
+ dev_err(chip->dev,
+ "Couldn't set %dmA rc = %d\n", current_ma, rc);
+ return rc;
+}
+
+static int smb135x_set_dc_chg_current(struct smb135x_chg *chip,
+ int current_ma)
+{
+ int i, rc;
+ u8 dc_cur_val;
+
+ for (i = chip->dc_current_arr_size - 1; i >= 0; i--) {
+ if (chip->dc_psy_ma >= chip->dc_current_table[i])
+ break;
+ }
+ dc_cur_val = i & DCIN_INPUT_MASK;
+ rc = smb135x_masked_write(chip, CFG_A_REG,
+ DCIN_INPUT_MASK, dc_cur_val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set dc charge current rc = %d\n",
+ rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int smb135x_set_appropriate_current(struct smb135x_chg *chip,
+ enum path_type path)
+{
+ int therm_ma, current_ma;
+ int path_current = (path == USB) ? chip->usb_psy_ma : chip->dc_psy_ma;
+ int (*func)(struct smb135x_chg *chip, int current_ma);
+ int rc = 0;
+
+ if (!chip->usb_psy && path == USB)
+ return 0;
+
+ /*
+ * If the battery is absent, do not modify the current at all. The
+ * existing values were set by the bootloader or the default
+ * configuration, and since the input is the only source of power we
+ * should not change them.
+ */
+ if (!chip->batt_present) {
+ pr_debug("ignoring current request since battery is absent\n");
+ return 0;
+ }
+
+ if (path == USB) {
+ path_current = chip->usb_psy_ma;
+ func = smb135x_set_usb_chg_current;
+ } else {
+ path_current = chip->dc_psy_ma;
+ func = smb135x_set_dc_chg_current;
+ if (chip->dc_psy_type == -EINVAL)
+ func = NULL;
+ }
+
+ if (chip->therm_lvl_sel > 0
+ && chip->therm_lvl_sel < (chip->thermal_levels - 1))
+ /*
+ * consider thermal limit only when it is active and not at
+ * the highest level
+ */
+ therm_ma = chip->thermal_mitigation[chip->therm_lvl_sel];
+ else
+ therm_ma = path_current;
+
+ current_ma = min(therm_ma, path_current);
+ if (func != NULL)
+ rc = func(chip, current_ma);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set %s current to min(%d, %d)rc = %d\n",
+ path == USB ? "usb" : "dc",
+ therm_ma, path_current,
+ rc);
+ return rc;
+}
+
+static int smb135x_charging_enable(struct smb135x_chg *chip, int enable)
+{
+ int rc;
+
+ rc = smb135x_masked_write(chip, CMD_CHG_REG,
+ CMD_CHG_EN, enable ? CMD_CHG_EN : 0);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set CHG_ENABLE_BIT enable = %d rc = %d\n",
+ enable, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int __smb135x_charging(struct smb135x_chg *chip, int enable)
+{
+ int rc = 0;
+
+ pr_debug("charging enable = %d\n", enable);
+
+ if (chip->chg_disabled_permanently) {
+ pr_debug("charging is disabled permanetly\n");
+ return -EINVAL;
+ }
+
+ rc = smb135x_charging_enable(chip, enable);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't %s charging rc = %d\n",
+ enable ? "enable" : "disable", rc);
+ return rc;
+ }
+ chip->chg_enabled = enable;
+
+ /* set the suspended status */
+ rc = smb135x_path_suspend(chip, DC, USER, !enable);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend to %d rc = %d\n",
+ enable, rc);
+ return rc;
+ }
+ rc = smb135x_path_suspend(chip, USB, USER, !enable);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend to %d rc = %d\n",
+ enable, rc);
+ return rc;
+ }
+
+ pr_debug("charging %s\n",
+ enable ? "enabled" : "disabled running from batt");
+ return rc;
+}
+
+static int smb135x_charging(struct smb135x_chg *chip, int enable)
+{
+ int rc = 0;
+
+ pr_debug("charging enable = %d\n", enable);
+
+ __smb135x_charging(chip, enable);
+
+ if (chip->usb_psy) {
+ pr_debug("usb psy changed\n");
+ power_supply_changed(chip->usb_psy);
+ }
+ if (chip->dc_psy_type != -EINVAL) {
+ pr_debug("dc psy changed\n");
+ power_supply_changed(&chip->dc_psy);
+ }
+ pr_debug("charging %s\n",
+ enable ? "enabled" : "disabled running from batt");
+ return rc;
+}
+
+static int smb135x_system_temp_level_set(struct smb135x_chg *chip,
+ int lvl_sel)
+{
+ int rc = 0;
+ int prev_therm_lvl;
+
+ if (!chip->thermal_mitigation) {
+ pr_err("Thermal mitigation not supported\n");
+ return -EINVAL;
+ }
+
+ if (lvl_sel < 0) {
+ pr_err("Unsupported level selected %d\n", lvl_sel);
+ return -EINVAL;
+ }
+
+ if (lvl_sel >= chip->thermal_levels) {
+ pr_err("Unsupported level selected %d forcing %d\n", lvl_sel,
+ chip->thermal_levels - 1);
+ lvl_sel = chip->thermal_levels - 1;
+ }
+
+ if (lvl_sel == chip->therm_lvl_sel)
+ return 0;
+
+ mutex_lock(&chip->current_change_lock);
+ prev_therm_lvl = chip->therm_lvl_sel;
+ chip->therm_lvl_sel = lvl_sel;
+ if (chip->therm_lvl_sel == (chip->thermal_levels - 1)) {
+ /*
+ * Disable charging if highest value selected by
+ * setting the DC and USB path in suspend
+ */
+ rc = smb135x_path_suspend(chip, DC, THERMAL, true);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = smb135x_path_suspend(chip, USB, THERMAL, true);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ goto out;
+ }
+
+ smb135x_set_appropriate_current(chip, USB);
+ smb135x_set_appropriate_current(chip, DC);
+
+ if (prev_therm_lvl == chip->thermal_levels - 1) {
+ /*
+ * If previously highest value was selected charging must have
+ * been disabled. Enable charging by taking the DC and USB path
+ * out of suspend.
+ */
+ rc = smb135x_path_suspend(chip, DC, THERMAL, false);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set dc suspend rc %d\n", rc);
+ goto out;
+ }
+ rc = smb135x_path_suspend(chip, USB, THERMAL, false);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend rc %d\n", rc);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&chip->current_change_lock);
+ return rc;
+}
+
+static int smb135x_battery_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0, update_psy = 0;
+ struct smb135x_chg *chip = container_of(psy,
+ struct smb135x_chg, batt_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (!chip->bms_controlled_charging) {
+ rc = -EINVAL;
+ break;
+ }
+ switch (val->intval) {
+ case POWER_SUPPLY_STATUS_FULL:
+ rc = smb135x_charging_enable(chip, false);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable charging rc = %d\n",
+ rc);
+ } else {
+ chip->chg_done_batt_full = true;
+ update_psy = 1;
+ dev_dbg(chip->dev, "status = FULL chg_done_batt_full = %d",
+ chip->chg_done_batt_full);
+ }
+ break;
+ case POWER_SUPPLY_STATUS_DISCHARGING:
+ chip->chg_done_batt_full = false;
+ update_psy = 1;
+ dev_dbg(chip->dev, "status = DISCHARGING chg_done_batt_full = %d",
+ chip->chg_done_batt_full);
+ break;
+ case POWER_SUPPLY_STATUS_CHARGING:
+ rc = smb135x_charging_enable(chip, true);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable charging rc = %d\n",
+ rc);
+ } else {
+ chip->chg_done_batt_full = false;
+ dev_dbg(chip->dev, "status = CHARGING chg_done_batt_full = %d",
+ chip->chg_done_batt_full);
+ }
+ break;
+ default:
+ update_psy = 0;
+ rc = -EINVAL;
+ }
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ smb135x_charging(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ chip->fake_battery_soc = val->intval;
+ update_psy = 1;
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ smb135x_system_temp_level_set(chip, val->intval);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ if (!rc && update_psy)
+ power_supply_changed(&chip->batt_psy);
+ return rc;
+}
+
+static int smb135x_battery_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ case POWER_SUPPLY_PROP_CAPACITY:
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+
+static int smb135x_battery_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smb135x_chg *chip = container_of(psy,
+ struct smb135x_chg, batt_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = smb135x_get_prop_batt_status(chip);
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = smb135x_get_prop_batt_present(chip);
+ break;
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = chip->chg_enabled;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = smb135x_get_prop_charge_type(chip);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = smb135x_get_prop_batt_capacity(chip);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = smb135x_get_prop_batt_health(chip);
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
+ val->intval = chip->therm_lvl_sel;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static enum power_supply_property smb135x_dc_properties[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_HEALTH,
+};
+
+static int smb135x_dc_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smb135x_chg *chip = container_of(psy,
+ struct smb135x_chg, dc_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = chip->dc_present;
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = chip->chg_enabled ? chip->dc_present : 0;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = chip->dc_present;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define MIN_FLOAT_MV 3600
+#define MAX_FLOAT_MV 4500
+
+#define MID_RANGE_FLOAT_MV_MIN 3600
+#define MID_RANGE_FLOAT_MIN_VAL 0x05
+#define MID_RANGE_FLOAT_STEP_MV 20
+
+#define HIGH_RANGE_FLOAT_MIN_MV 4340
+#define HIGH_RANGE_FLOAT_MIN_VAL 0x2A
+#define HIGH_RANGE_FLOAT_STEP_MV 10
+
+#define VHIGH_RANGE_FLOAT_MIN_MV 4400
+#define VHIGH_RANGE_FLOAT_MIN_VAL 0x2E
+#define VHIGH_RANGE_FLOAT_STEP_MV 20
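+/*
+ * Float voltage encoding example: 4200mV falls in the mid range, so
+ * temp = 0x05 + (4200 - 3600) / 20 = 0x23 is written to VFLOAT_REG;
+ * 4350mV falls in the high range and encodes to
+ * 0x2A + (4350 - 4340) / 10 = 0x2B.
+ */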
+static int smb135x_float_voltage_set(struct smb135x_chg *chip, int vfloat_mv)
+{
+ u8 temp;
+
+ if ((vfloat_mv < MIN_FLOAT_MV) || (vfloat_mv > MAX_FLOAT_MV)) {
+ dev_err(chip->dev, "bad float voltage mv =%d asked to set\n",
+ vfloat_mv);
+ return -EINVAL;
+ }
+
+ if (vfloat_mv <= HIGH_RANGE_FLOAT_MIN_MV) {
+ /* mid range */
+ temp = MID_RANGE_FLOAT_MIN_VAL
+ + (vfloat_mv - MID_RANGE_FLOAT_MV_MIN)
+ / MID_RANGE_FLOAT_STEP_MV;
+ } else if (vfloat_mv < VHIGH_RANGE_FLOAT_MIN_MV) {
+ /* high range */
+ temp = HIGH_RANGE_FLOAT_MIN_VAL
+ + (vfloat_mv - HIGH_RANGE_FLOAT_MIN_MV)
+ / HIGH_RANGE_FLOAT_STEP_MV;
+ } else {
+ /* very high range */
+ temp = VHIGH_RANGE_FLOAT_MIN_VAL
+ + (vfloat_mv - VHIGH_RANGE_FLOAT_MIN_MV)
+ / VHIGH_RANGE_FLOAT_STEP_MV;
+ }
+
+ return smb135x_write(chip, VFLOAT_REG, temp);
+}
+
+static int smb135x_set_resume_threshold(struct smb135x_chg *chip,
+ int resume_delta_mv)
+{
+ int rc;
+ u8 reg;
+
+ if (!chip->inhibit_disabled) {
+ if (resume_delta_mv < 100)
+ reg = CHG_INHIBIT_50MV_VAL;
+ else if (resume_delta_mv < 200)
+ reg = CHG_INHIBIT_100MV_VAL;
+ else if (resume_delta_mv < 300)
+ reg = CHG_INHIBIT_200MV_VAL;
+ else
+ reg = CHG_INHIBIT_300MV_VAL;
+
+ rc = smb135x_masked_write(chip, CFG_4_REG, CHG_INHIBIT_MASK,
+ reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set inhibit val rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (resume_delta_mv < 200)
+ reg = 0;
+ else
+ reg = RECHARGE_200MV_BIT;
+
+ rc = smb135x_masked_write(chip, CFG_5_REG, RECHARGE_200MV_BIT, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set recharge rc = %d\n", rc);
+ return rc;
+ }
+ return 0;
+}
+
+static enum power_supply_property smb135x_parallel_properties[] = {
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+};
+
+static int smb135x_parallel_set_chg_present(struct smb135x_chg *chip,
+ int present)
+{
+ u8 val;
+ int rc;
+
+ if (present == chip->parallel_charger_present) {
+ pr_debug("present %d -> %d, skipping\n",
+ chip->parallel_charger_present, present);
+ return 0;
+ }
+
+ if (present) {
+ /* Check if SMB135x is present */
+ rc = smb135x_read(chip, VERSION1_REG, &val);
+ if (rc) {
+ pr_debug("Failed to detect smb135x-parallel charger may be absent\n");
+ return -ENODEV;
+ }
+
+ rc = smb135x_enable_volatile_writes(chip);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't configure for volatile rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* set the float voltage */
+ if (chip->vfloat_mv != -EINVAL) {
+ rc = smb135x_float_voltage_set(chip, chip->vfloat_mv);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* resume threshold */
+ if (chip->resume_delta_mv != -EINVAL) {
+ smb135x_set_resume_threshold(chip,
+ chip->resume_delta_mv);
+ }
+
+ rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT,
+ USE_REGISTER_FOR_CURRENT,
+ USE_REGISTER_FOR_CURRENT);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set input limit cmd rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* set chg en by pin active low and enable auto recharge */
+ rc = smb135x_masked_write(chip, CFG_14_REG,
+ CHG_EN_BY_PIN_BIT | CHG_EN_ACTIVE_LOW_BIT
+ | DISABLE_AUTO_RECHARGE_BIT,
+ CHG_EN_BY_PIN_BIT | CHG_EN_ACTIVE_LOW_BIT);
+
+ /* set bit 0 = 100mA bit 1 = 500mA and set register control */
+ rc = smb135x_masked_write(chip, CFG_E_REG,
+ POLARITY_100_500_BIT | USB_CTRL_BY_PIN_BIT,
+ POLARITY_100_500_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set usbin cfg rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* control USB suspend via command bits */
+ rc = smb135x_masked_write(chip, USBIN_DCIN_CFG_REG,
+ USBIN_SUSPEND_VIA_COMMAND_BIT,
+ USBIN_SUSPEND_VIA_COMMAND_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set cfg rc=%d\n", rc);
+ return rc;
+ }
+
+ /* set the fastchg_current to the lowest setting */
+ if (chip->fastchg_current_arr_size > 0)
+ rc = smb135x_set_fastchg_current(chip,
+ chip->fastchg_current_table[0]);
+
+ /*
+ * enforce chip->chg_enabled since this could be the first
+ * time we have i2c access to the charger after
+ * chip->chg_enabled has been modified
+ */
+ smb135x_charging(chip, chip->chg_enabled);
+ }
+
+ chip->parallel_charger_present = present;
+ /*
+ * When present is being set force USB suspend, start charging
+ * only when CURRENT_MAX is set.
+ *
+ * Usually the chip will be shut down (no i2c access to the chip)
+ * when USB is removed; however, there could be situations when
+ * it is not. To cover for USB reinsertions in such situations,
+ * force USB suspend when present is being unset.
+ * It is likely that i2c access could fail here - do not return error.
+ * (It is not possible to detect whether the chip is in shutdown state
+ * or not except for the i2c error).
+ */
+ chip->usb_psy_ma = SUSPEND_CURRENT_MA;
+ rc = smb135x_path_suspend(chip, USB, CURRENT, true);
+
+ if (present) {
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't set usb suspend to true rc = %d\n",
+ rc);
+ return rc;
+ }
+ /* Check if the USB is configured for suspend. If not, do it */
+ mutex_lock(&chip->path_suspend_lock);
+ rc = smb135x_read(chip, CMD_INPUT_LIMIT, &val);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't read 0x%02x rc:%d\n", CMD_INPUT_LIMIT,
+ rc);
+ mutex_unlock(&chip->path_suspend_lock);
+ return rc;
+ } else if (!(val & BIT(6))) {
+ rc = __smb135x_usb_suspend(chip, 1);
+ }
+ mutex_unlock(&chip->path_suspend_lock);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't set usb to suspend rc:%d\n", rc);
+ return rc;
+ }
+ } else {
+ chip->real_usb_psy_ma = SUSPEND_CURRENT_MA;
+ }
+ return 0;
+}
+
+static int smb135x_parallel_set_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ const union power_supply_propval *val)
+{
+ int rc = 0;
+ struct smb135x_chg *chip = container_of(psy,
+ struct smb135x_chg, parallel_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ if (chip->parallel_charger_present)
+ smb135x_charging(chip, val->intval);
+ else
+ chip->chg_enabled = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ rc = smb135x_parallel_set_chg_present(chip, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ if (chip->parallel_charger_present) {
+ rc = smb135x_set_fastchg_current(chip,
+ val->intval / 1000);
+ }
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if (chip->parallel_charger_present) {
+ chip->usb_psy_ma = val->intval / 1000;
+ rc = smb135x_set_usb_chg_current(chip,
+ chip->usb_psy_ma);
+ }
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ if (chip->parallel_charger_present &&
+ (chip->vfloat_mv != val->intval)) {
+ rc = smb135x_float_voltage_set(chip, val->intval);
+ if (!rc)
+ chip->vfloat_mv = val->intval;
+ } else {
+ chip->vfloat_mv = val->intval;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static int smb135x_parallel_is_writeable(struct power_supply *psy,
+ enum power_supply_property prop)
+{
+ int rc;
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ rc = 1;
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+ return rc;
+}
+static int smb135x_parallel_get_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct smb135x_chg *chip = container_of(psy,
+ struct smb135x_chg, parallel_psy);
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_CHARGING_ENABLED:
+ val->intval = chip->chg_enabled;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ if (chip->parallel_charger_present)
+ val->intval = smb135x_get_usb_chg_current(chip) * 1000;
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ val->intval = chip->vfloat_mv;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = chip->parallel_charger_present;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ if (chip->parallel_charger_present)
+ val->intval = smb135x_get_fastchg_current(chip) * 1000;
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ if (chip->parallel_charger_present)
+ val->intval = smb135x_get_prop_batt_status(chip);
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void smb135x_external_power_changed(struct power_supply *psy)
+{
+ struct smb135x_chg *chip = container_of(psy,
+ struct smb135x_chg, batt_psy);
+ union power_supply_propval prop = {0,};
+ int rc, current_limit = 0;
+
+ if (!chip->usb_psy)
+ return;
+
+ if (chip->bms_psy_name)
+ chip->bms_psy =
+ power_supply_get_by_name((char *)chip->bms_psy_name);
+
+ rc = chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &prop);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "could not read USB current_max property, rc=%d\n", rc);
+ else
+ current_limit = prop.intval / 1000;
+
+ pr_debug("current_limit = %d\n", current_limit);
+
+ if (chip->usb_psy_ma != current_limit) {
+ mutex_lock(&chip->current_change_lock);
+ chip->usb_psy_ma = current_limit;
+ rc = smb135x_set_appropriate_current(chip, USB);
+ mutex_unlock(&chip->current_change_lock);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set usb current rc = %d\n",
+ rc);
+ }
+
+ rc = chip->usb_psy->get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_ONLINE, &prop);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "could not read USB ONLINE property, rc=%d\n", rc);
+
+ /* update online property */
+ rc = 0;
+ if (chip->usb_present && chip->chg_enabled && chip->usb_psy_ma != 0) {
+ if (prop.intval == 0)
+ rc = power_supply_set_online(chip->usb_psy, true);
+ } else {
+ if (prop.intval == 1)
+ rc = power_supply_set_online(chip->usb_psy, false);
+ }
+ if (rc < 0)
+ dev_err(chip->dev, "could not set usb online, rc=%d\n", rc);
+}
+
+static bool elapsed_msec_greater(struct timeval *start_time,
+ struct timeval *end_time, int ms)
+{
+ int msec_elapsed;
+
+ msec_elapsed = (end_time->tv_sec - start_time->tv_sec) * 1000 +
+ DIV_ROUND_UP(end_time->tv_usec - start_time->tv_usec, 1000);
+
+ return (msec_elapsed > ms);
+}
+
+#define MAX_STEP_MS 10
+static int smb135x_chg_otg_enable(struct smb135x_chg *chip)
+{
+ int rc = 0;
+ int restart_count = 0;
+ struct timeval time_a, time_b, time_c, time_d;
+ u8 reg;
+
+ if (chip->revision == REV_2) {
+ /*
+ * Workaround for a hardware bug where the OTG needs to be
+ * enabled, disabled and enabled again for it to actually be enabled.
+ * The time between each step should be at most MAX_STEP_MS.
+ *
+ * Note that if enable-disable executes within the timeframe
+ * but the final enable takes more than MAX_STEP_MS, we treat
+ * it as the first enable and try disabling again. We don't
+ * want to issue enable back to back.
+ *
+ * Notice the instances when time is captured and the
+ * successive steps.
+ * timeA-enable-timeC-disable-timeB-enable-timeD.
+ * When
+ * (timeB - timeA) < MAX_STEP_MS AND
+ * (timeC - timeD) < MAX_STEP_MS
+ * then it is guaranteed that the successive steps
+ * must have executed within MAX_STEP_MS
+ */
+ do_gettimeofday(&time_a);
+restart_from_enable:
+ /* first step - enable otg */
+ rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, OTG_EN);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+ rc);
+ return rc;
+ }
+
+restart_from_disable:
+ /* second step - disable otg */
+ do_gettimeofday(&time_c);
+ rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+ rc);
+ return rc;
+ }
+ do_gettimeofday(&time_b);
+
+ if (elapsed_msec_greater(&time_a, &time_b, MAX_STEP_MS)) {
+ restart_count++;
+ if (restart_count > 10) {
+ dev_err(chip->dev,
+ "Couldn't enable OTG restart_count=%d\n",
+ restart_count);
+ return -EAGAIN;
+ }
+ time_a = time_b;
+ pr_debug("restarting from first enable\n");
+ goto restart_from_enable;
+ }
+
+ /* third step (first step in case of a failure) - enable otg */
+ time_a = time_b;
+ rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, OTG_EN);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+ rc);
+ return rc;
+ }
+ do_gettimeofday(&time_d);
+
+ if (elapsed_msec_greater(&time_c, &time_d, MAX_STEP_MS)) {
+ restart_count++;
+ if (restart_count > 10) {
+ dev_err(chip->dev,
+ "Couldn't enable OTG restart_count=%d\n",
+ restart_count);
+ return -EAGAIN;
+ }
+ pr_debug("restarting from disable\n");
+ goto restart_from_disable;
+ }
+ } else {
+ rc = smb135x_read(chip, CMD_CHG_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read cmd reg rc=%d\n",
+ rc);
+ return rc;
+ }
+ if (reg & OTG_EN) {
+ /* if it is set, disable it before re-enabling it */
+ rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, OTG_EN);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+static int smb135x_chg_otg_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smb135x_chg *chip = rdev_get_drvdata(rdev);
+
+ chip->otg_oc_count = 0;
+ rc = smb135x_chg_otg_enable(chip);
+ if (rc)
+ dev_err(chip->dev, "Couldn't enable otg regulator rc=%d\n", rc);
+
+ return rc;
+}
+
+static int smb135x_chg_otg_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ struct smb135x_chg *chip = rdev_get_drvdata(rdev);
+
+ mutex_lock(&chip->otg_oc_count_lock);
+ cancel_delayed_work_sync(&chip->reset_otg_oc_count_work);
+ mutex_unlock(&chip->otg_oc_count_lock);
+ rc = smb135x_masked_write(chip, CMD_CHG_REG, OTG_EN, 0);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't disable OTG mode rc=%d\n", rc);
+ return rc;
+}
+
+static int smb135x_chg_otg_regulator_is_enable(struct regulator_dev *rdev)
+{
+ int rc = 0;
+ u8 reg = 0;
+ struct smb135x_chg *chip = rdev_get_drvdata(rdev);
+
+ rc = smb135x_read(chip, CMD_CHG_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read OTG enable bit rc=%d\n", rc);
+ return rc;
+ }
+
+ return (reg & OTG_EN) ? 1 : 0;
+}
+
+struct regulator_ops smb135x_chg_otg_reg_ops = {
+ .enable = smb135x_chg_otg_regulator_enable,
+ .disable = smb135x_chg_otg_regulator_disable,
+ .is_enabled = smb135x_chg_otg_regulator_is_enable,
+};
+
+static int smb135x_set_current_tables(struct smb135x_chg *chip)
+{
+ switch (chip->version) {
+ case V_SMB1356:
+ chip->usb_current_table = usb_current_table_smb1356;
+ chip->usb_current_arr_size
+ = ARRAY_SIZE(usb_current_table_smb1356);
+ chip->dc_current_table = dc_current_table_smb1356;
+ chip->dc_current_arr_size
+ = ARRAY_SIZE(dc_current_table_smb1356);
+ chip->fastchg_current_table = NULL;
+ chip->fastchg_current_arr_size = 0;
+ break;
+ case V_SMB1357:
+ chip->usb_current_table = usb_current_table_smb1357_smb1358;
+ chip->usb_current_arr_size
+ = ARRAY_SIZE(usb_current_table_smb1357_smb1358);
+ chip->dc_current_table = dc_current_table;
+ chip->dc_current_arr_size = ARRAY_SIZE(dc_current_table);
+ chip->fastchg_current_table = fastchg_current_table;
+ chip->fastchg_current_arr_size
+ = ARRAY_SIZE(fastchg_current_table);
+ break;
+ case V_SMB1358:
+ chip->usb_current_table = usb_current_table_smb1357_smb1358;
+ chip->usb_current_arr_size
+ = ARRAY_SIZE(usb_current_table_smb1357_smb1358);
+ chip->dc_current_table = dc_current_table;
+ chip->dc_current_arr_size = ARRAY_SIZE(dc_current_table);
+ chip->fastchg_current_table = NULL;
+ chip->fastchg_current_arr_size = 0;
+ break;
+ case V_SMB1359:
+ chip->usb_current_table = usb_current_table_smb1359;
+ chip->usb_current_arr_size
+ = ARRAY_SIZE(usb_current_table_smb1359);
+ chip->dc_current_table = dc_current_table;
+ chip->dc_current_arr_size = ARRAY_SIZE(dc_current_table);
+ chip->fastchg_current_table = NULL;
+ chip->fastchg_current_arr_size = 0;
+ break;
+ }
+ return 0;
+}
+
+#define SMB1356_VERSION3_BIT BIT(7)
+#define SMB1357_VERSION1_VAL 0x01
+#define SMB1358_VERSION1_VAL 0x02
+#define SMB1359_VERSION1_VAL 0x00
+#define SMB1357_VERSION2_VAL 0x01
+#define SMB1358_VERSION2_VAL 0x02
+#define SMB1359_VERSION2_VAL 0x00
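+/*
+ * Identify the chip variant: SMB1356 is flagged by a dedicated bit in the
+ * version3 register, while SMB1357/1358/1359 are distinguished by version1
+ * (revision <= 1.1) or version2 (later revisions).
+ */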
+static int smb135x_chip_version_and_revision(struct smb135x_chg *chip)
+{
+ int rc;
+ u8 version1, version2, version3;
+
+ /* read the revision */
+ rc = read_revision(chip, &chip->revision);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read revision rc = %d\n", rc);
+ return rc;
+ }
+
+ if (chip->revision >= REV_MAX || revision_str[chip->revision] == NULL) {
+ dev_err(chip->dev, "Bad revision found = %d\n", chip->revision);
+ return -EINVAL;
+ }
+
+ /* check if it is smb1356 */
+ rc = read_version3(chip, &version3);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read version3 rc = %d\n", rc);
+ return rc;
+ }
+
+ if (version3 & SMB1356_VERSION3_BIT) {
+ chip->version = V_SMB1356;
+ goto wrkarnd_and_input_current_values;
+ }
+
+ /* check if it is smb1357, smb1358 or smb1359 based on revision */
+ if (chip->revision <= REV_1_1) {
+ rc = read_version1(chip, &version1);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read version 1 rc = %d\n", rc);
+ return rc;
+ }
+ switch (version1) {
+ case SMB1357_VERSION1_VAL:
+ chip->version = V_SMB1357;
+ break;
+ case SMB1358_VERSION1_VAL:
+ chip->version = V_SMB1358;
+ break;
+ case SMB1359_VERSION1_VAL:
+ chip->version = V_SMB1359;
+ break;
+ default:
+ dev_err(chip->dev,
+ "Unknown version 1 = 0x%02x rc = %d\n",
+ version1, rc);
+ return rc;
+ }
+ } else {
+ rc = read_version2(chip, &version2);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read version 2 rc = %d\n", rc);
+ return rc;
+ }
+ switch (version2) {
+ case SMB1357_VERSION2_VAL:
+ chip->version = V_SMB1357;
+ break;
+ case SMB1358_VERSION2_VAL:
+ chip->version = V_SMB1358;
+ break;
+ case SMB1359_VERSION2_VAL:
+ chip->version = V_SMB1359;
+ break;
+ default:
+ dev_err(chip->dev,
+ "Unknown version 2 = 0x%02x rc = %d\n",
+ version2, rc);
+ return rc;
+ }
+ }
+
+wrkarnd_and_input_current_values:
+ if (is_usb100_broken(chip))
+ chip->workaround_flags |= WRKARND_USB100_BIT;
+ /*
+ * Rev v1.0 and v1.1 of SMB135x fail charger type detection
+ * (apsd) due to interference on the D+/- lines by the USB phy.
+ * Set the workaround flag to disable charger type reporting
+ * for this revision.
+ */
+ if (chip->revision <= REV_1_1)
+ chip->workaround_flags |= WRKARND_APSD_FAIL;
+
+ pr_debug("workaround_flags = %x\n", chip->workaround_flags);
+
+ return smb135x_set_current_tables(chip);
+}
+
+static int smb135x_regulator_init(struct smb135x_chg *chip)
+{
+ int rc = 0;
+ struct regulator_init_data *init_data;
+ struct regulator_config cfg = {};
+
+ init_data = of_get_regulator_init_data(chip->dev, chip->dev->of_node);
+ if (!init_data) {
+ dev_err(chip->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ if (init_data->constraints.name) {
+ chip->otg_vreg.rdesc.owner = THIS_MODULE;
+ chip->otg_vreg.rdesc.type = REGULATOR_VOLTAGE;
+ chip->otg_vreg.rdesc.ops = &smb135x_chg_otg_reg_ops;
+ chip->otg_vreg.rdesc.name = init_data->constraints.name;
+
+ cfg.dev = chip->dev;
+ cfg.init_data = init_data;
+ cfg.driver_data = chip;
+ cfg.of_node = chip->dev->of_node;
+
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+
+ chip->otg_vreg.rdev = regulator_register(
+ &chip->otg_vreg.rdesc, &cfg);
+ if (IS_ERR(chip->otg_vreg.rdev)) {
+ rc = PTR_ERR(chip->otg_vreg.rdev);
+ chip->otg_vreg.rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(chip->dev,
+ "OTG reg failed, rc=%d\n", rc);
+ }
+ }
+
+ return rc;
+}
+
+static void smb135x_regulator_deinit(struct smb135x_chg *chip)
+{
+ if (chip->otg_vreg.rdev)
+ regulator_unregister(chip->otg_vreg.rdev);
+}
+
+static void wireless_insertion_work(struct work_struct *work)
+{
+ struct smb135x_chg *chip =
+ container_of(work, struct smb135x_chg,
+ wireless_insertion_work.work);
+
+ /* unsuspend dc */
+ smb135x_path_suspend(chip, DC, CURRENT, false);
+}
+
+static int hot_hard_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+ chip->batt_hot = !!rt_stat;
+ return 0;
+}
+static int cold_hard_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+ chip->batt_cold = !!rt_stat;
+ return 0;
+}
+static int hot_soft_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+ chip->batt_warm = !!rt_stat;
+ return 0;
+}
+static int cold_soft_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+ chip->batt_cool = !!rt_stat;
+ return 0;
+}
+static int battery_missing_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+ chip->batt_present = !rt_stat;
+ return 0;
+}
+static int vbat_low_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_warn("vbat low\n");
+ return 0;
+}
+static int chg_hot_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_warn("chg hot\n");
+ return 0;
+}
+static int chg_term_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+ /*
+ * This handler gets called even when the charger based termination
+ * is disabled (due to change in RT status). However, in a bms
+ * controlled design the battery status should not be updated.
+ */
+ if (!chip->iterm_disabled)
+ chip->chg_done_batt_full = !!rt_stat;
+ return 0;
+}
+
+static int taper_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+ return 0;
+}
+
+static int fast_chg_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+ if (rt_stat & IRQ_C_FASTCHG_BIT)
+ chip->chg_done_batt_full = false;
+
+ return 0;
+}
+
+static int recharge_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ int rc;
+
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+ if (chip->bms_controlled_charging) {
+ rc = smb135x_charging_enable(chip, true);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't enable charging rc = %d\n",
+ rc);
+ }
+
+ return 0;
+}
+
+static int safety_timeout_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_warn("safety timeout rt_stat = 0x%02x\n", rt_stat);
+ return 0;
+}
+
+/**
+ * power_ok_handler() - called when the switcher turns on or turns off
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating switcher turning on or off
+ */
+static int power_ok_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+ return 0;
+}
+
+static int rid_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ bool usb_slave_present;
+
+ usb_slave_present = is_usb_slave_present(chip);
+
+ if (chip->usb_slave_present ^ usb_slave_present) {
+ chip->usb_slave_present = usb_slave_present;
+ if (chip->usb_psy) {
+ pr_debug("setting usb psy usb_otg = %d\n",
+ chip->usb_slave_present);
+ power_supply_set_usb_otg(chip->usb_psy,
+ chip->usb_slave_present);
+ }
+ }
+ return 0;
+}
+
+#define RESET_OTG_OC_COUNT_MS 100
+static void reset_otg_oc_count_work(struct work_struct *work)
+{
+ struct smb135x_chg *chip =
+ container_of(work, struct smb135x_chg,
+ reset_otg_oc_count_work.work);
+
+ mutex_lock(&chip->otg_oc_count_lock);
+ pr_debug("It has been %dmS since OverCurrent interrupt resetting the count\n",
+ RESET_OTG_OC_COUNT_MS);
+ chip->otg_oc_count = 0;
+ mutex_unlock(&chip->otg_oc_count_lock);
+}
+
+#define MAX_OTG_RETRY 3
+static int otg_oc_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ int rc;
+
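+ /*
+ * On an OTG overcurrent interrupt, retry enabling OTG until the retry
+ * count reaches MAX_OTG_RETRY; the count is cleared by the delayed work
+ * if no further overcurrent events arrive within RESET_OTG_OC_COUNT_MS.
+ */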
+ mutex_lock(&chip->otg_oc_count_lock);
+ cancel_delayed_work_sync(&chip->reset_otg_oc_count_work);
+ ++chip->otg_oc_count;
+ if (chip->otg_oc_count < MAX_OTG_RETRY) {
+ rc = smb135x_chg_otg_enable(chip);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't enable OTG mode rc=%d\n",
+ rc);
+ } else {
+ pr_warn_ratelimited("Tried enabling OTG %d times, the USB slave is nonconformant.\n",
+ chip->otg_oc_count);
+ }
+
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+ schedule_delayed_work(&chip->reset_otg_oc_count_work,
+ msecs_to_jiffies(RESET_OTG_OC_COUNT_MS));
+ mutex_unlock(&chip->otg_oc_count_lock);
+ return 0;
+}
+
+static int handle_dc_removal(struct smb135x_chg *chip)
+{
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIRELESS) {
+ cancel_delayed_work_sync(&chip->wireless_insertion_work);
+ smb135x_path_suspend(chip, DC, CURRENT, true);
+ }
+
+ if (chip->dc_psy_type != -EINVAL)
+ power_supply_set_online(&chip->dc_psy, chip->dc_present);
+ return 0;
+}
+
+#define DCIN_UNSUSPEND_DELAY_MS 1000
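+/*
+ * For wireless chargers the DC path stays suspended at insertion and is
+ * unsuspended by wireless_insertion_work after DCIN_UNSUSPEND_DELAY_MS.
+ */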
+static int handle_dc_insertion(struct smb135x_chg *chip)
+{
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIRELESS)
+ schedule_delayed_work(&chip->wireless_insertion_work,
+ msecs_to_jiffies(DCIN_UNSUSPEND_DELAY_MS));
+ if (chip->dc_psy_type != -EINVAL)
+ power_supply_set_online(&chip->dc_psy,
+ chip->dc_present);
+
+ return 0;
+}
+/**
+ * dcin_uv_handler() - called when the dc voltage crosses the uv threshold
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating whether dc voltage is uv
+ */
+static int dcin_uv_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ /*
+ * rt_stat indicates if dc is undervolted. If so dc_present
+ * should be marked removed
+ */
+ bool dc_present = !rt_stat;
+
+ pr_debug("chip->dc_present = %d dc_present = %d\n",
+ chip->dc_present, dc_present);
+
+ if (chip->dc_present && !dc_present) {
+ /* dc removed */
+ chip->dc_present = dc_present;
+ handle_dc_removal(chip);
+ }
+
+ if (!chip->dc_present && dc_present) {
+ /* dc inserted */
+ chip->dc_present = dc_present;
+ handle_dc_insertion(chip);
+ }
+
+ return 0;
+}
+
+static int dcin_ov_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ /*
+ * rt_stat indicates if dc is overvolted. If so dc_present
+ * should be marked removed
+ */
+ bool dc_present = !rt_stat;
+
+ pr_debug("chip->dc_present = %d dc_present = %d\n",
+ chip->dc_present, dc_present);
+
+ chip->dc_ov = !!rt_stat;
+
+ if (chip->dc_present && !dc_present) {
+ /* dc removed */
+ chip->dc_present = dc_present;
+ handle_dc_removal(chip);
+ }
+
+ if (!chip->dc_present && dc_present) {
+ /* dc inserted */
+ chip->dc_present = dc_present;
+ handle_dc_insertion(chip);
+ }
+ return 0;
+}
+
+static int handle_usb_removal(struct smb135x_chg *chip)
+{
+ if (chip->usb_psy) {
+ cancel_delayed_work_sync(&chip->hvdcp_det_work);
+ pm_relax(chip->dev);
+ pr_debug("setting usb psy type = %d\n",
+ POWER_SUPPLY_TYPE_UNKNOWN);
+ power_supply_set_supply_type(chip->usb_psy,
+ POWER_SUPPLY_TYPE_UNKNOWN);
+ pr_debug("setting usb psy present = %d\n", chip->usb_present);
+ power_supply_set_present(chip->usb_psy, chip->usb_present);
+ pr_debug("Setting usb psy dp=r dm=r\n");
+ power_supply_set_dp_dm(chip->usb_psy,
+ POWER_SUPPLY_DP_DM_DPR_DMR);
+ }
+ return 0;
+}
+
+static int rerun_apsd(struct smb135x_chg *chip)
+{
+ int rc;
+
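+ /*
+ * Toggle APSD off and on while briefly restricting the adapter
+ * allowance to 9V only, so that automatic power source detection is
+ * re-run once the allowance is widened back to 5V-9V.
+ */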
+ pr_debug("Reruning APSD\nDisabling APSD\n");
+ rc = smb135x_masked_write(chip, CFG_11_REG, AUTO_SRC_DET_EN_BIT, 0);
+ if (rc) {
+ dev_err(chip->dev, "Couldn't Disable APSD rc=%d\n", rc);
+ return rc;
+ }
+ pr_debug("Allow only 9V chargers\n");
+ rc = smb135x_masked_write(chip, CFG_C_REG,
+ USBIN_ADAPTER_ALLOWANCE_MASK, ALLOW_9V_ONLY);
+ if (rc)
+ dev_err(chip->dev, "Couldn't Allow 9V rc=%d\n", rc);
+ pr_debug("Enabling APSD\n");
+ rc = smb135x_masked_write(chip, CFG_11_REG, AUTO_SRC_DET_EN_BIT, 1);
+ if (rc)
+ dev_err(chip->dev, "Couldn't Enable APSD rc=%d\n", rc);
+ pr_debug("Allow 5V-9V\n");
+ rc = smb135x_masked_write(chip, CFG_C_REG,
+ USBIN_ADAPTER_ALLOWANCE_MASK, ALLOW_5V_TO_9V);
+ if (rc)
+ dev_err(chip->dev, "Couldn't Allow 5V-9V rc=%d\n", rc);
+ return rc;
+}
+
+static void smb135x_hvdcp_det_work(struct work_struct *work)
+{
+ int rc;
+ u8 reg;
+ struct smb135x_chg *chip = container_of(work, struct smb135x_chg,
+ hvdcp_det_work.work);
+
+ rc = smb135x_read(chip, STATUS_7_REG, &reg);
+ if (rc) {
+ pr_err("Couldn't read STATUS_7_REG rc == %d\n", rc);
+ goto end;
+ }
+ pr_debug("STATUS_7_REG = 0x%02X\n", reg);
+
+ if (reg) {
+ pr_debug("HVDCP detected; notifying USB PSY\n");
+ power_supply_set_supply_type(chip->usb_psy,
+ POWER_SUPPLY_TYPE_USB_HVDCP);
+ }
+end:
+ pm_relax(chip->dev);
+}
+
+#define HVDCP_NOTIFY_MS 2500
+static int handle_usb_insertion(struct smb135x_chg *chip)
+{
+ u8 reg;
+ int rc;
+ char *usb_type_name = "null";
+ enum power_supply_type usb_supply_type;
+
+ /* usb inserted */
+ rc = smb135x_read(chip, STATUS_5_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 5 rc = %d\n", rc);
+ return rc;
+ }
+ /*
+ * Report the charger type as UNKNOWN if the
+ * apsd-fail flag is set. This notifies the USB driver
+ * to initiate a s/w based charger type detection.
+ */
+ if (chip->workaround_flags & WRKARND_APSD_FAIL)
+ reg = 0;
+
+ usb_type_name = get_usb_type_name(reg);
+ usb_supply_type = get_usb_supply_type(reg);
+ pr_debug("inserted %s, usb psy type = %d stat_5 = 0x%02x apsd_rerun = %d\n",
+ usb_type_name, usb_supply_type, reg, chip->apsd_rerun);
+
+ if (chip->batt_present && !chip->apsd_rerun && chip->usb_psy) {
+ if (usb_supply_type == POWER_SUPPLY_TYPE_USB) {
+ pr_debug("Setting usb psy dp=f dm=f SDP and rerun\n");
+ power_supply_set_dp_dm(chip->usb_psy,
+ POWER_SUPPLY_DP_DM_DPF_DMF);
+ chip->apsd_rerun = true;
+ rerun_apsd(chip);
+ /* rising edge of src detect will happen in a few ms */
+ return 0;
+ } else {
+ pr_debug("Set usb psy dp=f dm=f DCP and no rerun\n");
+ power_supply_set_dp_dm(chip->usb_psy,
+ POWER_SUPPLY_DP_DM_DPF_DMF);
+ }
+ }
+
+ if (usb_supply_type == POWER_SUPPLY_TYPE_USB_DCP) {
+ pr_debug("schedule hvdcp detection worker\n");
+ pm_stay_awake(chip->dev);
+ schedule_delayed_work(&chip->hvdcp_det_work,
+ msecs_to_jiffies(HVDCP_NOTIFY_MS));
+ }
+
+ if (chip->usb_psy) {
+ if (chip->bms_controlled_charging) {
+ /* enable charging on USB insertion */
+ rc = smb135x_charging_enable(chip, true);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't enable charging rc = %d\n",
+ rc);
+ }
+ pr_debug("setting usb psy type = %d\n", usb_supply_type);
+ power_supply_set_supply_type(chip->usb_psy, usb_supply_type);
+ pr_debug("setting usb psy present = %d\n", chip->usb_present);
+ power_supply_set_present(chip->usb_psy, chip->usb_present);
+ }
+ chip->apsd_rerun = false;
+ return 0;
+}
+
+/**
+ * usbin_uv_handler()
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating chg insertion/removal
+ */
+static int usbin_uv_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ /*
+ * rt_stat indicates if usb is undervolted
+ */
+ bool usb_present = !rt_stat;
+
+ pr_debug("chip->usb_present = %d usb_present = %d\n",
+ chip->usb_present, usb_present);
+
+ return 0;
+}
+
+static int usbin_ov_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ /*
+ * rt_stat indicates if usb is overvolted. If so usb_present
+ * should be marked removed
+ */
+ bool usb_present = !rt_stat;
+ int health;
+
+ pr_debug("chip->usb_present = %d usb_present = %d\n",
+ chip->usb_present, usb_present);
+ if (chip->usb_present && !usb_present) {
+ /* USB removed */
+ chip->usb_present = usb_present;
+ handle_usb_removal(chip);
+ } else if (!chip->usb_present && usb_present) {
+ /* USB inserted */
+ chip->usb_present = usb_present;
+ handle_usb_insertion(chip);
+ }
+
+ if (chip->usb_psy) {
+ health = rt_stat ? POWER_SUPPLY_HEALTH_OVERVOLTAGE
+ : POWER_SUPPLY_HEALTH_GOOD;
+ power_supply_set_health_state(chip->usb_psy, health);
+ }
+
+ return 0;
+}
+
+/**
+ * src_detect_handler() - this is called on rising edge when USB
+ * charger type is detected and on falling edge when
+ * USB voltage falls below the coarse detect voltage
+ * (1V), use it for handling USB charger insertion
+ * and removal.
+ * @chip: pointer to smb135x_chg chip
+ * @rt_stat: the status bit indicating chg insertion/removal
+ */
+static int src_detect_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ bool usb_present = !!rt_stat;
+
+ pr_debug("chip->usb_present = %d usb_present = %d\n",
+ chip->usb_present, usb_present);
+
+ if (!chip->usb_present && usb_present) {
+ /* USB inserted */
+ chip->usb_present = usb_present;
+ handle_usb_insertion(chip);
+ } else if (usb_present && chip->apsd_rerun) {
+ handle_usb_insertion(chip);
+ } else if (chip->usb_present && !usb_present) {
+ chip->usb_present = !chip->usb_present;
+ handle_usb_removal(chip);
+ }
+
+ return 0;
+}
+
+static int chg_inhibit_handler(struct smb135x_chg *chip, u8 rt_stat)
+{
+ /*
+ * The charger is inserted when the battery voltage is high,
+ * so h/w won't start charging just yet. Treat this as
+ * battery full.
+ */
+ pr_debug("rt_stat = 0x%02x\n", rt_stat);
+
+ if (!chip->inhibit_disabled)
+ chip->chg_done_batt_full = !!rt_stat;
+ return 0;
+}
+
+struct smb_irq_info {
+ const char *name;
+ int (*smb_irq)(struct smb135x_chg *chip,
+ u8 rt_stat);
+ int high;
+ int low;
+};
+
+struct irq_handler_info {
+ u8 stat_reg;
+ u8 val;
+ u8 prev_val;
+ struct smb_irq_info irq_info[4];
+};
+
+static struct irq_handler_info handlers[] = {
+ {IRQ_A_REG, 0, 0,
+ {
+ {
+ .name = "cold_soft",
+ .smb_irq = cold_soft_handler,
+ },
+ {
+ .name = "hot_soft",
+ .smb_irq = hot_soft_handler,
+ },
+ {
+ .name = "cold_hard",
+ .smb_irq = cold_hard_handler,
+ },
+ {
+ .name = "hot_hard",
+ .smb_irq = hot_hard_handler,
+ },
+ },
+ },
+ {IRQ_B_REG, 0, 0,
+ {
+ {
+ .name = "chg_hot",
+ .smb_irq = chg_hot_handler,
+ },
+ {
+ .name = "vbat_low",
+ .smb_irq = vbat_low_handler,
+ },
+ {
+ .name = "battery_missing",
+ .smb_irq = battery_missing_handler,
+ },
+ {
+ .name = "battery_missing",
+ .smb_irq = battery_missing_handler,
+ },
+ },
+ },
+ {IRQ_C_REG, 0, 0,
+ {
+ {
+ .name = "chg_term",
+ .smb_irq = chg_term_handler,
+ },
+ {
+ .name = "taper",
+ .smb_irq = taper_handler,
+ },
+ {
+ .name = "recharge",
+ .smb_irq = recharge_handler,
+ },
+ {
+ .name = "fast_chg",
+ .smb_irq = fast_chg_handler,
+ },
+ },
+ },
+ {IRQ_D_REG, 0, 0,
+ {
+ {
+ .name = "prechg_timeout",
+ },
+ {
+ .name = "safety_timeout",
+ .smb_irq = safety_timeout_handler,
+ },
+ {
+ .name = "aicl_done",
+ },
+ {
+ .name = "battery_ov",
+ },
+ },
+ },
+ {IRQ_E_REG, 0, 0,
+ {
+ {
+ .name = "usbin_uv",
+ .smb_irq = usbin_uv_handler,
+ },
+ {
+ .name = "usbin_ov",
+ .smb_irq = usbin_ov_handler,
+ },
+ {
+ .name = "dcin_uv",
+ .smb_irq = dcin_uv_handler,
+ },
+ {
+ .name = "dcin_ov",
+ .smb_irq = dcin_ov_handler,
+ },
+ },
+ },
+ {IRQ_F_REG, 0, 0,
+ {
+ {
+ .name = "power_ok",
+ .smb_irq = power_ok_handler,
+ },
+ {
+ .name = "rid",
+ .smb_irq = rid_handler,
+ },
+ {
+ .name = "otg_fail",
+ },
+ {
+ .name = "otg_oc",
+ .smb_irq = otg_oc_handler,
+ },
+ },
+ },
+ {IRQ_G_REG, 0, 0,
+ {
+ {
+ .name = "chg_inhibit",
+ .smb_irq = chg_inhibit_handler,
+ },
+ {
+ .name = "chg_error",
+ },
+ {
+ .name = "wd_timeout",
+ },
+ {
+ .name = "src_detect",
+ .smb_irq = src_detect_handler,
+ },
+ },
+ },
+};
+
+static int smb135x_irq_read(struct smb135x_chg *chip)
+{
+ int rc, i;
+
+ /*
+ * When the dcin path is suspended, the irq triggered status is not
+ * cleared, causing an interrupt storm. To prevent this, unsuspend the
+ * dcin path while reading the interrupts and then restore its state.
+ */
+ mutex_lock(&chip->path_suspend_lock);
+
+ if (chip->dc_suspended)
+ __smb135x_dc_suspend(chip, false);
+
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ rc = smb135x_read(chip, handlers[i].stat_reg,
+ &handlers[i].val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read %d rc = %d\n",
+ handlers[i].stat_reg, rc);
+ handlers[i].val = 0;
+ continue;
+ }
+ }
+
+ if (chip->dc_suspended)
+ __smb135x_dc_suspend(chip, true);
+
+ mutex_unlock(&chip->path_suspend_lock);
+
+ return rc;
+}
+#define IRQ_LATCHED_MASK 0x02
+#define IRQ_STATUS_MASK 0x01
+#define BITS_PER_IRQ 2
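+/*
+ * Each IRQ status register packs four interrupt sources, two bits per
+ * source: the lower bit is the real-time status and the upper bit is the
+ * latched (triggered) status.
+ */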
+static irqreturn_t smb135x_chg_stat_handler(int irq, void *dev_id)
+{
+ struct smb135x_chg *chip = dev_id;
+ int i, j;
+ u8 triggered;
+ u8 changed;
+ u8 rt_stat, prev_rt_stat;
+ int rc;
+ int handler_count = 0;
+
+ mutex_lock(&chip->irq_complete);
+ chip->irq_waiting = true;
+ if (!chip->resume_completed) {
+ dev_dbg(chip->dev, "IRQ triggered before device-resume\n");
+ disable_irq_nosync(irq);
+ mutex_unlock(&chip->irq_complete);
+ return IRQ_HANDLED;
+ }
+ chip->irq_waiting = false;
+
+ smb135x_irq_read(chip);
+ for (i = 0; i < ARRAY_SIZE(handlers); i++) {
+ for (j = 0; j < ARRAY_SIZE(handlers[i].irq_info); j++) {
+ triggered = handlers[i].val
+ & (IRQ_LATCHED_MASK << (j * BITS_PER_IRQ));
+ rt_stat = handlers[i].val
+ & (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+ prev_rt_stat = handlers[i].prev_val
+ & (IRQ_STATUS_MASK << (j * BITS_PER_IRQ));
+ changed = prev_rt_stat ^ rt_stat;
+
+ if (triggered || changed)
+ rt_stat ? handlers[i].irq_info[j].high++ :
+ handlers[i].irq_info[j].low++;
+
+ if ((triggered || changed)
+ && handlers[i].irq_info[j].smb_irq != NULL) {
+ handler_count++;
+ rc = handlers[i].irq_info[j].smb_irq(chip,
+ rt_stat);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "Couldn't handle %d irq for reg 0x%02x rc = %d\n",
+ j, handlers[i].stat_reg, rc);
+ }
+ }
+ handlers[i].prev_val = handlers[i].val;
+ }
+
+ pr_debug("handler count = %d\n", handler_count);
+ if (handler_count) {
+ pr_debug("batt psy changed\n");
+ power_supply_changed(&chip->batt_psy);
+ if (chip->usb_psy) {
+ pr_debug("usb psy changed\n");
+ power_supply_changed(chip->usb_psy);
+ }
+ if (chip->dc_psy_type != -EINVAL) {
+ pr_debug("dc psy changed\n");
+ power_supply_changed(&chip->dc_psy);
+ }
+ }
+
+ mutex_unlock(&chip->irq_complete);
+
+ return IRQ_HANDLED;
+}
+
+#define LAST_CNFG_REG 0x1F
+static int show_cnfg_regs(struct seq_file *m, void *data)
+{
+ struct smb135x_chg *chip = m->private;
+ int rc;
+ u8 reg;
+ u8 addr;
+
+ for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+ rc = smb135x_read(chip, addr, &reg);
+ if (!rc)
+ seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ return 0;
+}
+
+static int cnfg_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct smb135x_chg *chip = inode->i_private;
+
+ return single_open(file, show_cnfg_regs, chip);
+}
+
+static const struct file_operations cnfg_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = cnfg_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define FIRST_CMD_REG 0x40
+#define LAST_CMD_REG 0x42
+static int show_cmd_regs(struct seq_file *m, void *data)
+{
+ struct smb135x_chg *chip = m->private;
+ int rc;
+ u8 reg;
+ u8 addr;
+
+ for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+ rc = smb135x_read(chip, addr, &reg);
+ if (!rc)
+ seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ return 0;
+}
+
+static int cmd_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct smb135x_chg *chip = inode->i_private;
+
+ return single_open(file, show_cmd_regs, chip);
+}
+
+static const struct file_operations cmd_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = cmd_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+#define FIRST_STATUS_REG 0x46
+#define LAST_STATUS_REG 0x56
+static int show_status_regs(struct seq_file *m, void *data)
+{
+ struct smb135x_chg *chip = m->private;
+ int rc;
+ u8 reg;
+ u8 addr;
+
+ for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+ rc = smb135x_read(chip, addr, &reg);
+ if (!rc)
+ seq_printf(m, "0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ return 0;
+}
+
+static int status_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct smb135x_chg *chip = inode->i_private;
+
+ return single_open(file, show_status_regs, chip);
+}
+
+static const struct file_operations status_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = status_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int show_irq_count(struct seq_file *m, void *data)
+{
+ int i, j, total = 0;
+
+ for (i = 0; i < ARRAY_SIZE(handlers); i++)
+ for (j = 0; j < 4; j++) {
+ seq_printf(m, "%s=%d\t(high=%d low=%d)\n",
+ handlers[i].irq_info[j].name,
+ handlers[i].irq_info[j].high
+ + handlers[i].irq_info[j].low,
+ handlers[i].irq_info[j].high,
+ handlers[i].irq_info[j].low);
+ total += (handlers[i].irq_info[j].high
+ + handlers[i].irq_info[j].low);
+ }
+
+ seq_printf(m, "\n\tTotal = %d\n", total);
+
+ return 0;
+}
+
+static int irq_count_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct smb135x_chg *chip = inode->i_private;
+
+ return single_open(file, show_irq_count, chip);
+}
+
+static const struct file_operations irq_count_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = irq_count_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
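+/*
+ * debugfs peek/poke helpers: the "address" file selects peek_poke_address
+ * and the "data" file reads or writes that register via get_reg()/set_reg().
+ */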
+static int get_reg(void *data, u64 *val)
+{
+ struct smb135x_chg *chip = data;
+ int rc;
+ u8 temp;
+
+ rc = smb135x_read(chip, chip->peek_poke_address, &temp);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't read reg %x rc = %d\n",
+ chip->peek_poke_address, rc);
+ return -EAGAIN;
+ }
+ *val = temp;
+ return 0;
+}
+
+static int set_reg(void *data, u64 val)
+{
+ struct smb135x_chg *chip = data;
+ int rc;
+ u8 temp;
+
+ temp = (u8) val;
+ rc = smb135x_write(chip, chip->peek_poke_address, temp);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't write 0x%02x to 0x%02x rc= %d\n",
+ chip->peek_poke_address, temp, rc);
+ return -EAGAIN;
+ }
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(poke_poke_debug_ops, get_reg, set_reg, "0x%02llx\n");
+
+static int force_irq_set(void *data, u64 val)
+{
+ struct smb135x_chg *chip = data;
+
+ smb135x_chg_stat_handler(chip->client->irq, data);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_irq_ops, NULL, force_irq_set, "0x%02llx\n");
+
+static int force_rechg_set(void *data, u64 val)
+{
+ int rc = 0;
+ struct smb135x_chg *chip = data;
+
+ if (!chip->chg_enabled) {
+ pr_debug("Charging Disabled force recharge not allowed\n");
+ return -EINVAL;
+ }
+
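+ /*
+ * Force a recharge cycle: temporarily lift charge-inhibit (when it is
+ * in use), toggle charging off and back on, then restore the
+ * charge-inhibit setting.
+ */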
+ if (!chip->inhibit_disabled) {
+ rc = smb135x_masked_write(chip, CFG_14_REG, EN_CHG_INHIBIT_BIT,
+ 0);
+ if (rc)
+ dev_err(chip->dev,
+ "Couldn't disable charge-inhibit rc=%d\n", rc);
+
+ /* delay for charge-inhibit to take effect */
+ msleep(500);
+ }
+
+ rc |= smb135x_charging(chip, false);
+ rc |= smb135x_charging(chip, true);
+
+ if (!chip->inhibit_disabled) {
+ rc |= smb135x_masked_write(chip, CFG_14_REG,
+ EN_CHG_INHIBIT_BIT, EN_CHG_INHIBIT_BIT);
+ if (rc)
+ dev_err(chip->dev,
+ "Couldn't enable charge-inhibit rc=%d\n", rc);
+ }
+
+ return rc;
+}
+DEFINE_SIMPLE_ATTRIBUTE(force_rechg_ops, NULL, force_rechg_set, "0x%02llx\n");
+
+#ifdef DEBUG
+static void dump_regs(struct smb135x_chg *chip)
+{
+ int rc;
+ u8 reg;
+ u8 addr;
+
+ for (addr = 0; addr <= LAST_CNFG_REG; addr++) {
+ rc = smb135x_read(chip, addr, &reg);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+ addr, rc);
+ else
+ pr_debug("0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ for (addr = FIRST_STATUS_REG; addr <= LAST_STATUS_REG; addr++) {
+ rc = smb135x_read(chip, addr, &reg);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+ addr, rc);
+ else
+ pr_debug("0x%02x = 0x%02x\n", addr, reg);
+ }
+
+ for (addr = FIRST_CMD_REG; addr <= LAST_CMD_REG; addr++) {
+ rc = smb135x_read(chip, addr, &reg);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't read 0x%02x rc = %d\n",
+ addr, rc);
+ else
+ pr_debug("0x%02x = 0x%02x\n", addr, reg);
+ }
+}
+#else
+static void dump_regs(struct smb135x_chg *chip)
+{
+}
+#endif
+static int determine_initial_status(struct smb135x_chg *chip)
+{
+ int rc;
+ u8 reg;
+
+ /*
+ * It is okay to read the interrupt status here since
+ * interrupts aren't requested yet. Reading the interrupt status
+ * clears the interrupt, so be careful to read the interrupt
+ * status only in interrupt handling code.
+ */
+
+ chip->batt_present = true;
+ rc = smb135x_read(chip, IRQ_B_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read irq b rc = %d\n", rc);
+ return rc;
+ }
+ if (reg & IRQ_B_BATT_TERMINAL_BIT || reg & IRQ_B_BATT_MISSING_BIT)
+ chip->batt_present = false;
+ rc = smb135x_read(chip, STATUS_4_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read status 4 rc = %d\n", rc);
+ return rc;
+ }
+ /* treat battery gone if less than 2V */
+ if (reg & BATT_LESS_THAN_2V)
+ chip->batt_present = false;
+
+ rc = smb135x_read(chip, IRQ_A_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read irq A rc = %d\n", rc);
+ return rc;
+ }
+
+ if (reg & IRQ_A_HOT_HARD_BIT)
+ chip->batt_hot = true;
+ if (reg & IRQ_A_COLD_HARD_BIT)
+ chip->batt_cold = true;
+ if (reg & IRQ_A_HOT_SOFT_BIT)
+ chip->batt_warm = true;
+ if (reg & IRQ_A_COLD_SOFT_BIT)
+ chip->batt_cool = true;
+
+ rc = smb135x_read(chip, IRQ_C_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read irq A rc = %d\n", rc);
+ return rc;
+ }
+ if (reg & IRQ_C_TERM_BIT)
+ chip->chg_done_batt_full = true;
+
+ rc = smb135x_read(chip, IRQ_E_REG, &reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read irq E rc = %d\n", rc);
+ return rc;
+ }
+ chip->usb_present = !(reg & IRQ_E_USB_OV_BIT)
+ && !(reg & IRQ_E_USB_UV_BIT);
+ chip->dc_present = !(reg & IRQ_E_DC_OV_BIT) && !(reg & IRQ_E_DC_UV_BIT);
+
+ if (chip->usb_present)
+ handle_usb_insertion(chip);
+ else
+ handle_usb_removal(chip);
+
+ if (chip->dc_psy_type != -EINVAL) {
+ if (chip->dc_psy_type == POWER_SUPPLY_TYPE_WIRELESS) {
+ /*
+ * put the dc path in suspend state if it is powered
+ * by wireless charger
+ */
+ if (chip->dc_present)
+ smb135x_path_suspend(chip, DC, CURRENT, false);
+ else
+ smb135x_path_suspend(chip, DC, CURRENT, true);
+ }
+ }
+
+ chip->usb_slave_present = is_usb_slave_present(chip);
+ if (chip->usb_psy && !chip->id_line_not_connected) {
+ pr_debug("setting usb psy usb_otg = %d\n",
+ chip->usb_slave_present);
+ power_supply_set_usb_otg(chip->usb_psy,
+ chip->usb_slave_present);
+ }
+ return 0;
+}
+
+static int smb135x_hw_init(struct smb135x_chg *chip)
+{
+ int rc;
+ int i;
+ u8 reg, mask;
+
+ if (chip->pinctrl_state_name) {
+ chip->smb_pinctrl = pinctrl_get_select(chip->dev,
+ chip->pinctrl_state_name);
+ if (IS_ERR(chip->smb_pinctrl)) {
+ pr_err("Could not get/set %s pinctrl state rc = %ld\n",
+ chip->pinctrl_state_name,
+ PTR_ERR(chip->smb_pinctrl));
+ return PTR_ERR(chip->smb_pinctrl);
+ }
+ }
+
+ if (chip->therm_bias_vreg) {
+ rc = regulator_enable(chip->therm_bias_vreg);
+ if (rc) {
+ pr_err("Couldn't enable therm-bias rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ /*
+ * Enable the USB data line pull-up regulator; this is needed for the
+ * D+ line to be at the proper voltage for HVDCP charger detection.
+ */
+ if (chip->usb_pullup_vreg) {
+ rc = regulator_enable(chip->usb_pullup_vreg);
+ if (rc) {
+ pr_err("Unable to enable data line pull-up regulator rc=%d\n",
+ rc);
+ if (chip->therm_bias_vreg)
+ regulator_disable(chip->therm_bias_vreg);
+ return rc;
+ }
+ }
+
+ rc = smb135x_enable_volatile_writes(chip);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't configure for volatile rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+
+ /*
+ * force using current from the register i.e. ignore auto
+ * power source detect (APSD) mA ratings
+ */
+ mask = USE_REGISTER_FOR_CURRENT;
+
+ if (chip->workaround_flags & WRKARND_USB100_BIT)
+ reg = 0;
+ else
+ /* this ignores APSD results */
+ reg = USE_REGISTER_FOR_CURRENT;
+
+ rc = smb135x_masked_write(chip, CMD_INPUT_LIMIT, mask, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set input limit cmd rc=%d\n", rc);
+ goto free_regulator;
+ }
+
+ /* set bit 0 = 100mA bit 1 = 500mA and set register control */
+ rc = smb135x_masked_write(chip, CFG_E_REG,
+ POLARITY_100_500_BIT | USB_CTRL_BY_PIN_BIT,
+ POLARITY_100_500_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set usbin cfg rc=%d\n", rc);
+ goto free_regulator;
+ }
+
+ /*
+ * set chg en by cmd register, set chg en by writing bit 1,
+ * enable auto pre to fast, enable current termination, enable
+ * auto recharge, enable chg inhibition based on the dt flag
+ */
+ if (chip->inhibit_disabled)
+ reg = 0;
+ else
+ reg = EN_CHG_INHIBIT_BIT;
+
+ rc = smb135x_masked_write(chip, CFG_14_REG,
+ CHG_EN_BY_PIN_BIT | CHG_EN_ACTIVE_LOW_BIT
+ | PRE_TO_FAST_REQ_CMD_BIT | DISABLE_AUTO_RECHARGE_BIT
+ | EN_CHG_INHIBIT_BIT, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set cfg 14 rc=%d\n", rc);
+ goto free_regulator;
+ }
+
+ /* control USB suspend via command bits */
+ rc = smb135x_masked_write(chip, USBIN_DCIN_CFG_REG,
+ USBIN_SUSPEND_VIA_COMMAND_BIT, USBIN_SUSPEND_VIA_COMMAND_BIT);
+
+ /* set the float voltage */
+ if (chip->vfloat_mv != -EINVAL) {
+ rc = smb135x_float_voltage_set(chip, chip->vfloat_mv);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set float voltage rc = %d\n", rc);
+ goto free_regulator;
+ }
+ }
+
+ /* set iterm */
+ if (chip->iterm_ma != -EINVAL) {
+ if (chip->iterm_disabled) {
+ dev_err(chip->dev, "Error: Both iterm_disabled and iterm_ma set\n");
+ rc = -EINVAL;
+ goto free_regulator;
+ } else {
+ if (chip->iterm_ma <= 50)
+ reg = CHG_ITERM_50MA;
+ else if (chip->iterm_ma <= 100)
+ reg = CHG_ITERM_100MA;
+ else if (chip->iterm_ma <= 150)
+ reg = CHG_ITERM_150MA;
+ else if (chip->iterm_ma <= 200)
+ reg = CHG_ITERM_200MA;
+ else if (chip->iterm_ma <= 250)
+ reg = CHG_ITERM_250MA;
+ else if (chip->iterm_ma <= 300)
+ reg = CHG_ITERM_300MA;
+ else if (chip->iterm_ma <= 500)
+ reg = CHG_ITERM_500MA;
+ else
+ reg = CHG_ITERM_600MA;
+
+ rc = smb135x_masked_write(chip, CFG_3_REG,
+ CHG_ITERM_MASK, reg);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't set iterm rc = %d\n", rc);
+ goto free_regulator;
+ }
+
+ rc = smb135x_masked_write(chip, CFG_14_REG,
+ DISABLE_CURRENT_TERM_BIT, 0);
+ if (rc) {
+ dev_err(chip->dev,
+ "Couldn't enable iterm rc = %d\n", rc);
+ goto free_regulator;
+ }
+ }
+ } else if (chip->iterm_disabled) {
+ rc = smb135x_masked_write(chip, CFG_14_REG,
+ DISABLE_CURRENT_TERM_BIT,
+ DISABLE_CURRENT_TERM_BIT);
+ if (rc) {
+ dev_err(chip->dev, "Couldn't set iterm rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+ }
+
+ /* set the charging safety timer */
+ if (chip->safety_time != -EINVAL) {
+ if (chip->safety_time == 0) {
+ /* safety timer disabled */
+ reg = 1 << SAFETY_TIME_EN_SHIFT;
+ rc = smb135x_masked_write(chip, CFG_16_REG,
+ SAFETY_TIME_EN_BIT, reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't disable safety timer rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(chg_time); i++) {
+ if (chip->safety_time <= chg_time[i]) {
+ reg = i << SAFETY_TIME_MINUTES_SHIFT;
+ break;
+ }
+ }
+ rc = smb135x_masked_write(chip, CFG_16_REG,
+ SAFETY_TIME_EN_BIT | SAFETY_TIME_MINUTES_MASK,
+ reg);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't set safety timer rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+ }
+ }
+
+ /* battery missing detection */
+ rc = smb135x_masked_write(chip, CFG_19_REG,
+ BATT_MISSING_ALGO_BIT | BATT_MISSING_THERM_BIT,
+ chip->bmd_algo_disabled ? BATT_MISSING_THERM_BIT :
+ BATT_MISSING_ALGO_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set batt_missing config = %d\n",
+ rc);
+ goto free_regulator;
+ }
+
+ /* set maximum fastchg current */
+ if (chip->fastchg_ma != -EINVAL) {
+ rc = smb135x_set_fastchg_current(chip, chip->fastchg_ma);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set fastchg current = %d\n",
+ rc);
+ goto free_regulator;
+ }
+ }
+
+ if (chip->usb_pullup_vreg) {
+ /* enable 9V HVDCP adapter support */
+ rc = smb135x_masked_write(chip, CFG_E_REG, HVDCP_5_9_BIT,
+ HVDCP_5_9_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "Couldn't request for 5 or 9V rc=%d\n", rc);
+ goto free_regulator;
+ }
+ }
+
+ if (chip->gamma_setting) {
+ rc = smb135x_masked_write(chip, CFG_1B_REG, COLD_HARD_MASK,
+ chip->gamma_setting[0] << COLD_HARD_SHIFT);
+
+ rc |= smb135x_masked_write(chip, CFG_1B_REG, HOT_HARD_MASK,
+ chip->gamma_setting[1] << HOT_HARD_SHIFT);
+
+ rc |= smb135x_masked_write(chip, CFG_1B_REG, COLD_SOFT_MASK,
+ chip->gamma_setting[2] << COLD_SOFT_SHIFT);
+
+ rc |= smb135x_masked_write(chip, CFG_1B_REG, HOT_SOFT_MASK,
+ chip->gamma_setting[3] << HOT_SOFT_SHIFT);
+ if (rc < 0)
+ goto free_regulator;
+ }
+
+ __smb135x_charging(chip, chip->chg_enabled);
+
+ /* interrupt enabling - active low */
+ if (chip->client->irq) {
+ mask = CHG_STAT_IRQ_ONLY_BIT | CHG_STAT_ACTIVE_HIGH_BIT
+ | CHG_STAT_DISABLE_BIT;
+ reg = CHG_STAT_IRQ_ONLY_BIT;
+ rc = smb135x_masked_write(chip, CFG_17_REG, mask, reg);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set irq config rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+
+ /* enabling only interesting interrupts */
+ rc = smb135x_write(chip, IRQ_CFG_REG,
+ IRQ_BAT_HOT_COLD_HARD_BIT
+ | IRQ_BAT_HOT_COLD_SOFT_BIT
+ | IRQ_OTG_OVER_CURRENT_BIT
+ | IRQ_INTERNAL_TEMPERATURE_BIT
+ | IRQ_USBIN_UV_BIT);
+
+ rc |= smb135x_write(chip, IRQ2_CFG_REG,
+ IRQ2_SAFETY_TIMER_BIT
+ | IRQ2_CHG_ERR_BIT
+ | IRQ2_CHG_PHASE_CHANGE_BIT
+ | IRQ2_POWER_OK_BIT
+ | IRQ2_BATT_MISSING_BIT
+ | IRQ2_VBAT_LOW_BIT);
+
+ rc |= smb135x_write(chip, IRQ3_CFG_REG, IRQ3_SRC_DETECT_BIT
+ | IRQ3_DCIN_UV_BIT | IRQ3_RID_DETECT_BIT);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set irq enable rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+ }
+
+ /* resume threshold */
+ if (chip->resume_delta_mv != -EINVAL)
+ smb135x_set_resume_threshold(chip, chip->resume_delta_mv);
+
+ /* DC path current settings */
+ if (chip->dc_psy_type != -EINVAL) {
+ rc = smb135x_set_dc_chg_current(chip, chip->dc_psy_ma);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set dc charge current rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+ }
+
+ /*
+ * on some devices the battery is powered via external sources which
+ * could raise its voltage above the float voltage. smb135x chips go
+ * into reverse boost in such a situation and the workaround is to
+ * disable float voltage compensation (note that the battery will appear
+ * hot/cold when powered via external source).
+ */
+
+ if (chip->soft_vfloat_comp_disabled) {
+ mask = HOT_SOFT_VFLOAT_COMP_EN_BIT
+ | COLD_SOFT_VFLOAT_COMP_EN_BIT;
+ rc = smb135x_masked_write(chip, CFG_1A_REG, mask, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable soft vfloat rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+ }
+
+ if (chip->soft_current_comp_disabled) {
+ mask = HOT_SOFT_CURRENT_COMP_EN_BIT
+ | COLD_SOFT_CURRENT_COMP_EN_BIT;
+ rc = smb135x_masked_write(chip, CFG_1A_REG, mask, 0);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't disable soft current rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+ }
+
+ /*
+ * Command mode for OTG control. This gives us RID interrupts but keeps
+ * enabling the 5V OTG via i2c register control
+ */
+ rc = smb135x_masked_write(chip, USBIN_OTG_REG, OTG_CNFG_MASK,
+ OTG_CNFG_COMMAND_CTRL);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write to otg cfg reg rc = %d\n",
+ rc);
+ goto free_regulator;
+ }
+ return 0;
+
+free_regulator:
+ if (chip->therm_bias_vreg)
+ regulator_disable(chip->therm_bias_vreg);
+ if (chip->usb_pullup_vreg)
+ regulator_disable(chip->usb_pullup_vreg);
+ return rc;
+}
+
+static struct of_device_id smb135x_match_table[] = {
+ {
+ .compatible = "qcom,smb1356-charger",
+ .data = &version_data[V_SMB1356],
+ },
+ {
+ .compatible = "qcom,smb1357-charger",
+ .data = &version_data[V_SMB1357],
+ },
+ {
+ .compatible = "qcom,smb1358-charger",
+ .data = &version_data[V_SMB1358],
+ },
+ {
+ .compatible = "qcom,smb1359-charger",
+ .data = &version_data[V_SMB1359],
+ },
+ { },
+};
+
+#define DC_MA_MIN 300
+#define DC_MA_MAX 2000
+#define NUM_GAMMA_VALUES 4
+static int smb_parse_dt(struct smb135x_chg *chip)
+{
+ int rc;
+ struct device_node *node = chip->dev->of_node;
+ const char *dc_psy_type;
+
+ if (!node) {
+ dev_err(chip->dev, "device tree info. missing\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+ &chip->vfloat_mv);
+ if (rc < 0)
+ chip->vfloat_mv = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,charging-timeout",
+ &chip->safety_time);
+ if (rc < 0)
+ chip->safety_time = -EINVAL;
+
+ if (!rc &&
+ (chip->safety_time > chg_time[ARRAY_SIZE(chg_time) - 1])) {
+ dev_err(chip->dev, "Bad charging-timeout %d\n",
+ chip->safety_time);
+ return -EINVAL;
+ }
+
+ chip->bmd_algo_disabled = of_property_read_bool(node,
+ "qcom,bmd-algo-disabled");
+
+ chip->dc_psy_type = -EINVAL;
+ dc_psy_type = of_get_property(node, "qcom,dc-psy-type", NULL);
+ if (dc_psy_type) {
+ if (strcmp(dc_psy_type, "Mains") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_MAINS;
+ else if (strcmp(dc_psy_type, "Wireless") == 0)
+ chip->dc_psy_type = POWER_SUPPLY_TYPE_WIRELESS;
+ }
+
+ if (chip->dc_psy_type != -EINVAL) {
+ rc = of_property_read_u32(node, "qcom,dc-psy-ma",
+ &chip->dc_psy_ma);
+ if (rc < 0) {
+ dev_err(chip->dev,
+ "no mA current for dc rc = %d\n", rc);
+ return rc;
+ }
+
+ if (chip->dc_psy_ma < DC_MA_MIN
+ || chip->dc_psy_ma > DC_MA_MAX) {
+ dev_err(chip->dev, "Bad dc mA %d\n", chip->dc_psy_ma);
+ return -EINVAL;
+ }
+ }
+
+ rc = of_property_read_u32(node, "qcom,recharge-thresh-mv",
+ &chip->resume_delta_mv);
+ if (rc < 0)
+ chip->resume_delta_mv = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,iterm-ma", &chip->iterm_ma);
+ if (rc < 0)
+ chip->iterm_ma = -EINVAL;
+
+ chip->iterm_disabled = of_property_read_bool(node,
+ "qcom,iterm-disabled");
+
+ chip->chg_disabled_permanently = (of_property_read_bool(node,
+ "qcom,charging-disabled"));
+ chip->chg_enabled = !chip->chg_disabled_permanently;
+
+ chip->inhibit_disabled = of_property_read_bool(node,
+ "qcom,inhibit-disabled");
+
+ chip->bms_controlled_charging = of_property_read_bool(node,
+ "qcom,bms-controlled-charging");
+
+ rc = of_property_read_string(node, "qcom,bms-psy-name",
+ &chip->bms_psy_name);
+ if (rc)
+ chip->bms_psy_name = NULL;
+
+ rc = of_property_read_u32(node, "qcom,fastchg-ma", &chip->fastchg_ma);
+ if (rc < 0)
+ chip->fastchg_ma = -EINVAL;
+
+ chip->soft_vfloat_comp_disabled = of_property_read_bool(node,
+ "qcom,soft-vfloat-comp-disabled");
+
+ chip->soft_current_comp_disabled = of_property_read_bool(node,
+ "qcom,soft-current-comp-disabled");
+
+ if (of_find_property(node, "therm-bias-supply", NULL)) {
+ /* get the thermistor bias regulator */
+ chip->therm_bias_vreg = devm_regulator_get(chip->dev,
+ "therm-bias");
+ if (IS_ERR(chip->therm_bias_vreg))
+ return PTR_ERR(chip->therm_bias_vreg);
+ }
+
+ /*
+ * The gamma value indicates the ratio of the pull-up resistors and the
+ * NTC resistor in the battery pack. There are 4 options; refer to the
+ * graphical user interface and choose the right one.
+ */
+ if (of_find_property(node, "qcom,gamma-setting",
+ &chip->gamma_setting_num)) {
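+ /*
+ * of_find_property() returned the property length in bytes;
+ * convert it to a count of u32 entries.
+ */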
+ chip->gamma_setting_num = chip->gamma_setting_num /
+ sizeof(chip->gamma_setting_num);
+ if (NUM_GAMMA_VALUES != chip->gamma_setting_num) {
+ pr_err("Gamma setting not correct!\n");
+ return -EINVAL;
+ }
+
+ chip->gamma_setting = devm_kzalloc(chip->dev,
+ chip->gamma_setting_num *
+ sizeof(chip->gamma_setting_num), GFP_KERNEL);
+ if (!chip->gamma_setting) {
+ pr_err("gamma setting kzalloc failed!\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(node,
+ "qcom,gamma-setting",
+ chip->gamma_setting, chip->gamma_setting_num);
+ if (rc) {
+ pr_err("Couldn't read gamma setting, rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ if (of_find_property(node, "qcom,thermal-mitigation",
+ &chip->thermal_levels)) {
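+ /*
+ * thermal_levels initially holds the property length in bytes; it
+ * is converted to an entry count after the allocation below.
+ */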
+ chip->thermal_mitigation = devm_kzalloc(chip->dev,
+ chip->thermal_levels,
+ GFP_KERNEL);
+
+ if (chip->thermal_mitigation == NULL) {
+ pr_err("thermal mitigation kzalloc() failed.\n");
+ return -ENOMEM;
+ }
+
+ chip->thermal_levels /= sizeof(int);
+ rc = of_property_read_u32_array(node,
+ "qcom,thermal-mitigation",
+ chip->thermal_mitigation, chip->thermal_levels);
+ if (rc) {
+ pr_err("Couldn't read threm limits rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ if (of_find_property(node, "usb-pullup-supply", NULL)) {
+ /* get the data line pull-up regulator */
+ chip->usb_pullup_vreg = devm_regulator_get(chip->dev,
+ "usb-pullup");
+ if (IS_ERR(chip->usb_pullup_vreg))
+ return PTR_ERR(chip->usb_pullup_vreg);
+ }
+
+ chip->pinctrl_state_name = of_get_property(node, "pinctrl-names", NULL);
+
+ chip->id_line_not_connected = of_property_read_bool(node,
+ "qcom,id-line-not-connected");
+ return 0;
+}
+
+static int create_debugfs_entries(struct smb135x_chg *chip)
+{
+ chip->debug_root = debugfs_create_dir("smb135x", NULL);
+ if (!chip->debug_root)
+ dev_err(chip->dev, "Couldn't create debug dir\n");
+
+ if (chip->debug_root) {
+ struct dentry *ent;
+
+ ent = debugfs_create_file("config_registers", S_IFREG | S_IRUGO,
+ chip->debug_root, chip,
+ &cnfg_debugfs_ops);
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create cnfg debug file\n");
+
+ ent = debugfs_create_file("status_registers", S_IFREG | S_IRUGO,
+ chip->debug_root, chip,
+ &status_debugfs_ops);
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create status debug file\n");
+
+ ent = debugfs_create_file("cmd_registers", S_IFREG | S_IRUGO,
+ chip->debug_root, chip,
+ &cmd_debugfs_ops);
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create cmd debug file\n");
+
+ ent = debugfs_create_x32("address", S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root,
+ &(chip->peek_poke_address));
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create address debug file\n");
+
+ ent = debugfs_create_file("data", S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root, chip,
+ &poke_poke_debug_ops);
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create data debug file\n");
+
+ ent = debugfs_create_file("force_irq",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root, chip,
+ &force_irq_ops);
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create force_irq debug file\n");
+
+ ent = debugfs_create_x32("skip_writes",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root,
+ &(chip->skip_writes));
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create skip writes debug file\n");
+
+ ent = debugfs_create_x32("skip_reads",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root,
+ &(chip->skip_reads));
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create skip reads debug file\n");
+
+ ent = debugfs_create_file("irq_count", S_IFREG | S_IRUGO,
+ chip->debug_root, chip,
+ &irq_count_debugfs_ops);
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create irq_count debug file\n");
+
+ ent = debugfs_create_file("force_recharge",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root, chip,
+ &force_rechg_ops);
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create force recharge debug file\n");
+
+ ent = debugfs_create_x32("usb_suspend_votes",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root,
+ &(chip->usb_suspended));
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create usb_suspend_votes file\n");
+
+ ent = debugfs_create_x32("dc_suspend_votes",
+ S_IFREG | S_IWUSR | S_IRUGO,
+ chip->debug_root,
+ &(chip->dc_suspended));
+ if (!ent)
+ dev_err(chip->dev,
+ "Couldn't create dc_suspend_votes file\n");
+ }
+ return 0;
+}
+
+static int is_parallel_charger(struct i2c_client *client)
+{
+ struct device_node *node = client->dev.of_node;
+
+ return of_property_read_bool(node, "qcom,parallel-charger");
+}
+
+static int smb135x_main_charger_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct smb135x_chg *chip;
+ struct power_supply *usb_psy;
+ u8 reg = 0;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&client->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ chip->client = client;
+ chip->dev = &client->dev;
+
+ rc = smb_parse_dt(chip);
+ if (rc < 0) {
+ dev_err(&client->dev, "Unable to parse DT nodes\n");
+ return rc;
+ }
+
+ usb_psy = power_supply_get_by_name("usb");
+ if (!usb_psy && chip->chg_enabled) {
+ dev_dbg(&client->dev, "USB supply not found; defer probe\n");
+ return -EPROBE_DEFER;
+ }
+ chip->usb_psy = usb_psy;
+
+ chip->fake_battery_soc = -EINVAL;
+
+ INIT_DELAYED_WORK(&chip->wireless_insertion_work,
+ wireless_insertion_work);
+
+ INIT_DELAYED_WORK(&chip->reset_otg_oc_count_work,
+ reset_otg_oc_count_work);
+ INIT_DELAYED_WORK(&chip->hvdcp_det_work, smb135x_hvdcp_det_work);
+ mutex_init(&chip->path_suspend_lock);
+ mutex_init(&chip->current_change_lock);
+ mutex_init(&chip->read_write_lock);
+ mutex_init(&chip->otg_oc_count_lock);
+ device_init_wakeup(chip->dev, true);
+ /* probe the device to check if it's actually connected */
+ rc = smb135x_read(chip, CFG_4_REG, &reg);
+ if (rc) {
+ pr_err("Failed to detect SMB135x, device may be absent\n");
+ return -ENODEV;
+ }
+
+ i2c_set_clientdata(client, chip);
+
+ rc = smb135x_chip_version_and_revision(chip);
+ if (rc) {
+ dev_err(&client->dev,
+ "Couldn't detect version/revision rc=%d\n", rc);
+ return rc;
+ }
+
+ dump_regs(chip);
+
+ rc = smb135x_regulator_init(chip);
+ if (rc) {
+ dev_err(&client->dev,
+ "Couldn't initialize regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smb135x_hw_init(chip);
+ if (rc < 0) {
+ dev_err(&client->dev,
+ "Unable to intialize hardware rc = %d\n", rc);
+ goto free_regulator;
+ }
+
+ rc = determine_initial_status(chip);
+ if (rc < 0) {
+ dev_err(&client->dev,
+ "Unable to determine init status rc = %d\n", rc);
+ goto free_regulator;
+ }
+
+ chip->batt_psy.name = "battery";
+ chip->batt_psy.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->batt_psy.get_property = smb135x_battery_get_property;
+ chip->batt_psy.set_property = smb135x_battery_set_property;
+ chip->batt_psy.properties = smb135x_battery_properties;
+ chip->batt_psy.num_properties = ARRAY_SIZE(smb135x_battery_properties);
+ chip->batt_psy.external_power_changed = smb135x_external_power_changed;
+ chip->batt_psy.property_is_writeable = smb135x_battery_is_writeable;
+
+ if (chip->bms_controlled_charging) {
+ chip->batt_psy.supplied_to = pm_batt_supplied_to;
+ chip->batt_psy.num_supplicants =
+ ARRAY_SIZE(pm_batt_supplied_to);
+ }
+
+ rc = power_supply_register(chip->dev, &chip->batt_psy);
+ if (rc < 0) {
+ dev_err(&client->dev,
+ "Unable to register batt_psy rc = %d\n", rc);
+ goto free_regulator;
+ }
+
+ if (chip->dc_psy_type != -EINVAL) {
+ chip->dc_psy.name = "dc";
+ chip->dc_psy.type = chip->dc_psy_type;
+ chip->dc_psy.get_property = smb135x_dc_get_property;
+ chip->dc_psy.properties = smb135x_dc_properties;
+ chip->dc_psy.num_properties = ARRAY_SIZE(smb135x_dc_properties);
+ rc = power_supply_register(chip->dev, &chip->dc_psy);
+ if (rc < 0) {
+ dev_err(&client->dev,
+ "Unable to register dc_psy rc = %d\n", rc);
+ goto unregister_batt_psy;
+ }
+ }
+
+ chip->resume_completed = true;
+ mutex_init(&chip->irq_complete);
+
+ /* STAT irq configuration */
+ if (client->irq) {
+ rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
+ smb135x_chg_stat_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "smb135x_chg_stat_irq", chip);
+ if (rc < 0) {
+ dev_err(&client->dev,
+ "request_irq for irq=%d failed rc = %d\n",
+ client->irq, rc);
+ goto unregister_dc_psy;
+ }
+ enable_irq_wake(client->irq);
+ }
+
+ create_debugfs_entries(chip);
+ dev_info(chip->dev, "SMB135X version = %s revision = %s successfully probed batt=%d dc = %d usb = %d\n",
+ version_str[chip->version],
+ revision_str[chip->revision],
+ smb135x_get_prop_batt_present(chip),
+ chip->dc_present, chip->usb_present);
+ return 0;
+
+unregister_dc_psy:
+ if (chip->dc_psy_type != -EINVAL)
+ power_supply_unregister(&chip->dc_psy);
+unregister_batt_psy:
+ power_supply_unregister(&chip->batt_psy);
+free_regulator:
+ smb135x_regulator_deinit(chip);
+ return rc;
+}
+
+static int smb135x_parallel_charger_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc;
+ struct smb135x_chg *chip;
+ const struct of_device_id *match;
+ struct device_node *node = client->dev.of_node;
+
+ chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&client->dev, "Unable to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ chip->client = client;
+ chip->dev = &client->dev;
+ chip->parallel_charger = true;
+ chip->dc_psy_type = -EINVAL;
+
+ chip->chg_enabled = !(of_property_read_bool(node,
+ "qcom,charging-disabled"));
+
+ rc = of_property_read_u32(node, "qcom,recharge-thresh-mv",
+ &chip->resume_delta_mv);
+ if (rc < 0)
+ chip->resume_delta_mv = -EINVAL;
+
+ rc = of_property_read_u32(node, "qcom,float-voltage-mv",
+ &chip->vfloat_mv);
+ if (rc < 0)
+ chip->vfloat_mv = -EINVAL;
+
+ mutex_init(&chip->path_suspend_lock);
+ mutex_init(&chip->current_change_lock);
+ mutex_init(&chip->read_write_lock);
+
+ match = of_match_node(smb135x_match_table, node);
+ if (match == NULL) {
+ dev_err(chip->dev, "device tree match not found\n");
+ return -EINVAL;
+ }
+
+ chip->version = *(int *)match->data;
+ smb135x_set_current_tables(chip);
+
+ i2c_set_clientdata(client, chip);
+
+ chip->parallel_psy.name = "usb-parallel";
+ chip->parallel_psy.type = POWER_SUPPLY_TYPE_USB_PARALLEL;
+ chip->parallel_psy.get_property = smb135x_parallel_get_property;
+ chip->parallel_psy.set_property = smb135x_parallel_set_property;
+ chip->parallel_psy.properties = smb135x_parallel_properties;
+ chip->parallel_psy.property_is_writeable
+ = smb135x_parallel_is_writeable;
+ chip->parallel_psy.num_properties
+ = ARRAY_SIZE(smb135x_parallel_properties);
+
+ rc = power_supply_register(chip->dev, &chip->parallel_psy);
+ if (rc < 0) {
+ dev_err(&client->dev,
+ "Unable to register parallel_psy rc = %d\n", rc);
+ return rc;
+ }
+
+ chip->resume_completed = true;
+ mutex_init(&chip->irq_complete);
+
+ create_debugfs_entries(chip);
+
+ dev_info(chip->dev, "SMB135X USB PARALLEL CHARGER version = %s successfully probed\n",
+ version_str[chip->version]);
+ return 0;
+}
+
+static int smb135x_charger_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ if (is_parallel_charger(client))
+ return smb135x_parallel_charger_probe(client, id);
+ else
+ return smb135x_main_charger_probe(client, id);
+}
+
+static int smb135x_charger_remove(struct i2c_client *client)
+{
+ int rc;
+ struct smb135x_chg *chip = i2c_get_clientdata(client);
+
+ debugfs_remove_recursive(chip->debug_root);
+
+ if (chip->parallel_charger) {
+ power_supply_unregister(&chip->parallel_psy);
+ goto mutex_destroy;
+ }
+
+ if (chip->therm_bias_vreg) {
+ rc = regulator_disable(chip->therm_bias_vreg);
+ if (rc)
+ pr_err("Couldn't disable therm-bias rc = %d\n", rc);
+ }
+
+ if (chip->usb_pullup_vreg) {
+ rc = regulator_disable(chip->usb_pullup_vreg);
+ if (rc)
+ pr_err("Couldn't disable data-pullup rc = %d\n", rc);
+ }
+
+ if (chip->dc_psy_type != -EINVAL)
+ power_supply_unregister(&chip->dc_psy);
+
+ power_supply_unregister(&chip->batt_psy);
+
+ smb135x_regulator_deinit(chip);
+
+mutex_destroy:
+ mutex_destroy(&chip->irq_complete);
+ return 0;
+}
+
+static int smb135x_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smb135x_chg *chip = i2c_get_clientdata(client);
+ int i, rc;
+
+ /* no suspend resume activities for parallel charger */
+ if (chip->parallel_charger)
+ return 0;
+
+ /* Save the current IRQ config */
+ for (i = 0; i < 3; i++) {
+ rc = smb135x_read(chip, IRQ_CFG_REG + i,
+ &chip->irq_cfg_mask[i]);
+ if (rc)
+ dev_err(chip->dev,
+ "Couldn't save irq cfg regs rc=%d\n", rc);
+ }
+
+ /* enable only important IRQs */
+ rc = smb135x_write(chip, IRQ_CFG_REG, IRQ_USBIN_UV_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set irq_cfg rc = %d\n", rc);
+
+ rc = smb135x_write(chip, IRQ2_CFG_REG, IRQ2_BATT_MISSING_BIT
+ | IRQ2_VBAT_LOW_BIT
+ | IRQ2_POWER_OK_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set irq2_cfg rc = %d\n", rc);
+
+ rc = smb135x_write(chip, IRQ3_CFG_REG, IRQ3_SRC_DETECT_BIT
+ | IRQ3_DCIN_UV_BIT | IRQ3_RID_DETECT_BIT);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't set irq3_cfg rc = %d\n", rc);
+
+ mutex_lock(&chip->irq_complete);
+ chip->resume_completed = false;
+ mutex_unlock(&chip->irq_complete);
+
+ return 0;
+}
+
+static int smb135x_suspend_noirq(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smb135x_chg *chip = i2c_get_clientdata(client);
+
+ /* no suspend resume activities for parallel charger */
+ if (chip->parallel_charger)
+ return 0;
+
+ if (chip->irq_waiting) {
+ pr_err_ratelimited("Aborting suspend, an interrupt was detected while suspending\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static int smb135x_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct smb135x_chg *chip = i2c_get_clientdata(client);
+ int i, rc;
+
+ /* no suspend resume activities for parallel charger */
+ if (chip->parallel_charger)
+ return 0;
+ /* Restore the IRQ config */
+ for (i = 0; i < 3; i++) {
+ rc = smb135x_write(chip, IRQ_CFG_REG + i,
+ chip->irq_cfg_mask[i]);
+ if (rc)
+ dev_err(chip->dev,
+ "Couldn't restore irq cfg regs rc=%d\n", rc);
+ }
+ mutex_lock(&chip->irq_complete);
+ chip->resume_completed = true;
+ if (chip->irq_waiting) {
+ mutex_unlock(&chip->irq_complete);
+ smb135x_chg_stat_handler(client->irq, chip);
+ enable_irq(client->irq);
+ } else {
+ mutex_unlock(&chip->irq_complete);
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops smb135x_pm_ops = {
+ .resume = smb135x_resume,
+ .suspend_noirq = smb135x_suspend_noirq,
+ .suspend = smb135x_suspend,
+};
+
+static const struct i2c_device_id smb135x_charger_id[] = {
+ {"smb135x-charger", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, smb135x_charger_id);
+
+static void smb135x_shutdown(struct i2c_client *client)
+{
+ int rc;
+ struct smb135x_chg *chip = i2c_get_clientdata(client);
+
+ if (chip->usb_pullup_vreg) {
+ /*
+ * switch to the 5V adapter to prevent any erroneous request of 12V
+ * when the USB D+ line pull-up regulator turns off.
+ */
+ rc = smb135x_masked_write(chip, CFG_E_REG, HVDCP_5_9_BIT, 0);
+ if (rc < 0)
+ dev_err(chip->dev,
+ "Couldn't request for 5V rc=%d\n", rc);
+ }
+}
+
+static struct i2c_driver smb135x_charger_driver = {
+ .driver = {
+ .name = "smb135x-charger",
+ .owner = THIS_MODULE,
+ .of_match_table = smb135x_match_table,
+ .pm = &smb135x_pm_ops,
+ },
+ .probe = smb135x_charger_probe,
+ .remove = smb135x_charger_remove,
+ .id_table = smb135x_charger_id,
+ .shutdown = smb135x_shutdown,
+};
+
+module_i2c_driver(smb135x_charger_driver);
+
+MODULE_DESCRIPTION("SMB135x Charger");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:smb135x-charger");
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 2f4641a0e88b..33163b6dd289 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -309,6 +309,16 @@ config PWM_RCAR
To compile this driver as a module, choose M here: the module
will be called pwm-rcar.
+config PWM_QPNP
+ depends on SPMI || MSM_SPMI
+ depends on OF_SPMI
+ tristate "Qualcomm QPNP LPG/PWM support"
+ help
+ This driver supports PWM/LPG devices in Qualcomm PMIC chips which
+ comply with QPNP. QPNP is an SPMI-based PMIC implementation. These
+ devices support Pulse Width Modulation output with user-generated
+ patterns. They share a lookup table of 64 entries.
+
config PWM_RENESAS_TPU
tristate "Renesas TPU PWM support"
depends on ARCH_SHMOBILE || COMPILE_TEST
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index 69b8275f3c08..1cd3f8426764 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o
obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
obj-$(CONFIG_PWM_PXA) += pwm-pxa.o
obj-$(CONFIG_PWM_RCAR) += pwm-rcar.o
+obj-$(CONFIG_PWM_QPNP) += pwm-qpnp.o
obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o
obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o
obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o
@@ -41,3 +42,4 @@ obj-$(CONFIG_PWM_TIPWMSS) += pwm-tipwmss.o
obj-$(CONFIG_PWM_TWL) += pwm-twl.o
obj-$(CONFIG_PWM_TWL_LED) += pwm-twl-led.o
obj-$(CONFIG_PWM_VT8500) += pwm-vt8500.o
+
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
new file mode 100644
index 000000000000..0ab6af3e59b5
--- /dev/null
+++ b/drivers/pwm/pwm-qpnp.c
@@ -0,0 +1,2126 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm QPNP Pulse Width Modulation (PWM) driver
+ *
+ * The HW module is also called LPG (Light Pattern Generator).
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/radix-tree.h>
+#include <linux/qpnp/pwm.h>
+
+#define QPNP_LPG_DRIVER_NAME "qcom,qpnp-pwm"
+#define QPNP_LPG_CHANNEL_BASE "qpnp-lpg-channel-base"
+#define QPNP_LPG_LUT_BASE "qpnp-lpg-lut-base"
+
+#define QPNP_PWM_MODE_ONLY_SUB_TYPE 0x0B
+#define QPNP_LPG_CHAN_SUB_TYPE 0x2
+#define QPNP_LPG_S_CHAN_SUB_TYPE 0x11
+
+/* LPG Control for LPG_PATTERN_CONFIG */
+#define QPNP_RAMP_DIRECTION_SHIFT 4
+#define QPNP_RAMP_DIRECTION_MASK 0x10
+#define QPNP_PATTERN_REPEAT_SHIFT 3
+#define QPNP_PATTERN_REPEAT_MASK 0x08
+#define QPNP_RAMP_TOGGLE_SHIFT 2
+#define QPNP_RAMP_TOGGLE_MASK 0x04
+#define QPNP_EN_PAUSE_HI_SHIFT 1
+#define QPNP_EN_PAUSE_HI_MASK 0x02
+#define QPNP_EN_PAUSE_LO_MASK 0x01
+
+/* LPG Control for LPG_PWM_SIZE_CLK */
+#define QPNP_PWM_SIZE_SHIFT_SUB_TYPE 2
+#define QPNP_PWM_SIZE_MASK_SUB_TYPE 0x4
+#define QPNP_PWM_FREQ_CLK_SELECT_MASK_SUB_TYPE 0x03
+#define QPNP_PWM_SIZE_9_BIT_SUB_TYPE 0x01
+
+#define QPNP_SET_PWM_CLK_SUB_TYPE(val, clk, pwm_size) \
+do { \
+ val = (clk + 1) & QPNP_PWM_FREQ_CLK_SELECT_MASK_SUB_TYPE; \
+ val |= (((pwm_size > 6 ? QPNP_PWM_SIZE_9_BIT_SUB_TYPE : 0) << \
+ QPNP_PWM_SIZE_SHIFT_SUB_TYPE) & QPNP_PWM_SIZE_MASK_SUB_TYPE); \
+} while (0)
+
+#define QPNP_GET_PWM_SIZE_SUB_TYPE(reg) ((reg & QPNP_PWM_SIZE_MASK_SUB_TYPE) \
+ >> QPNP_PWM_SIZE_SHIFT_SUB_TYPE)
+
+#define QPNP_PWM_SIZE_SHIFT 4
+#define QPNP_PWM_SIZE_MASK 0x30
+#define QPNP_PWM_FREQ_CLK_SELECT_MASK 0x03
+#define QPNP_MIN_PWM_BIT_SIZE 6
+#define QPNP_MAX_PWM_BIT_SIZE 9
+#define QPNP_PWM_SIZES_SUPPORTED 10
+
+#define QPNP_SET_PWM_CLK(val, clk, pwm_size) \
+do { \
+ val = (clk + 1) & QPNP_PWM_FREQ_CLK_SELECT_MASK; \
+ val |= (((pwm_size - QPNP_MIN_PWM_BIT_SIZE) << \
+ QPNP_PWM_SIZE_SHIFT) & QPNP_PWM_SIZE_MASK); \
+} while (0)
+
+#define QPNP_GET_PWM_SIZE(reg) ((reg & QPNP_PWM_SIZE_MASK) \
+ >> QPNP_PWM_SIZE_SHIFT)
+
+/* LPG Control for LPG_PWM_FREQ_PREDIV_CLK */
+#define QPNP_PWM_FREQ_PRE_DIVIDE_SHIFT 5
+#define QPNP_PWM_FREQ_PRE_DIVIDE_MASK 0x60
+#define QPNP_PWM_FREQ_EXP_MASK 0x07
+
+#define QPNP_SET_PWM_FREQ_PREDIV(val, pre_div, pre_div_exp) \
+do { \
+ val = (pre_div << QPNP_PWM_FREQ_PRE_DIVIDE_SHIFT) & \
+ QPNP_PWM_FREQ_PRE_DIVIDE_MASK; \
+ val |= (pre_div_exp & QPNP_PWM_FREQ_EXP_MASK); \
+} while (0)
+
+/* LPG Control for LPG_PWM_TYPE_CONFIG */
+#define QPNP_EN_GLITCH_REMOVAL_SHIFT 5
+#define QPNP_EN_GLITCH_REMOVAL_MASK 0x20
+#define QPNP_EN_FULL_SCALE_SHIFT 3
+#define QPNP_EN_FULL_SCALE_MASK 0x08
+#define QPNP_EN_PHASE_STAGGER_SHIFT 2
+#define QPNP_EN_PHASE_STAGGER_MASK 0x04
+#define QPNP_PHASE_STAGGER_MASK 0x03
+
+/* LPG Control for PWM_VALUE_LSB */
+#define QPNP_PWM_VALUE_LSB_MASK 0xFF
+
+/* LPG Control for PWM_VALUE_MSB */
+#define QPNP_PWM_VALUE_MSB_SHIFT 8
+#define QPNP_PWM_VALUE_MSB_MASK 0x01
+
+/* LPG Control for ENABLE_CONTROL */
+#define QPNP_EN_PWM_HIGH_SHIFT 7
+#define QPNP_EN_PWM_HIGH_MASK 0x80
+#define QPNP_EN_PWM_LO_SHIFT 6
+#define QPNP_EN_PWM_LO_MASK 0x40
+#define QPNP_EN_PWM_OUTPUT_SHIFT 5
+#define QPNP_EN_PWM_OUTPUT_MASK 0x20
+#define QPNP_PWM_SRC_SELECT_SHIFT 2
+#define QPNP_PWM_SRC_SELECT_MASK 0x04
+#define QPNP_PWM_EN_RAMP_GEN_SHIFT 1
+#define QPNP_PWM_EN_RAMP_GEN_MASK 0x02
+
+/* LPG Control for PWM_SYNC */
+#define QPNP_PWM_SYNC_VALUE 0x01
+#define QPNP_PWM_SYNC_MASK 0x01
+
+/* LPG Control for RAMP_CONTROL */
+#define QPNP_RAMP_START_MASK 0x01
+
+#define QPNP_ENABLE_LUT_V0(value) (value |= QPNP_RAMP_START_MASK)
+#define QPNP_DISABLE_LUT_V0(value) (value &= ~QPNP_RAMP_START_MASK)
+#define QPNP_ENABLE_LUT_V1(value, id) (value |= BIT(id))
+
+/* LPG Control for RAMP_STEP_DURATION_LSB */
+#define QPNP_RAMP_STEP_DURATION_LSB_MASK 0xFF
+
+/* LPG Control for RAMP_STEP_DURATION_MSB */
+#define QPNP_RAMP_STEP_DURATION_MSB_SHIFT 8
+#define QPNP_RAMP_STEP_DURATION_MSB_MASK 0x01
+
+#define QPNP_PWM_1KHZ 1024
+#define QPNP_GET_RAMP_STEP_DURATION(ramp_time_ms) \
+ ((ramp_time_ms * QPNP_PWM_1KHZ) / 1000)
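+/*
+ * Illustrative example (not part of the original patch): a 100 ms ramp step
+ * maps to (100 * 1024) / 1000 = 102 ticks of the 1024 Hz LPG step counter.
+ */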
+
+/* LPG Control for PAUSE_HI_MULTIPLIER_LSB */
+#define QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK 0xFF
+
+/* LPG Control for PAUSE_HI_MULTIPLIER_MSB */
+#define QPNP_PAUSE_HI_MULTIPLIER_MSB_SHIFT 8
+#define QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK 0x1F
+
+/* LPG Control for PAUSE_LO_MULTIPLIER_LSB */
+#define QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK 0xFF
+
+/* LPG Control for PAUSE_LO_MULTIPLIER_MSB */
+#define QPNP_PAUSE_LO_MULTIPLIER_MSB_SHIFT 8
+#define QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK 0x1F
+
+/* LPG Control for HI_INDEX */
+#define QPNP_HI_INDEX_MASK 0x3F
+
+/* LPG Control for LO_INDEX */
+#define QPNP_LO_INDEX_MASK 0x3F
+
+/* LPG DTEST */
+#define QPNP_LPG_DTEST_LINE_MAX 4
+#define QPNP_LPG_DTEST_OUTPUT_MAX 5
+#define QPNP_DTEST_OUTPUT_MASK 0x07
+
+#define NUM_CLOCKS 3
+#define QPNP_PWM_M_MAX 7
+#define NSEC_1024HZ (NSEC_PER_SEC / 1024)
+#define NSEC_32768HZ (NSEC_PER_SEC / 32768)
+#define NSEC_19P2MHZ (NSEC_PER_SEC / 19200000)
+
+#define NUM_LPG_PRE_DIVIDE 4
+
+#define PRE_DIVIDE_1 1
+#define PRE_DIVIDE_3 3
+#define PRE_DIVIDE_5 5
+#define PRE_DIVIDE_6 6
+
+#define SPMI_LPG_REG_BASE_OFFSET 0x40
+#define SPMI_LPG_REVISION2_OFFSET 0x1
+#define SPMI_LPG_REV1_RAMP_CONTROL_OFFSET 0x86
+#define SPMI_LPG_SUB_TYPE_OFFSET 0x5
+#define SPMI_LPG_PWM_SYNC 0x7
+#define SPMI_LPG_REG_ADDR(b, n) (b + SPMI_LPG_REG_BASE_OFFSET + (n))
+#define SPMI_MAX_BUF_LEN 8
+
+#define QPNP_PWM_LUT_NOT_SUPPORTED 0x1
+
+/* Supported PWM sizes */
+#define QPNP_PWM_SIZE_6_BIT 6
+#define QPNP_PWM_SIZE_7_BIT 7
+#define QPNP_PWM_SIZE_8_BIT 8
+#define QPNP_PWM_SIZE_9_BIT 9
+
+#define QPNP_PWM_SIZE_6_9_BIT 0x9
+#define QPNP_PWM_SIZE_7_8_BIT 0x6
+#define QPNP_PWM_SIZE_6_7_9_BIT 0xB
+
+/* Supported time levels */
+enum time_level {
+ LVL_NSEC,
+ LVL_USEC,
+};
+
+/* LPG revisions */
+enum qpnp_lpg_revision {
+ QPNP_LPG_REVISION_0 = 0x0,
+ QPNP_LPG_REVISION_1 = 0x1,
+};
+
+/* LPG LUT MODE STATE */
+enum qpnp_lut_state {
+ QPNP_LUT_ENABLE = 0x0,
+ QPNP_LUT_DISABLE = 0x1,
+};
+
+/* PWM MODE STATE */
+enum qpnp_pwm_state {
+ QPNP_PWM_ENABLE = 0x0,
+ QPNP_PWM_DISABLE = 0x1,
+};
+
+/* SPMI LPG registers */
+enum qpnp_lpg_registers_list {
+ QPNP_LPG_PATTERN_CONFIG,
+ QPNP_LPG_PWM_SIZE_CLK,
+ QPNP_LPG_PWM_FREQ_PREDIV_CLK,
+ QPNP_LPG_PWM_TYPE_CONFIG,
+ QPNP_PWM_VALUE_LSB,
+ QPNP_PWM_VALUE_MSB,
+ QPNP_ENABLE_CONTROL,
+ QPNP_RAMP_CONTROL,
+ QPNP_RAMP_STEP_DURATION_LSB = QPNP_RAMP_CONTROL + 9,
+ QPNP_RAMP_STEP_DURATION_MSB,
+ QPNP_PAUSE_HI_MULTIPLIER_LSB,
+ QPNP_PAUSE_HI_MULTIPLIER_MSB,
+ QPNP_PAUSE_LO_MULTIPLIER_LSB,
+ QPNP_PAUSE_LO_MULTIPLIER_MSB,
+ QPNP_HI_INDEX,
+ QPNP_LO_INDEX,
+ QPNP_LPG_SEC_ACCESS = QPNP_LO_INDEX + 121,
+ QPNP_LPG_DTEST = QPNP_LO_INDEX + 139,
+ QPNP_TOTAL_LPG_SPMI_REGISTERS
+};
+
+/*
+ * Formula from HSID,
+ * pause_time (hi/lo) = (pause_cnt - 1) * ramp_ms
+ * OR,
+ * pause_cnt = (pause_time / ramp_ms) + 1
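+ *
+ * Illustrative example (not from the original source): with ramp_ms = 20
+ * and a requested pause of 500 ms, pause_cnt = (500 / 20) + 1 = 26.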
+ */
+#define QPNP_SET_PAUSE_CNT(to_pause_cnt, from_pause, ramp_ms) \
+ (to_pause_cnt = (from_pause / (ramp_ms ? ramp_ms : 1)) + 1)
+
+
+static unsigned int pt_t[NUM_LPG_PRE_DIVIDE][NUM_CLOCKS] = {
+ { PRE_DIVIDE_1 * NSEC_1024HZ,
+ PRE_DIVIDE_1 * NSEC_32768HZ,
+ PRE_DIVIDE_1 * NSEC_19P2MHZ,
+ },
+ { PRE_DIVIDE_3 * NSEC_1024HZ,
+ PRE_DIVIDE_3 * NSEC_32768HZ,
+ PRE_DIVIDE_3 * NSEC_19P2MHZ,
+ },
+ { PRE_DIVIDE_5 * NSEC_1024HZ,
+ PRE_DIVIDE_5 * NSEC_32768HZ,
+ PRE_DIVIDE_5 * NSEC_19P2MHZ,
+ },
+ { PRE_DIVIDE_6 * NSEC_1024HZ,
+ PRE_DIVIDE_6 * NSEC_32768HZ,
+ PRE_DIVIDE_6 * NSEC_19P2MHZ,
+ },
+};
+
+struct qpnp_lut_config {
+ u8 *duty_pct_list;
+ int list_len;
+ int ramp_index;
+ int lo_index;
+ int hi_index;
+ int lut_pause_hi_cnt;
+ int lut_pause_lo_cnt;
+ int ramp_step_ms;
+ bool ramp_direction;
+ bool pattern_repeat;
+ bool ramp_toggle;
+ bool enable_pause_hi;
+ bool enable_pause_lo;
+};
+
+struct qpnp_lpg_config {
+ struct qpnp_lut_config lut_config;
+ u16 base_addr;
+ u16 lut_base_addr;
+ u16 lut_size;
+};
+
+struct _qpnp_pwm_config {
+ int pwm_value;
+ int pwm_period; /* in microseconds */
+ int pwm_duty; /* in microseconds */
+ struct pwm_period_config period;
+ int supported_sizes;
+ int force_pwm_size;
+};
+
+/* Public facing structure */
+struct qpnp_pwm_chip {
+ struct spmi_device *spmi_dev;
+ struct pwm_chip chip;
+ bool enabled;
+ struct _qpnp_pwm_config pwm_config;
+ struct qpnp_lpg_config lpg_config;
+ spinlock_t lpg_lock;
+ enum qpnp_lpg_revision revision;
+ u8 sub_type;
+ u32 flags;
+ u8 qpnp_lpg_registers[QPNP_TOTAL_LPG_SPMI_REGISTERS];
+ int channel_id;
+ const char *channel_owner;
+ u32 dtest_line;
+ u32 dtest_output;
+ bool in_test_mode;
+};
+
+/* Internal functions */
+static inline struct qpnp_pwm_chip *qpnp_pwm_from_pwm_dev(
+ struct pwm_device *pwm)
+{
+ return container_of(pwm->chip, struct qpnp_pwm_chip, chip);
+}
+
+static inline struct qpnp_pwm_chip *qpnp_pwm_from_pwm_chip(
+ struct pwm_chip *chip)
+{
+ return container_of(chip, struct qpnp_pwm_chip, chip);
+}
+
+static inline void qpnp_set_pattern_config(u8 *val,
+ struct qpnp_lut_config *lut_config)
+{
+ *val = lut_config->enable_pause_lo & QPNP_EN_PAUSE_LO_MASK;
+ *val |= (lut_config->enable_pause_hi << QPNP_EN_PAUSE_HI_SHIFT) &
+ QPNP_EN_PAUSE_HI_MASK;
+ *val |= (lut_config->ramp_toggle << QPNP_RAMP_TOGGLE_SHIFT) &
+ QPNP_RAMP_TOGGLE_MASK;
+ *val |= (lut_config->pattern_repeat << QPNP_PATTERN_REPEAT_SHIFT) &
+ QPNP_PATTERN_REPEAT_MASK;
+ *val |= (lut_config->ramp_direction << QPNP_RAMP_DIRECTION_SHIFT) &
+ QPNP_RAMP_DIRECTION_MASK;
+}
+
+static inline void qpnp_set_pwm_type_config(u8 *val, bool glitch,
+ bool full_scale, bool en_phase, bool phase)
+{
+ *val = phase;
+ *val |= (en_phase << QPNP_EN_PHASE_STAGGER_SHIFT) &
+ QPNP_EN_PHASE_STAGGER_MASK;
+ *val |= (full_scale << QPNP_EN_FULL_SCALE_SHIFT) &
+ QPNP_EN_FULL_SCALE_MASK;
+ *val |= (glitch << QPNP_EN_GLITCH_REMOVAL_SHIFT) &
+ QPNP_EN_GLITCH_REMOVAL_MASK;
+}
+
+static int qpnp_set_control(struct qpnp_pwm_chip *chip, bool pwm_hi,
+ bool pwm_lo, bool pwm_out, bool pwm_src, bool ramp_gen)
+{
+ int value;
+ value = (ramp_gen << QPNP_PWM_EN_RAMP_GEN_SHIFT) |
+ (pwm_src << QPNP_PWM_SRC_SELECT_SHIFT) |
+ (pwm_lo << QPNP_EN_PWM_LO_SHIFT) |
+ (pwm_hi << QPNP_EN_PWM_HIGH_SHIFT);
+ if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+ value |= (pwm_out << QPNP_EN_PWM_OUTPUT_SHIFT);
+ return value;
+}
+
+#define QPNP_ENABLE_LUT_CONTROL(chip) \
+ qpnp_set_control((chip), 0, 0, 0, 0, 1)
+#define QPNP_ENABLE_PWM_CONTROL(chip) \
+ qpnp_set_control((chip), 0, 0, 0, 1, 0)
+#define QPNP_ENABLE_PWM_MODE(chip) \
+ qpnp_set_control((chip), 1, 1, 1, 1, 0)
+#define QPNP_ENABLE_PWM_MODE_GPLED_CHANNEL(chip) \
+ qpnp_set_control((chip), 1, 1, 1, 1, 1)
+#define QPNP_ENABLE_LPG_MODE(chip) \
+ qpnp_set_control((chip), 1, 1, 1, 0, 1)
+#define QPNP_DISABLE_PWM_MODE(chip) \
+ qpnp_set_control((chip), 0, 0, 0, 1, 0)
+#define QPNP_DISABLE_LPG_MODE(chip) \
+ qpnp_set_control((chip), 0, 0, 0, 0, 1)
+#define QPNP_IS_PWM_CONFIG_SELECTED(val) (val & QPNP_PWM_SRC_SELECT_MASK)
+
+#define QPNP_ENABLE_PWM_MODE_ONLY_SUB_TYPE 0x80
+#define QPNP_DISABLE_PWM_MODE_ONLY_SUB_TYPE 0x0
+#define QPNP_PWM_MODE_ONLY_ENABLE_DISABLE_MASK_SUB_TYPE 0x80
+
+static inline void qpnp_convert_to_lut_flags(int *flags,
+ struct qpnp_lut_config *l_config)
+{
+ *flags = ((l_config->ramp_direction ? PM_PWM_LUT_RAMP_UP : 0) |
+ (l_config->pattern_repeat ? PM_PWM_LUT_LOOP : 0)|
+ (l_config->ramp_toggle ? PM_PWM_LUT_REVERSE : 0) |
+ (l_config->enable_pause_hi ? PM_PWM_LUT_PAUSE_HI_EN : 0) |
+ (l_config->enable_pause_lo ? PM_PWM_LUT_PAUSE_LO_EN : 0));
+}
+
+static inline void qpnp_set_lut_params(struct lut_params *l_params,
+ struct qpnp_lut_config *l_config, int s_idx, int size)
+{
+ l_params->start_idx = s_idx;
+ l_params->idx_len = size;
+ l_params->lut_pause_hi = l_config->lut_pause_hi_cnt;
+ l_params->lut_pause_lo = l_config->lut_pause_lo_cnt;
+ l_params->ramp_step_ms = l_config->ramp_step_ms;
+ qpnp_convert_to_lut_flags(&l_params->flags, l_config);
+}
+
+static void qpnp_lpg_save(u8 *u8p, u8 mask, u8 val)
+{
+ *u8p &= ~mask;
+ *u8p |= val & mask;
+}
+
+static int qpnp_lpg_save_and_write(u8 value, u8 mask, u8 *reg, u16 addr,
+ u16 size, struct qpnp_pwm_chip *chip)
+{
+ qpnp_lpg_save(reg, mask, value);
+
+ return spmi_ext_register_writel(chip->spmi_dev->ctrl,
+ chip->spmi_dev->sid, addr, reg, size);
+}
+
+/*
+ * PWM Frequency = Clock Frequency / (N * T)
+ * or
+ * PWM Period = Clock Period * (N * T)
+ * where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, where m = 0..7 (exponent)
+ *
+ * This is the formula to figure out m for the best pre-divide and clock:
+ * (PWM Period / N) = (Pre-divide * Clock Period) * 2^m
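+ *
+ * Illustrative example (numbers not from the original source): a 9-bit
+ * PWM (N = 512) on the 19.2 MHz clock with pre-divide 5 and m = 3 gives
+ * PWM Period = (1 / 19.2 MHz) * 512 * 5 * 2^3 ~= 1.07 ms.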
+ */
+static void qpnp_lpg_calc_period(enum time_level tm_lvl,
+ unsigned int period_value,
+ struct qpnp_pwm_chip *chip)
+{
+ int n, m, clk, div;
+ int best_m, best_div, best_clk;
+ unsigned int last_err, cur_err, min_err;
+ unsigned int tmp_p, period_n;
+ int supported_sizes = chip->pwm_config.supported_sizes;
+ int force_pwm_size = chip->pwm_config.force_pwm_size;
+ struct pwm_period_config *period = &chip->pwm_config.period;
+
+ /* PWM Period / N */
+ if (supported_sizes == QPNP_PWM_SIZE_7_8_BIT)
+ n = 7;
+ else
+ n = 6;
+
+ if (tm_lvl == LVL_USEC) {
+ if (period_value < ((unsigned)(-1) / NSEC_PER_USEC)) {
+ period_n = (period_value * NSEC_PER_USEC) >> n;
+ } else {
+ if (supported_sizes == QPNP_PWM_SIZE_7_8_BIT)
+ n = 8;
+ else
+ n = 9;
+ period_n = (period_value >> n) * NSEC_PER_USEC;
+ }
+ } else {
+ period_n = period_value >> n;
+ }
+
+ if (force_pwm_size != 0) {
+ if (n < force_pwm_size)
+ period_n = period_n >> (force_pwm_size - n);
+ else
+ period_n = period_n << (n - force_pwm_size);
+ n = force_pwm_size;
+ pr_info("LPG channel '%d' pwm size is forced to=%d\n",
+ chip->channel_id, n);
+ }
+
+ min_err = last_err = (unsigned)(-1);
+ best_m = 0;
+ best_clk = 0;
+ best_div = 0;
+ for (clk = 0; clk < NUM_CLOCKS; clk++) {
+ for (div = 0; div < NUM_LPG_PRE_DIVIDE; div++) {
+ /* period_n = (PWM Period / N) */
+ /* tmp_p = (Pre-divide * Clock Period) * 2^m */
+ tmp_p = pt_t[div][clk];
+ for (m = 0; m <= QPNP_PWM_M_MAX; m++) {
+ if (period_n > tmp_p)
+ cur_err = period_n - tmp_p;
+ else
+ cur_err = tmp_p - period_n;
+
+ if (cur_err < min_err) {
+ min_err = cur_err;
+ best_m = m;
+ best_clk = clk;
+ best_div = div;
+ }
+
+ if (m && cur_err > last_err)
+ /* Break for bigger cur_err */
+ break;
+
+ last_err = cur_err;
+ tmp_p <<= 1;
+ }
+ }
+ }
+
+ /* Adapt to the optimal pwm size; the higher the resolution, the better */
+ if (!force_pwm_size) {
+ if (supported_sizes == QPNP_PWM_SIZE_7_8_BIT) {
+ if (n == 7 && best_m >= 1) {
+ n += 1;
+ best_m -= 1;
+ }
+ } else if (n == 6) {
+ if (best_m >= 3) {
+ n += 3;
+ best_m -= 3;
+ } else if (best_m >= 1 && (
+ chip->sub_type != QPNP_PWM_MODE_ONLY_SUB_TYPE &&
+ chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)) {
+ n += 1;
+ best_m -= 1;
+ }
+ }
+ }
+
+ period->pwm_size = n;
+ period->clk = best_clk;
+ period->pre_div = best_div;
+ period->pre_div_exp = best_m;
+}
+
+static void qpnp_lpg_calc_pwm_value(struct _qpnp_pwm_config *pwm_config,
+ unsigned int period_value,
+ unsigned int duty_value)
+{
+ unsigned int max_pwm_value, tmp;
+
+ /* Figure out pwm_value with overflow handling */
+ tmp = 1 << (sizeof(tmp) * 8 - pwm_config->period.pwm_size);
+ if (duty_value < tmp) {
+ tmp = duty_value << pwm_config->period.pwm_size;
+ pwm_config->pwm_value = tmp / period_value;
+ } else {
+ tmp = period_value >> pwm_config->period.pwm_size;
+ pwm_config->pwm_value = duty_value / tmp;
+ }
+ max_pwm_value = (1 << pwm_config->period.pwm_size) - 1;
+ if (pwm_config->pwm_value > max_pwm_value)
+ pwm_config->pwm_value = max_pwm_value;
+}
+
+static int qpnp_lpg_change_table(struct qpnp_pwm_chip *chip,
+ int duty_pct[], int raw_value)
+{
+ unsigned int pwm_value, max_pwm_value;
+ struct qpnp_lut_config *lut = &chip->lpg_config.lut_config;
+ int i, pwm_size, rc = 0;
+ int burst_size = SPMI_MAX_BUF_LEN;
+ int list_len = lut->list_len << 1;
+ int offset = (lut->lo_index << 1) - 2;
+
+ pwm_size = QPNP_GET_PWM_SIZE(
+ chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) +
+ QPNP_MIN_PWM_BIT_SIZE;
+
+ max_pwm_value = (1 << pwm_size) - 1;
+
+ if (unlikely(lut->list_len != (lut->hi_index - lut->lo_index + 1))) {
+ pr_err("LUT internal Data structure corruption detected\n");
+ pr_err("LUT list size: %d\n", lut->list_len);
+ pr_err("However, index size is: %d\n",
+ (lut->hi_index - lut->lo_index + 1));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < lut->list_len; i++) {
+ if (raw_value)
+ pwm_value = duty_pct[i];
+ else
+ pwm_value = (duty_pct[i] << pwm_size) / 100;
+
+ if (pwm_value > max_pwm_value)
+ pwm_value = max_pwm_value;
+
+ if (chip->pwm_config.supported_sizes == QPNP_PWM_SIZE_7_8_BIT) {
+ lut->duty_pct_list[i] = pwm_value;
+ } else {
+ lut->duty_pct_list[i*2] = pwm_value;
+ lut->duty_pct_list[(i*2)+1] = (pwm_value >>
+ QPNP_PWM_VALUE_MSB_SHIFT) & QPNP_PWM_VALUE_MSB_MASK;
+ }
+ }
+
+ /*
+ * For the Keypad Backlight Lookup Table (KPDBL_LUT),
+ * offset is lo_index.
+ */
+ if (chip->pwm_config.supported_sizes == QPNP_PWM_SIZE_7_8_BIT)
+ offset = lut->lo_index;
+
+ /* Write with max allowable burst mode, each entry is of two bytes */
+ for (i = 0; i < list_len; i += burst_size) {
+ if (i + burst_size >= list_len)
+ burst_size = list_len - i;
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl,
+ chip->spmi_dev->sid,
+ chip->lpg_config.lut_base_addr + offset + i,
+ lut->duty_pct_list + i, burst_size);
+ }
+
+ return rc;
+}
+
+static void qpnp_lpg_save_period(struct qpnp_pwm_chip *chip)
+{
+ u8 mask, val;
+ struct _qpnp_pwm_config *pwm_config = &chip->pwm_config;
+
+ if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) {
+ QPNP_SET_PWM_CLK_SUB_TYPE(val, pwm_config->period.clk,
+ pwm_config->period.pwm_size);
+ mask = QPNP_PWM_SIZE_MASK_SUB_TYPE |
+ QPNP_PWM_FREQ_CLK_SELECT_MASK_SUB_TYPE;
+ } else {
+ QPNP_SET_PWM_CLK(val, pwm_config->period.clk,
+ pwm_config->period.pwm_size);
+ mask = QPNP_PWM_SIZE_MASK | QPNP_PWM_FREQ_CLK_SELECT_MASK;
+ }
+
+ qpnp_lpg_save(&chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK],
+ mask, val);
+
+ QPNP_SET_PWM_FREQ_PREDIV(val, pwm_config->period.pre_div,
+ pwm_config->period.pre_div_exp);
+
+ mask = QPNP_PWM_FREQ_PRE_DIVIDE_MASK | QPNP_PWM_FREQ_EXP_MASK;
+
+ qpnp_lpg_save(&chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK],
+ mask, val);
+}
+
+static int qpnp_lpg_save_pwm_value(struct qpnp_pwm_chip *chip)
+{
+ unsigned int max_pwm_value;
+ int pwm_size;
+ u8 mask, value;
+ struct _qpnp_pwm_config *pwm_config = &chip->pwm_config;
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ int rc;
+
+ if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE)
+ pwm_size = QPNP_GET_PWM_SIZE_SUB_TYPE(
+ chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) ?
+ QPNP_MAX_PWM_BIT_SIZE : QPNP_MIN_PWM_BIT_SIZE;
+ else
+ pwm_size = QPNP_GET_PWM_SIZE(
+ chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK]) +
+ QPNP_MIN_PWM_BIT_SIZE;
+
+ max_pwm_value = (1 << pwm_size) - 1;
+
+ if (pwm_config->pwm_value > max_pwm_value)
+ pwm_config->pwm_value = max_pwm_value;
+
+ value = pwm_config->pwm_value;
+ mask = QPNP_PWM_VALUE_LSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PWM_VALUE_LSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PWM_VALUE_LSB), 1, chip);
+ if (rc)
+ return rc;
+
+ value = (pwm_config->pwm_value >> QPNP_PWM_VALUE_MSB_SHIFT) &
+ QPNP_PWM_VALUE_MSB_MASK;
+
+ mask = QPNP_PWM_VALUE_MSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PWM_VALUE_MSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PWM_VALUE_MSB), 1, chip);
+ if (rc)
+ return rc;
+
+ if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE ||
+ chip->sub_type == QPNP_LPG_S_CHAN_SUB_TYPE) {
+ value = QPNP_PWM_SYNC_VALUE & QPNP_PWM_SYNC_MASK;
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl,
+ chip->spmi_dev->sid,
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ SPMI_LPG_PWM_SYNC), &value, 1);
+ }
+
+ return rc;
+}
+
+static int qpnp_lpg_configure_pattern(struct qpnp_pwm_chip *chip)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ struct qpnp_lut_config *lut_config = &lpg_config->lut_config;
+ u8 value, mask;
+
+ qpnp_set_pattern_config(&value, lut_config);
+
+ mask = QPNP_RAMP_DIRECTION_MASK | QPNP_PATTERN_REPEAT_MASK |
+ QPNP_RAMP_TOGGLE_MASK | QPNP_EN_PAUSE_HI_MASK |
+ QPNP_EN_PAUSE_LO_MASK;
+
+ return qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_LPG_PATTERN_CONFIG],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_LPG_PATTERN_CONFIG), 1, chip);
+}
+
+static int qpnp_lpg_configure_pwm(struct qpnp_pwm_chip *chip)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ int rc;
+ u8 value, mask;
+
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_PWM_SIZE_CLK),
+ &chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK], 1);
+
+ if (rc)
+ return rc;
+
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_LPG_PWM_FREQ_PREDIV_CLK),
+ &chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK], 1);
+ if (rc)
+ return rc;
+
+ qpnp_set_pwm_type_config(&value, 1, 0, 0, 0);
+
+ mask = QPNP_EN_GLITCH_REMOVAL_MASK | QPNP_EN_FULL_SCALE_MASK |
+ QPNP_EN_PHASE_STAGGER_MASK | QPNP_PHASE_STAGGER_MASK;
+
+ return qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_LPG_PWM_TYPE_CONFIG],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_LPG_PWM_TYPE_CONFIG), 1, chip);
+}
+
+static int qpnp_configure_pwm_control(struct qpnp_pwm_chip *chip)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ u8 value, mask;
+
+ if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE)
+ return 0;
+
+ value = QPNP_ENABLE_PWM_CONTROL(chip);
+
+ mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+ QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK;
+ if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+ mask |= QPNP_EN_PWM_OUTPUT_MASK;
+
+ return qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_ENABLE_CONTROL), 1, chip);
+
+}
+
+static int qpnp_configure_lpg_control(struct qpnp_pwm_chip *chip)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ u8 value, mask;
+
+ value = QPNP_ENABLE_LUT_CONTROL(chip);
+
+ mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+ QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK;
+ if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+ mask |= QPNP_EN_PWM_OUTPUT_MASK;
+
+ return qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_ENABLE_CONTROL), 1, chip);
+
+}
+
+static int qpnp_lpg_configure_ramp_step_duration(struct qpnp_pwm_chip *chip)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ struct qpnp_lut_config lut_config = lpg_config->lut_config;
+ int rc, value;
+ u8 val, mask;
+
+ value = QPNP_GET_RAMP_STEP_DURATION(lut_config.ramp_step_ms);
+ val = value & QPNP_RAMP_STEP_DURATION_LSB_MASK;
+ mask = QPNP_RAMP_STEP_DURATION_LSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(val, mask,
+ &chip->qpnp_lpg_registers[QPNP_RAMP_STEP_DURATION_LSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_RAMP_STEP_DURATION_LSB), 1, chip);
+ if (rc)
+ return rc;
+
+ val = (value >> QPNP_RAMP_STEP_DURATION_MSB_SHIFT) &
+ QPNP_RAMP_STEP_DURATION_MSB_MASK;
+
+ mask = QPNP_RAMP_STEP_DURATION_MSB_MASK;
+
+ return qpnp_lpg_save_and_write(val, mask,
+ &chip->qpnp_lpg_registers[QPNP_RAMP_STEP_DURATION_MSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_RAMP_STEP_DURATION_MSB), 1, chip);
+}
+
+static int qpnp_lpg_configure_pause(struct qpnp_pwm_chip *chip)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ struct qpnp_lut_config lut_config = lpg_config->lut_config;
+ u8 value, mask;
+ int rc = 0;
+
+ if (lut_config.enable_pause_hi) {
+ value = lut_config.lut_pause_hi_cnt;
+ mask = QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_LSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PAUSE_HI_MULTIPLIER_LSB), 1, chip);
+ if (rc)
+ return rc;
+
+ value = (lut_config.lut_pause_hi_cnt >>
+ QPNP_PAUSE_HI_MULTIPLIER_MSB_SHIFT) &
+ QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK;
+
+ mask = QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_MSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PAUSE_HI_MULTIPLIER_MSB), 1, chip);
+ } else {
+ value = 0;
+ mask = QPNP_PAUSE_HI_MULTIPLIER_LSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_LSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PAUSE_HI_MULTIPLIER_LSB), 1, chip);
+ if (rc)
+ return rc;
+
+ mask = QPNP_PAUSE_HI_MULTIPLIER_MSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PAUSE_HI_MULTIPLIER_MSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PAUSE_HI_MULTIPLIER_MSB), 1, chip);
+ if (rc)
+ return rc;
+
+ }
+
+ if (lut_config.enable_pause_lo) {
+ value = lut_config.lut_pause_lo_cnt;
+ mask = QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_LSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PAUSE_LO_MULTIPLIER_LSB), 1, chip);
+ if (rc)
+ return rc;
+
+ value = (lut_config.lut_pause_lo_cnt >>
+ QPNP_PAUSE_LO_MULTIPLIER_MSB_SHIFT) &
+ QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK;
+
+ mask = QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_MSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PAUSE_LO_MULTIPLIER_MSB), 1, chip);
+ } else {
+ value = 0;
+ mask = QPNP_PAUSE_LO_MULTIPLIER_LSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_LSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PAUSE_LO_MULTIPLIER_LSB), 1, chip);
+ if (rc)
+ return rc;
+
+ mask = QPNP_PAUSE_LO_MULTIPLIER_MSB_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_PAUSE_LO_MULTIPLIER_MSB],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_PAUSE_LO_MULTIPLIER_MSB), 1, chip);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int qpnp_lpg_configure_index(struct qpnp_pwm_chip *chip)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ struct qpnp_lut_config lut_config = lpg_config->lut_config;
+ u8 value, mask;
+ int rc = 0;
+
+ value = lut_config.hi_index;
+ mask = QPNP_HI_INDEX_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_HI_INDEX],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_HI_INDEX), 1, chip);
+ if (rc)
+ return rc;
+
+ value = lut_config.lo_index;
+ mask = QPNP_LO_INDEX_MASK;
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_LO_INDEX],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_LO_INDEX), 1, chip);
+
+ return rc;
+}
+
+static int qpnp_lpg_change_lut(struct qpnp_pwm_chip *chip)
+{
+ int rc;
+
+ rc = qpnp_lpg_configure_pattern(chip);
+ if (rc) {
+ pr_err("Failed to configure LUT pattern");
+ return rc;
+ }
+ rc = qpnp_lpg_configure_pwm(chip);
+ if (rc) {
+ pr_err("Failed to configure LUT pattern");
+ return rc;
+ }
+ rc = qpnp_configure_lpg_control(chip);
+ if (rc) {
+ pr_err("Failed to configure pause registers");
+ return rc;
+ }
+ rc = qpnp_lpg_configure_ramp_step_duration(chip);
+ if (rc) {
+ pr_err("Failed to configure duty time");
+ return rc;
+ }
+ rc = qpnp_lpg_configure_pause(chip);
+ if (rc) {
+ pr_err("Failed to configure pause registers");
+ return rc;
+ }
+ rc = qpnp_lpg_configure_index(chip);
+ if (rc) {
+ pr_err("Failed to configure index registers");
+ return rc;
+ }
+ return rc;
+}
+
+static int qpnp_dtest_config(struct qpnp_pwm_chip *chip, bool enable)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ u8 value;
+ u16 addr;
+ int rc = 0;
+
+ if (!chip->dtest_output) {
+ pr_err("DTEST output not configured for channel %d\n",
+ chip->channel_id);
+ return -EPERM;
+ }
+
+ if (chip->dtest_line > QPNP_LPG_DTEST_LINE_MAX ||
+ chip->dtest_output > QPNP_LPG_DTEST_OUTPUT_MAX) {
+ pr_err("DTEST line/output values are improper for channel %d\n",
+ chip->channel_id);
+ return -EINVAL;
+ }
+
+ value = 0xA5;
+
+ addr = SPMI_LPG_REG_ADDR(lpg_config->base_addr, QPNP_LPG_SEC_ACCESS);
+
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl,
+ chip->spmi_dev->sid, addr, &value, 1);
+
+ if (rc) {
+ pr_err("Couldn't set the access for test mode\n");
+ return rc;
+ }
+
+ addr = SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_LPG_DTEST + chip->dtest_line - 1);
+
+ if (enable)
+ value = chip->dtest_output & QPNP_DTEST_OUTPUT_MASK;
+ else
+ value = 0;
+
+ pr_debug("Setting TEST mode for channel %d addr:%x value: %x\n",
+ chip->channel_id, addr, value);
+
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl,
+ chip->spmi_dev->sid, addr, &value, 1);
+
+ return rc;
+}
+
+static int qpnp_lpg_configure_lut_state(struct qpnp_pwm_chip *chip,
+ enum qpnp_lut_state state)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ u8 value1, value2, mask1, mask2;
+ u8 *reg1, *reg2;
+ u16 addr, addr1;
+ int rc;
+ bool test_enable;
+
+ value1 = chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL];
+ reg1 = &chip->qpnp_lpg_registers[QPNP_RAMP_CONTROL];
+ reg2 = &chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL];
+ mask2 = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+ QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK;
+ if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+ mask2 |= QPNP_EN_PWM_OUTPUT_MASK;
+
+ if (chip->sub_type == QPNP_LPG_CHAN_SUB_TYPE
+ && chip->revision == QPNP_LPG_REVISION_0) {
+ if (state == QPNP_LUT_ENABLE) {
+ QPNP_ENABLE_LUT_V0(value1);
+ value2 = QPNP_ENABLE_LPG_MODE(chip);
+ } else {
+ QPNP_DISABLE_LUT_V0(value1);
+ value2 = QPNP_DISABLE_LPG_MODE(chip);
+ }
+ mask1 = QPNP_RAMP_START_MASK;
+ addr1 = SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_RAMP_CONTROL);
+ } else if ((chip->sub_type == QPNP_LPG_CHAN_SUB_TYPE
+ && chip->revision == QPNP_LPG_REVISION_1)
+ || chip->sub_type == QPNP_LPG_S_CHAN_SUB_TYPE) {
+ if (state == QPNP_LUT_ENABLE) {
+ QPNP_ENABLE_LUT_V1(value1,
+ lpg_config->lut_config.ramp_index);
+ value2 = QPNP_ENABLE_LPG_MODE(chip);
+ } else {
+ value2 = QPNP_DISABLE_LPG_MODE(chip);
+ }
+ mask1 = value1;
+ addr1 = lpg_config->lut_base_addr +
+ SPMI_LPG_REV1_RAMP_CONTROL_OFFSET;
+ } else {
+ pr_err("Unsupported LPG subtype 0x%02x, revision 0x%02x\n",
+ chip->sub_type, chip->revision);
+ return -EINVAL;
+ }
+
+ addr = SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_ENABLE_CONTROL);
+
+ if (chip->in_test_mode) {
+ test_enable = (state == QPNP_LUT_ENABLE) ? 1 : 0;
+ rc = qpnp_dtest_config(chip, test_enable);
+ if (rc)
+ pr_err("Failed to configure TEST mode\n");
+ }
+
+ rc = qpnp_lpg_save_and_write(value2, mask2, reg2,
+ addr, 1, chip);
+ if (rc)
+ return rc;
+
+ if (state == QPNP_LUT_ENABLE
+ || (chip->sub_type == QPNP_LPG_CHAN_SUB_TYPE
+ && chip->revision == QPNP_LPG_REVISION_0))
+ rc = qpnp_lpg_save_and_write(value1, mask1, reg1,
+ addr1, 1, chip);
+ return rc;
+}
+
+static inline int qpnp_enable_pwm_mode(struct qpnp_pwm_chip *chip)
+{
+ if (chip->pwm_config.supported_sizes == QPNP_PWM_SIZE_7_8_BIT)
+ return QPNP_ENABLE_PWM_MODE_GPLED_CHANNEL(chip);
+ return QPNP_ENABLE_PWM_MODE(chip);
+}
+
+static int qpnp_lpg_configure_pwm_state(struct qpnp_pwm_chip *chip,
+ enum qpnp_pwm_state state)
+{
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ u8 value, mask;
+ int rc;
+ bool test_enable;
+
+ if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) {
+ if (state == QPNP_PWM_ENABLE)
+ value = QPNP_ENABLE_PWM_MODE_ONLY_SUB_TYPE;
+ else
+ value = QPNP_DISABLE_PWM_MODE_ONLY_SUB_TYPE;
+
+ mask = QPNP_PWM_MODE_ONLY_ENABLE_DISABLE_MASK_SUB_TYPE;
+ } else {
+ if (state == QPNP_PWM_ENABLE)
+ value = qpnp_enable_pwm_mode(chip);
+ else
+ value = QPNP_DISABLE_PWM_MODE(chip);
+
+ mask = QPNP_EN_PWM_HIGH_MASK | QPNP_EN_PWM_LO_MASK |
+ QPNP_PWM_SRC_SELECT_MASK | QPNP_PWM_EN_RAMP_GEN_MASK;
+ if (chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE)
+ mask |= QPNP_EN_PWM_OUTPUT_MASK;
+ }
+
+ if (chip->in_test_mode) {
+ test_enable = (state == QPNP_PWM_ENABLE) ? 1 : 0;
+ rc = qpnp_dtest_config(chip, test_enable);
+ if (rc)
+ pr_err("Failed to configure TEST mode\n");
+ }
+
+ rc = qpnp_lpg_save_and_write(value, mask,
+ &chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL],
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_ENABLE_CONTROL), 1, chip);
+ if (rc)
+ goto out;
+
+ /*
+ * Due to an LPG hardware bug, after enabling PWM mode we have to
+ * write the PWM values one more time.
+ */
+ if (state == QPNP_PWM_ENABLE)
+ return qpnp_lpg_save_pwm_value(chip);
+
+out:
+ return rc;
+}
+
+static int _pwm_config(struct qpnp_pwm_chip *chip,
+ enum time_level tm_lvl,
+ int duty_value, int period_value)
+{
+ int rc;
+ struct _qpnp_pwm_config *pwm_config = &chip->pwm_config;
+ struct pwm_period_config *period = &pwm_config->period;
+
+ pwm_config->pwm_duty = (tm_lvl == LVL_USEC) ? duty_value :
+ duty_value / NSEC_PER_USEC;
+ qpnp_lpg_calc_pwm_value(pwm_config, period_value, duty_value);
+ rc = qpnp_lpg_save_pwm_value(chip);
+ if (rc)
+ goto out;
+ rc = qpnp_lpg_configure_pwm(chip);
+ if (rc)
+ goto out;
+ rc = qpnp_configure_pwm_control(chip);
+ if (rc)
+ goto out;
+
+ if (!rc && chip->enabled)
+ rc = qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_ENABLE);
+
+ pr_debug("duty/period=%u/%u %s: pwm_value=%d (of %d)\n",
+ (unsigned)duty_value, (unsigned)period_value,
+ (tm_lvl == LVL_USEC) ? "usec" : "nsec",
+ pwm_config->pwm_value, 1 << period->pwm_size);
+
+out:
+ return rc;
+}
+
+static int _pwm_lut_config(struct qpnp_pwm_chip *chip, int period_us,
+ int duty_pct[], struct lut_params lut_params)
+{
+ struct qpnp_lpg_config *lpg_config;
+ struct qpnp_lut_config *lut_config;
+ struct pwm_period_config *period;
+ struct _qpnp_pwm_config *pwm_config;
+ int start_idx = lut_params.start_idx;
+ int len = lut_params.idx_len;
+ int flags = lut_params.flags;
+ int raw_lut, ramp_step_ms;
+ int rc = 0;
+
+ pwm_config = &chip->pwm_config;
+ lpg_config = &chip->lpg_config;
+ lut_config = &lpg_config->lut_config;
+ period = &pwm_config->period;
+
+ if (flags & PM_PWM_LUT_NO_TABLE)
+ goto after_table_write;
+
+ raw_lut = 0;
+ if (flags & PM_PWM_LUT_USE_RAW_VALUE)
+ raw_lut = 1;
+
+ lut_config->list_len = len;
+ lut_config->lo_index = start_idx + 1;
+ lut_config->hi_index = start_idx + len;
+
+ rc = qpnp_lpg_change_table(chip, duty_pct, raw_lut);
+ if (rc) {
+ pr_err("qpnp_lpg_change_table: rc=%d\n", rc);
+ return -EINVAL;
+ }
+
+after_table_write:
+ ramp_step_ms = lut_params.ramp_step_ms;
+
+ if (ramp_step_ms > PM_PWM_LUT_RAMP_STEP_TIME_MAX)
+ ramp_step_ms = PM_PWM_LUT_RAMP_STEP_TIME_MAX;
+
+ QPNP_SET_PAUSE_CNT(lut_config->lut_pause_lo_cnt,
+ lut_params.lut_pause_lo, ramp_step_ms);
+ if (lut_config->lut_pause_lo_cnt > PM_PWM_MAX_PAUSE_CNT)
+ lut_config->lut_pause_lo_cnt = PM_PWM_MAX_PAUSE_CNT;
+
+ QPNP_SET_PAUSE_CNT(lut_config->lut_pause_hi_cnt,
+ lut_params.lut_pause_hi, ramp_step_ms);
+ if (lut_config->lut_pause_hi_cnt > PM_PWM_MAX_PAUSE_CNT)
+ lut_config->lut_pause_hi_cnt = PM_PWM_MAX_PAUSE_CNT;
+
+ lut_config->ramp_step_ms = ramp_step_ms;
+
+ lut_config->ramp_direction = !!(flags & PM_PWM_LUT_RAMP_UP);
+ lut_config->pattern_repeat = !!(flags & PM_PWM_LUT_LOOP);
+ lut_config->ramp_toggle = !!(flags & PM_PWM_LUT_REVERSE);
+ lut_config->enable_pause_hi = !!(flags & PM_PWM_LUT_PAUSE_HI_EN);
+ lut_config->enable_pause_lo = !!(flags & PM_PWM_LUT_PAUSE_LO_EN);
+
+ rc = qpnp_lpg_change_lut(chip);
+
+ if (!rc && chip->enabled)
+ rc = qpnp_lpg_configure_lut_state(chip, QPNP_LUT_ENABLE);
+
+ return rc;
+}
+
+static int _pwm_enable(struct qpnp_pwm_chip *chip)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lpg_lock, flags);
+
+ if (QPNP_IS_PWM_CONFIG_SELECTED(
+ chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]) ||
+ chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED) {
+ rc = qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_ENABLE);
+ } else if (!(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED)) {
+ rc = qpnp_lpg_configure_lut_state(chip,
+ QPNP_LUT_ENABLE);
+ }
+
+ if (!rc)
+ chip->enabled = true;
+
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+ return rc;
+}
+
+/* APIs */
+/**
+ * qpnp_pwm_free - free a PWM device
+ * @pwm_chip: the PWM chip
+ * @pwm: the PWM device
+ */
+static void qpnp_pwm_free(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm)
+{
+ struct qpnp_pwm_chip *chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&chip->lpg_lock, flags);
+
+ qpnp_lpg_configure_pwm_state(chip, QPNP_PWM_DISABLE);
+ if (!(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED))
+ qpnp_lpg_configure_lut_state(chip, QPNP_LUT_DISABLE);
+
+ chip->enabled = false;
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+}
+
+/**
+ * qpnp_pwm_config - change a PWM device configuration
+ * @pwm_chip: the PWM chip
+ * @pwm: the PWM device
+ * @duty_ns: duty cycle in nanoseconds
+ * @period_ns: period in nanoseconds
+ */
+static int qpnp_pwm_config(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm, int duty_ns, int period_ns)
+{
+ int rc;
+ unsigned long flags;
+ struct qpnp_pwm_chip *chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+ int prev_period_us = chip->pwm_config.pwm_period;
+
+ if ((unsigned)period_ns < PM_PWM_PERIOD_MIN * NSEC_PER_USEC) {
+ pr_err("Invalid pwm handle or parameters\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&chip->lpg_lock, flags);
+
+ if (prev_period_us > INT_MAX / NSEC_PER_USEC ||
+ prev_period_us * NSEC_PER_USEC != period_ns) {
+ qpnp_lpg_calc_period(LVL_NSEC, period_ns, chip);
+ qpnp_lpg_save_period(chip);
+ pwm->period = period_ns;
+ chip->pwm_config.pwm_period = period_ns / NSEC_PER_USEC;
+ }
+
+ rc = _pwm_config(chip, LVL_NSEC, duty_ns, period_ns);
+
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+ if (rc)
+ pr_err("Failed to configure PWM mode\n");
+
+ return rc;
+}
+
+/**
+ * qpnp_pwm_enable - start a PWM output toggling
+ * @pwm_chip: the PWM chip
+ * @pwm: the PWM device
+ */
+static int qpnp_pwm_enable(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm)
+{
+ int rc;
+ struct qpnp_pwm_chip *chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+ rc = _pwm_enable(chip);
+ if (rc)
+ pr_err("Failed to enable PWM channel: %d\n", chip->channel_id);
+
+ return rc;
+}
+
+/**
+ * qpnp_pwm_disable - stop a PWM output toggling
+ * @pwm_chip: the PWM chip
+ * @pwm: the PWM device
+ */
+static void qpnp_pwm_disable(struct pwm_chip *pwm_chip,
+ struct pwm_device *pwm)
+{
+
+ struct qpnp_pwm_chip *chip = qpnp_pwm_from_pwm_chip(pwm_chip);
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&chip->lpg_lock, flags);
+
+ if (QPNP_IS_PWM_CONFIG_SELECTED(
+ chip->qpnp_lpg_registers[QPNP_ENABLE_CONTROL]) ||
+ chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED)
+ rc = qpnp_lpg_configure_pwm_state(chip,
+ QPNP_PWM_DISABLE);
+ else if (!(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED))
+ rc = qpnp_lpg_configure_lut_state(chip,
+ QPNP_LUT_DISABLE);
+
+ if (!rc)
+ chip->enabled = false;
+
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+ if (rc)
+ pr_err("Failed to disable PWM channel: %d\n",
+ chip->channel_id);
+}
+
+static int _pwm_change_mode(struct qpnp_pwm_chip *chip, enum pm_pwm_mode mode)
+{
+ int rc;
+
+ if (mode)
+ rc = qpnp_configure_lpg_control(chip);
+ else
+ rc = qpnp_configure_pwm_control(chip);
+
+ if (rc)
+ pr_err("Failed to change the mode\n");
+ return rc;
+}
+
+/**
+ * pwm_change_mode - Change the PWM mode configuration
+ * @pwm: the PWM device
+ * @mode: Mode selection value
+ */
+int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode)
+{
+ int rc;
+ unsigned long flags;
+ struct qpnp_pwm_chip *chip;
+
+ if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL) {
+ pr_err("Invalid pwm handle or no pwm_chip\n");
+ return -EINVAL;
+ }
+
+ if (mode < PM_PWM_MODE_PWM || mode > PM_PWM_MODE_LPG) {
+ pr_err("Invalid mode value\n");
+ return -EINVAL;
+ }
+
+ chip = qpnp_pwm_from_pwm_dev(pwm);
+
+ spin_lock_irqsave(&chip->lpg_lock, flags);
+ rc = _pwm_change_mode(chip, mode);
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_change_mode);
+
+/**
+ * pwm_config_period - change PWM period
+ *
+ * @pwm: the PWM device
+ * @period: period settings in struct pwm_period_config
+ */
+int pwm_config_period(struct pwm_device *pwm,
+ struct pwm_period_config *period)
+{
+ struct _qpnp_pwm_config *pwm_config;
+ struct qpnp_lpg_config *lpg_config;
+ struct qpnp_pwm_chip *chip;
+ unsigned long flags;
+ int rc = 0;
+
+ if (pwm == NULL || IS_ERR(pwm) || period == NULL)
+ return -EINVAL;
+ if (pwm->chip == NULL)
+ return -ENODEV;
+
+ chip = qpnp_pwm_from_pwm_dev(pwm);
+ pwm_config = &chip->pwm_config;
+ lpg_config = &chip->lpg_config;
+
+ spin_lock_irqsave(&chip->lpg_lock, flags);
+
+ pwm_config->period.pwm_size = period->pwm_size;
+ pwm_config->period.clk = period->clk;
+ pwm_config->period.pre_div = period->pre_div;
+ pwm_config->period.pre_div_exp = period->pre_div_exp;
+
+ qpnp_lpg_save_period(chip);
+
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_LPG_PWM_SIZE_CLK),
+ &chip->qpnp_lpg_registers[QPNP_LPG_PWM_SIZE_CLK], 1);
+
+ if (rc) {
+ pr_err("Write failed: QPNP_LPG_PWM_SIZE_CLK register, rc: %d\n",
+ rc);
+ goto out_unlock;
+ }
+
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl, chip->spmi_dev->sid,
+ SPMI_LPG_REG_ADDR(lpg_config->base_addr,
+ QPNP_LPG_PWM_FREQ_PREDIV_CLK),
+ &chip->qpnp_lpg_registers[QPNP_LPG_PWM_FREQ_PREDIV_CLK], 1);
+ if (rc) {
+ pr_err("Failed to write to QPNP_LPG_PWM_FREQ_PREDIV_CLK register, rc = %d\n",
+ rc);
+ }
+
+out_unlock:
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL(pwm_config_period);
+
+/**
+ * pwm_config_pwm_value - change a PWM device configuration
+ * @pwm: the PWM device
+ * @pwm_value: the duty cycle in raw PWM value (< 2^pwm_size)
+ */
+int pwm_config_pwm_value(struct pwm_device *pwm, int pwm_value)
+{
+ struct qpnp_lpg_config *lpg_config;
+ struct _qpnp_pwm_config *pwm_config;
+ struct qpnp_pwm_chip *chip;
+ unsigned long flags;
+ int rc = 0;
+
+ if (pwm == NULL || IS_ERR(pwm)) {
+ pr_err("Invalid parameter passed\n");
+ return -EINVAL;
+ }
+
+ if (pwm->chip == NULL) {
+ pr_err("Invalid device handle\n");
+ return -ENODEV;
+ }
+
+ chip = qpnp_pwm_from_pwm_dev(pwm);
+ lpg_config = &chip->lpg_config;
+ pwm_config = &chip->pwm_config;
+
+ spin_lock_irqsave(&chip->lpg_lock, flags);
+
+ if (pwm_config->pwm_value == pwm_value)
+ goto out_unlock;
+
+ pwm_config->pwm_value = pwm_value;
+
+ rc = qpnp_lpg_save_pwm_value(chip);
+
+ if (rc)
+ pr_err("Could not update PWM value for channel %d rc=%d\n",
+ chip->channel_id, rc);
+
+out_unlock:
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_config_pwm_value);
+
+/**
+ * pwm_config_us - change a PWM device configuration
+ * @pwm: the PWM device
+ * @period_us: period in microseconds
+ * @duty_us: duty cycle in microseconds
+ */
+int pwm_config_us(struct pwm_device *pwm, int duty_us, int period_us)
+{
+ int rc;
+ unsigned long flags;
+ struct qpnp_pwm_chip *chip;
+
+ if (pwm == NULL || IS_ERR(pwm) ||
+ duty_us > period_us ||
+ (unsigned)period_us > PM_PWM_PERIOD_MAX ||
+ (unsigned)period_us < PM_PWM_PERIOD_MIN) {
+ pr_err("Invalid pwm handle or parameters\n");
+ return -EINVAL;
+ }
+
+ chip = qpnp_pwm_from_pwm_dev(pwm);
+
+ spin_lock_irqsave(&chip->lpg_lock, flags);
+
+ if (chip->pwm_config.pwm_period != period_us) {
+ qpnp_lpg_calc_period(LVL_USEC, period_us, chip);
+ qpnp_lpg_save_period(chip);
+ chip->pwm_config.pwm_period = period_us;
+ if ((unsigned)period_us > (unsigned)(-1) / NSEC_PER_USEC)
+ pwm->period = 0;
+ else
+ pwm->period = (unsigned)period_us * NSEC_PER_USEC;
+ }
+
+ rc = _pwm_config(chip, LVL_USEC, duty_us, period_us);
+
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+ if (rc)
+ pr_err("Failed to configure PWM mode\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(pwm_config_us);
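+
+/*
+ * Illustrative usage sketch, not part of this driver: a consumer that has
+ * already obtained a pwm_device (e.g. via of_pwm_get()) could select PWM
+ * mode and program a 50% duty cycle at a 1 ms period (assumed to lie within
+ * PM_PWM_PERIOD_MIN/MAX) roughly as follows:
+ *
+ *	rc = pwm_change_mode(pwm, PM_PWM_MODE_PWM);
+ *	if (!rc)
+ *		rc = pwm_config_us(pwm, 500, 1000);
+ *	if (!rc)
+ *		rc = pwm_enable(pwm);
+ */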
+
+/**
+ * pwm_lut_config - change LPG LUT device configuration
+ * @pwm: the PWM device
+ * @period_us: period in microseconds
+ * @duty_pct: array of duty cycles in percent, like 20, 50.
+ * @lut_params: Lookup table parameters
+ */
+int pwm_lut_config(struct pwm_device *pwm, int period_us,
+ int duty_pct[], struct lut_params lut_params)
+{
+ unsigned long flags;
+ struct qpnp_pwm_chip *chip;
+ int rc = 0;
+
+ if (pwm == NULL || IS_ERR(pwm) || !lut_params.idx_len) {
+ pr_err("Invalid pwm handle or idx_len=0\n");
+ return -EINVAL;
+ }
+
+ if (pwm->chip == NULL)
+ return -ENODEV;
+
+ if (duty_pct == NULL && !(lut_params.flags & PM_PWM_LUT_NO_TABLE)) {
+ pr_err("Invalid duty_pct with flag\n");
+ return -EINVAL;
+ }
+
+ chip = qpnp_pwm_from_pwm_dev(pwm);
+
+ if (chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED) {
+ pr_err("LUT mode isn't supported\n");
+ return -EINVAL;
+ }
+
+ if ((lut_params.start_idx + lut_params.idx_len) >
+ chip->lpg_config.lut_size) {
+ pr_err("Exceed LUT limit\n");
+ return -EINVAL;
+ }
+
+ if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
+ (unsigned)period_us < PM_PWM_PERIOD_MIN) {
+ pr_err("Period out of range\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&chip->lpg_lock, flags);
+
+ if (chip->pwm_config.pwm_period != period_us) {
+ qpnp_lpg_calc_period(LVL_USEC, period_us, chip);
+ qpnp_lpg_save_period(chip);
+ chip->pwm_config.pwm_period = period_us;
+ }
+
+ rc = _pwm_lut_config(chip, period_us, duty_pct, lut_params);
+
+ spin_unlock_irqrestore(&chip->lpg_lock, flags);
+
+ if (rc)
+ pr_err("Failed to configure LUT\n");
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pwm_lut_config);
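+
+/*
+ * Illustrative usage sketch, not part of this driver: a client (for example
+ * an LED driver) holding a requested pwm_device could program a looping
+ * ramp over five duty-cycle steps with something like the following; the
+ * values are hypothetical and assume the channel has an LUT with a large
+ * enough lut_size:
+ *
+ *	int rc;
+ *	int duty_pct[] = { 0, 25, 50, 75, 100 };
+ *	struct lut_params lp = {
+ *		.start_idx = 0,
+ *		.idx_len = ARRAY_SIZE(duty_pct),
+ *		.ramp_step_ms = 50,
+ *		.flags = PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP,
+ *	};
+ *
+ *	rc = pwm_lut_config(pwm, 1000, duty_pct, lp);
+ *	if (!rc)
+ *		rc = pwm_enable(pwm);
+ */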
+
+static int qpnp_parse_pwm_dt_config(struct device_node *of_pwm_node,
+ struct device_node *of_parent, struct qpnp_pwm_chip *chip)
+{
+ int rc, period;
+
+ rc = of_property_read_u32(of_parent, "qcom,period", (u32 *)&period);
+ if (rc) {
+ pr_err("node is missing PWM Period prop");
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_pwm_node, "qcom,duty",
+ &chip->pwm_config.pwm_duty);
+ if (rc) {
+ pr_err("node is missing PWM Duty prop");
+ return rc;
+ }
+
+ rc = _pwm_config(chip, LVL_USEC, chip->pwm_config.pwm_duty, period);
+
+ return rc;
+}
+
+#define qpnp_check_optional_dt_bindings(func) \
+do { \
+ rc = func; \
+ if (rc && rc != -EINVAL) \
+ goto out; \
+ rc = 0; \
+} while (0)
+
+static int qpnp_parse_lpg_dt_config(struct device_node *of_lpg_node,
+ struct device_node *of_parent, struct qpnp_pwm_chip *chip)
+{
+ int rc, period, list_size, start_idx, *duty_pct_list;
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ struct qpnp_lut_config *lut_config = &lpg_config->lut_config;
+ struct lut_params lut_params;
+
+ rc = of_property_read_u32(of_parent, "qcom,period", &period);
+ if (rc) {
+ pr_err("node is missing PWM Period prop\n");
+ return rc;
+ }
+
+ if (!of_get_property(of_lpg_node, "qcom,duty-percents", &list_size)) {
+ pr_err("node is missing duty-pct list\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(of_lpg_node, "cell-index", &start_idx);
+ if (rc) {
+ pr_err("Missing start index\n");
+ return rc;
+ }
+
+ list_size /= sizeof(u32);
+
+ if (list_size + start_idx > lpg_config->lut_size) {
+ pr_err("duty pct list size overflows\n");
+ return -EINVAL;
+ }
+
+ duty_pct_list = kzalloc(sizeof(u32) * list_size, GFP_KERNEL);
+
+ if (!duty_pct_list) {
+ pr_err("kzalloc failed on duty_pct_list\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_lpg_node, "qcom,duty-percents",
+ duty_pct_list, list_size);
+ if (rc) {
+ pr_err("invalid or missing property: qcom,duty-percents\n");
+ kfree(duty_pct_list);
+ return rc;
+ }
+
+ /* Read optional properties */
+ qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+ "qcom,ramp-step-duration", &lut_config->ramp_step_ms));
+ qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+ "qcom,lpg-lut-pause-hi", &lut_config->lut_pause_hi_cnt));
+ qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+ "qcom,lpg-lut-pause-lo", &lut_config->lut_pause_lo_cnt));
+ qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+ "qcom,lpg-lut-ramp-direction",
+ (u32 *)&lut_config->ramp_direction));
+ qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+ "qcom,lpg-lut-pattern-repeat",
+ (u32 *)&lut_config->pattern_repeat));
+ qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+ "qcom,lpg-lut-ramp-toggle",
+ (u32 *)&lut_config->ramp_toggle));
+ qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+ "qcom,lpg-lut-enable-pause-hi",
+ (u32 *)&lut_config->enable_pause_hi));
+ qpnp_check_optional_dt_bindings(of_property_read_u32(of_lpg_node,
+ "qcom,lpg-lut-enable-pause-lo",
+ (u32 *)&lut_config->enable_pause_lo));
+
+ qpnp_set_lut_params(&lut_params, lut_config, start_idx, list_size);
+
+ _pwm_lut_config(chip, period, duty_pct_list, lut_params);
+
+out:
+ kfree(duty_pct_list);
+ return rc;
+}
+
+static int qpnp_lpg_get_rev_subtype(struct qpnp_pwm_chip *chip)
+{
+ int rc;
+
+ rc = spmi_ext_register_readl(chip->spmi_dev->ctrl,
+ chip->spmi_dev->sid,
+ chip->lpg_config.base_addr + SPMI_LPG_SUB_TYPE_OFFSET,
+ &chip->sub_type, 1);
+
+ if (rc) {
+ pr_err("Couldn't read subtype rc: %d\n", rc);
+ goto out;
+ }
+
+ rc = spmi_ext_register_readl(chip->spmi_dev->ctrl,
+ chip->spmi_dev->sid,
+ chip->lpg_config.base_addr + SPMI_LPG_REVISION2_OFFSET,
+ (u8 *) &chip->revision, 1);
+
+ if (rc) {
+ pr_err("Couldn't read revision2 rc: %d\n", rc);
+ goto out;
+ }
+
+ if (chip->revision < QPNP_LPG_REVISION_0 ||
+ chip->revision > QPNP_LPG_REVISION_1) {
+ pr_err("Unknown LPG revision detected, rev:%d\n",
+ chip->revision);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (chip->sub_type != QPNP_PWM_MODE_ONLY_SUB_TYPE
+ && chip->sub_type != QPNP_LPG_CHAN_SUB_TYPE
+ && chip->sub_type != QPNP_LPG_S_CHAN_SUB_TYPE) {
+ pr_err("Unknown LPG/PWM subtype detected, subtype:%d\n",
+ chip->sub_type);
+ rc = -EINVAL;
+ }
+out:
+ pr_debug("LPG rev 0x%02x subtype 0x%02x rc: %d\n", chip->revision,
+ chip->sub_type, rc);
+ return rc;
+}
+
+/* Fill in lpg device elements based on values found in device tree. */
+static int qpnp_parse_dt_config(struct spmi_device *spmi,
+ struct qpnp_pwm_chip *chip)
+{
+ int rc, enable, lut_entry_size, list_size, i;
+ const char *label;
+ struct resource *res;
+ struct device_node *node;
+ int found_pwm_subnode = 0;
+ int found_lpg_subnode = 0;
+ struct device_node *of_node = spmi->dev.of_node;
+ struct qpnp_lpg_config *lpg_config = &chip->lpg_config;
+ struct qpnp_lut_config *lut_config = &lpg_config->lut_config;
+ struct _qpnp_pwm_config *pwm_config = &chip->pwm_config;
+ int force_pwm_size = 0;
+ int pwm_size_list[QPNP_PWM_SIZES_SUPPORTED];
+
+ rc = of_property_read_u32(of_node, "qcom,channel-id",
+ &chip->channel_id);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: node is missing LPG channel id\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!of_get_property(of_node, "qcom,supported-sizes", &list_size)) {
+ pr_err("Missing qcom,supported-size list\n");
+ return -EINVAL;
+ }
+
+ list_size /= sizeof(u32);
+ if (list_size > QPNP_PWM_SIZES_SUPPORTED) {
+ pr_err(" qcom,supported-size list is too big\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,supported-sizes",
+ pwm_size_list, list_size);
+
+ if (rc) {
+ pr_err("Invalid qcom,supported-size property\n");
+ return rc;
+ }
+
+ for (i = 0; i < list_size; i++) {
+ pwm_config->supported_sizes |=
+ (1 << (pwm_size_list[i] - QPNP_MIN_PWM_BIT_SIZE));
+ }
+
+ if (!(pwm_config->supported_sizes == QPNP_PWM_SIZE_6_9_BIT ||
+ pwm_config->supported_sizes == QPNP_PWM_SIZE_7_8_BIT ||
+ pwm_config->supported_sizes == QPNP_PWM_SIZE_6_7_9_BIT)) {
+ pr_err("PWM sizes list qcom,supported-size is not proper\n");
+ return -EINVAL;
+ }
+
+ /*
+ * For certain LPG channels the PWM size can be forced, so that for
+ * every requested pwm period the closest pwm frequency is selected
+ * in qpnp_lpg_calc_period() for the forced pwm size.
+ */
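+ /*
+ * A hypothetical device tree fragment (illustrative only, not taken
+ * from the bindings document) forcing a 9-bit size on a channel that
+ * supports 6- and 9-bit output might look like:
+ *
+ * qcom,supported-sizes = <6 9>;
+ * qcom,force-pwm-size = <9>;
+ */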
+ rc = of_property_read_u32(of_node, "qcom,force-pwm-size",
+ &force_pwm_size);
+ if (pwm_config->supported_sizes == QPNP_PWM_SIZE_7_8_BIT) {
+ if (!(force_pwm_size == QPNP_PWM_SIZE_7_BIT ||
+ force_pwm_size == QPNP_PWM_SIZE_8_BIT))
+ force_pwm_size = 0;
+ } else if (chip->sub_type == QPNP_PWM_MODE_ONLY_SUB_TYPE) {
+ if (!(force_pwm_size == QPNP_PWM_SIZE_6_BIT ||
+ force_pwm_size == QPNP_PWM_SIZE_9_BIT))
+ force_pwm_size = 0;
+ } else if (pwm_config->supported_sizes == QPNP_PWM_SIZE_6_7_9_BIT) {
+ if (!(force_pwm_size == QPNP_PWM_SIZE_6_BIT ||
+ force_pwm_size == QPNP_PWM_SIZE_7_BIT ||
+ force_pwm_size == QPNP_PWM_SIZE_9_BIT))
+ force_pwm_size = 0;
+ }
+
+ pwm_config->force_pwm_size = force_pwm_size;
+ res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
+ QPNP_LPG_CHANNEL_BASE);
+ if (!res) {
+ dev_err(&spmi->dev, "%s: node is missing base address\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ lpg_config->base_addr = res->start;
+
+ rc = qpnp_lpg_get_rev_subtype(chip);
+ if (rc)
+ return rc;
+
+ res = spmi_get_resource_byname(spmi, NULL, IORESOURCE_MEM,
+ QPNP_LPG_LUT_BASE);
+ if (!res) {
+ chip->flags |= QPNP_PWM_LUT_NOT_SUPPORTED;
+ } else {
+ lpg_config->lut_base_addr = res->start;
+		/*
+		 * Each LUT entry is 2 bytes for the generic LUT and 1 byte
+		 * for the KPDBL/GLED LUT.
+		 */
+ lpg_config->lut_size = resource_size(res) >> 1;
+ lut_entry_size = sizeof(u16);
+
+ if (pwm_config->supported_sizes == QPNP_PWM_SIZE_7_8_BIT) {
+ lpg_config->lut_size = resource_size(res);
+ lut_entry_size = sizeof(u8);
+ }
+
+ lut_config->duty_pct_list = kzalloc(lpg_config->lut_size *
+ lut_entry_size, GFP_KERNEL);
+ if (!lut_config->duty_pct_list) {
+ pr_err("can not allocate duty pct list\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,ramp-index",
+ &lut_config->ramp_index);
+ if (rc) {
+ pr_err("Missing LPG qcom,ramp-index property\n");
+ kfree(lut_config->duty_pct_list);
+ return rc;
+ }
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,lpg-dtest-line",
+ &chip->dtest_line);
+ if (rc) {
+ chip->in_test_mode = 0;
+ } else {
+ chip->in_test_mode = 1;
+ rc = of_property_read_u32(of_node, "qcom,dtest-output",
+ &chip->dtest_output);
+ if (rc) {
+ pr_err("Missing DTEST output configuration\n");
+ chip->dtest_output = 0;
+ }
+ }
+
+ for_each_child_of_node(of_node, node) {
+		rc = of_property_read_string(node, "label", &label);
+ if (rc) {
+			dev_err(&spmi->dev, "%s: Missing label property\n",
+ __func__);
+ goto out;
+ }
+		if (!strncmp(label, "pwm", 3)) {
+ rc = qpnp_parse_pwm_dt_config(node, of_node, chip);
+ if (rc)
+ goto out;
+ found_pwm_subnode = 1;
+		} else if (!strncmp(label, "lpg", 3) &&
+ !(chip->flags & QPNP_PWM_LUT_NOT_SUPPORTED)) {
+ rc = qpnp_parse_lpg_dt_config(node, of_node, chip);
+ if (rc)
+ goto out;
+ found_lpg_subnode = 1;
+ } else {
+			dev_err(&spmi->dev, "%s: Invalid value for label property\n",
+ __func__);
+ }
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,mode-select", &enable);
+ if (rc)
+ goto read_opt_props;
+
+ if ((enable == PM_PWM_MODE_PWM && found_pwm_subnode == 0) ||
+ (enable == PM_PWM_MODE_LPG && found_lpg_subnode == 0)) {
+ dev_err(&spmi->dev, "%s: Invalid mode select\n", __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ _pwm_change_mode(chip, enable);
+ _pwm_enable(chip);
+
+read_opt_props:
+ /* Initialize optional config parameters from DT if provided */
+	of_property_read_string(of_node, "qcom,channel-owner",
+				&chip->channel_owner);
+
+ return 0;
+
+out:
+ kfree(lut_config->duty_pct_list);
+ return rc;
+}
+
+static struct pwm_ops qpnp_pwm_ops = {
+ .enable = qpnp_pwm_enable,
+ .disable = qpnp_pwm_disable,
+ .config = qpnp_pwm_config,
+ .free = qpnp_pwm_free,
+ .owner = THIS_MODULE,
+};
+
+static int qpnp_pwm_probe(struct spmi_device *spmi)
+{
+ struct qpnp_pwm_chip *pwm_chip;
+ int rc;
+
+ pwm_chip = kzalloc(sizeof(*pwm_chip), GFP_KERNEL);
+ if (pwm_chip == NULL) {
+ pr_err("kzalloc() failed.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&pwm_chip->lpg_lock);
+
+ pwm_chip->spmi_dev = spmi;
+ dev_set_drvdata(&spmi->dev, pwm_chip);
+
+ rc = qpnp_parse_dt_config(spmi, pwm_chip);
+
+ if (rc)
+ goto failed_config;
+
+ pwm_chip->chip.dev = &spmi->dev;
+ pwm_chip->chip.ops = &qpnp_pwm_ops;
+ pwm_chip->chip.base = -1;
+ pwm_chip->chip.npwm = 1;
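+	/* Each LPG/PWM peripheral is exposed as a single-channel PWM chip. */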
+
+ rc = pwmchip_add(&pwm_chip->chip);
+ if (rc < 0) {
+ pr_err("pwmchip_add() failed: %d\n", rc);
+ goto failed_insert;
+ }
+
+ if (pwm_chip->channel_owner)
+ pwm_chip->chip.pwms[0].label = pwm_chip->channel_owner;
+
+ return 0;
+
+failed_insert:
+ kfree(pwm_chip->lpg_config.lut_config.duty_pct_list);
+failed_config:
+ dev_set_drvdata(&spmi->dev, NULL);
+ kfree(pwm_chip);
+ return rc;
+}
+
+static int qpnp_pwm_remove(struct spmi_device *spmi)
+{
+ struct qpnp_pwm_chip *pwm_chip;
+ struct qpnp_lpg_config *lpg_config;
+
+ pwm_chip = dev_get_drvdata(&spmi->dev);
+
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ if (pwm_chip) {
+ lpg_config = &pwm_chip->lpg_config;
+ pwmchip_remove(&pwm_chip->chip);
+ kfree(lpg_config->lut_config.duty_pct_list);
+ kfree(pwm_chip);
+ }
+
+ return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = QPNP_LPG_DRIVER_NAME, },
+ {}
+};
+
+static const struct spmi_device_id qpnp_lpg_id[] = {
+ { QPNP_LPG_DRIVER_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_lpg_id);
+
+static struct spmi_driver qpnp_lpg_driver = {
+ .driver = {
+ .name = QPNP_LPG_DRIVER_NAME,
+ .of_match_table = spmi_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = qpnp_pwm_probe,
+ .remove = qpnp_pwm_remove,
+ .id_table = qpnp_lpg_id,
+};
+
+/**
+ * qpnp_lpg_init() - register spmi driver for qpnp-lpg
+ */
+int __init qpnp_lpg_init(void)
+{
+ return spmi_driver_register(&qpnp_lpg_driver);
+}
+
+static void __exit qpnp_lpg_exit(void)
+{
+ spmi_driver_unregister(&qpnp_lpg_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC LPG driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_LPG_DRIVER_NAME);
+
+subsys_initcall(qpnp_lpg_init);
+module_exit(qpnp_lpg_exit);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 8df0b0e62976..b07545fbc2a5 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -784,5 +784,157 @@ config REGULATOR_WM8994
This driver provides support for the voltage regulators on the
WM8994 CODEC.
+config REGULATOR_RPM_SMD
+ bool "RPM SMD regulator driver"
+ depends on OF
+ depends on MSM_RPM_SMD
+ help
+ Compile in support for the RPM SMD regulator driver which is used for
+ setting voltages and other parameters of the various power rails
+ supplied by some Qualcomm PMICs. The RPM SMD regulator driver should
+ be used on systems which contain an RPM which communicates with the
+ application processor over SMD.
+
+config REGULATOR_QPNP
+ depends on SPMI || MSM_SPMI
+ depends on OF_SPMI
+ tristate "Qualcomm QPNP regulator support"
+ help
+ This driver supports voltage regulators in Qualcomm PMIC chips which
+	  comply with QPNP. QPNP is an SPMI-based PMIC implementation. These
+ chips provide several different varieties of LDO and switching
+ regulators. They also provide voltage switches and boost regulators.
+
+config REGULATOR_QPNP_LABIBB
+ depends on SPMI || MSM_SPMI
+ depends on OF_SPMI
+ tristate "Qualcomm Technologies, Inc QPNP LAB/IBB regulator support"
+ help
+	  This driver supports the QPNP LAB and IBB voltage regulators found in
+	  Qualcomm Technologies, Inc PMIC chips. LAB and IBB are part of an
+	  SPMI-based PMIC implementation. The LAB regulator can be used as a
+	  regular positive boost regulator. IBB can be used as a regular
+ negative boost regulator. LAB/IBB regulators can also be used
+ together for LCD or AMOLED.
+
+config REGULATOR_SPM
+ bool "SPM regulator driver"
+ depends on (SPMI || MSM_SPMI) && OF_SPMI
+ help
+ Enable support for the SPM regulator driver which is used for
+ setting voltages of processor supply regulators via the SPM module
+ found inside of Qualcomm Technologies (QTI) chips. The SPM regulator
+ driver can be used on QTI SoCs where the APSS processor cores are
+ supplied by their own PMIC regulator.
+
+config REGULATOR_CPR
+ bool "RBCPR regulator driver for APC"
+ depends on OF
+ help
+	  Compile in the RBCPR (RapidBridge Core Power Reduction) driver to
+	  support corner voting for the APC power rail. The driver takes the
+	  PTE process voltage suggestions stored in efuse as initial settings.
+	  It converts each corner vote to a voltage value before writing to a
+	  voltage regulator API, such as that provided by the spm-regulator
+	  driver.
+
+config REGULATOR_CPR2_GFX
+ bool "RBCPR regulator driver for GFX"
+ depends on OF
+ help
+ This driver supports the CPR (core power reduction) controller for the
+ graphics (GFX) rail. The GFX CPR2 controller monitors the graphics voltage
+ requirements. This driver reads initial voltage values out of hardware
+ fuses and CPR target quotient values out of device tree.
+
+config REGULATOR_CPR3
+ bool "CPR3 regulator core support"
+ help
+ This driver supports Core Power Reduction (CPR) version 3 controllers
+ which are used by some Qualcomm Technologies, Inc. (QTI) SoCs to
+ manage important voltage regulators. CPR3 controllers are capable of
+ monitoring several ring oscillator sensing loops simultaneously. The
+ CPR3 controller informs software when the silicon conditions require
+ the supply voltage to be increased or decreased. On certain supply
+ rails, the CPR3 controller is able to propagate the voltage increase
+ or decrease requests all the way to the PMIC without software
+ involvement.
+
+config REGULATOR_CPR3_HMSS
+ bool "CPR3 regulator for HMSS"
+ depends on OF
+ select REGULATOR_CPR3
+ help
+ This driver supports Qualcomm Technologies, Inc. HMSS application
+ processor specific features including memory array power mux (APM)
+ switching, two CPR3 threads which monitor the two HMSS clusters that
+ are both powered by a shared supply, and hardware closed-loop auto
+ voltage stepping. This driver reads both initial voltage and CPR
+ target quotient values out of hardware fuses.
+
+config REGULATOR_CPR3_MMSS
+ bool "RBCPR3 regulator for MMSS"
+ depends on OF
+ select REGULATOR_CPR3
+ help
+ This driver supports Qualcomm Technologies, Inc. MMSS graphics
+ processor specific features. The MMSS CPR3 controller only uses one
+ thread to monitor the MMSS voltage requirements. This driver reads
+ initial voltage values out of hardware fuses and CPR target quotient
+ values out of device tree.
+
+config REGULATOR_CPR4_APSS
+ bool "CPR4 regulator for APSS"
+ depends on OF
+ select REGULATOR_CPR3
+ help
+ This driver supports Qualcomm Technologies, Inc. APSS application
+ processor specific features including memory array power mux (APM)
+	  switching, one CPR4 thread which monitors the two APSS clusters that
+ are both powered by a shared supply, hardware closed-loop auto
+ voltage stepping, voltage adjustments based on online core count,
+ voltage adjustments based on temperature readings, and voltage
+ adjustments for performance boost mode. This driver reads both initial
+ voltage and CPR target quotient values out of hardware fuses.
+
+config REGULATOR_KRYO
+ bool "Kryo regulator driver"
+ depends on OF
+ help
+ Some MSM designs have CPUs that can be directly powered from a common
+ voltage rail via a Block Head Switch (BHS) or an LDO whose output voltage
+ can be configured for use when certain power constraints are met.
+ Say yes to support management of LDO and BHS modes for the clusters in the
+ CPU subsystem.
+
+config REGULATOR_MEM_ACC
+ tristate "QTI Memory accelerator regulator driver"
+ help
+ Say y here to enable the memory accelerator driver for Qualcomm
+	  Technologies (QTI) chips. The accelerator controls the delays
+	  applied to memory accesses.
+ This driver configures the power-mode (corner) for the memory
+ accelerator.
+
+config REGULATOR_PROXY_CONSUMER
+ bool "Boot time regulator proxy consumer support"
+ help
+ This driver provides support for boot time regulator proxy requests.
+ It can enforce a specified voltage range, set a minimum current,
+ and/or keep a regulator enabled. It is needed in circumstances where
+ reducing one or more of these three quantities will cause hardware to
+ stop working if performed before the driver managing the hardware has
+ probed.
+
+config REGULATOR_STUB
+ tristate "Stub Regulator"
+ help
+	  This driver adds stub regulator support. The driver has no real
+	  hardware based implementation. It allows clients to register
+ their regulator device constraints and use all of the standard
+ regulator interfaces. This is useful for bringing up new platforms
+ when the real hardware based implementation may not be yet available.
+ Clients can use the real regulator device names with proper
+ constraint checking while the real driver is being developed.
+
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 0f8174913c17..d848e7d71468 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -102,5 +102,19 @@ obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
+obj-$(CONFIG_REGULATOR_PROXY_CONSUMER) += proxy-consumer.o
+obj-$(CONFIG_REGULATOR_MEM_ACC) += mem-acc-regulator.o
+obj-$(CONFIG_REGULATOR_RPM_SMD) += rpm-smd-regulator.o
+obj-$(CONFIG_REGULATOR_QPNP) += qpnp-regulator.o
+obj-$(CONFIG_REGULATOR_SPM) += spm-regulator.o
+obj-$(CONFIG_REGULATOR_CPR) += cpr-regulator.o
+obj-$(CONFIG_REGULATOR_CPR3) += cpr3-regulator.o cpr3-util.o
+obj-$(CONFIG_REGULATOR_CPR3_HMSS) += cpr3-hmss-regulator.o
+obj-$(CONFIG_REGULATOR_CPR3_MMSS) += cpr3-mmss-regulator.o
+obj-$(CONFIG_REGULATOR_CPR4_APSS) += cpr4-apss-regulator.o
+obj-$(CONFIG_REGULATOR_QPNP_LABIBB) += qpnp-labibb-regulator.o
+obj-$(CONFIG_REGULATOR_STUB) += stub-regulator.o
+obj-$(CONFIG_REGULATOR_KRYO) += kryo-regulator.o
+obj-$(CONFIG_REGULATOR_CPR2_GFX) += cpr2-gfx-regulator.o
ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/cpr-regulator.c b/drivers/regulator/cpr-regulator.c
new file mode 100644
index 000000000000..4c21851333c2
--- /dev/null
+++ b/drivers/regulator/cpr-regulator.c
@@ -0,0 +1,6361 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/sort.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/cpr-regulator.h>
+#include <linux/msm_thermal.h>
+#include <linux/msm_tsens.h>
+#include <soc/qcom/scm.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * n)
+
+#define RBCPR_GCNT_TARGET_GCNT_BITS 10
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK ((1<<RBCPR_GCNT_TARGET_GCNT_BITS)-1)
+
+/* RBCPR Sensor Mask and Bypass Registers */
+#define REG_RBCPR_SENSOR_MASK0 0x20
+#define RBCPR_SENSOR_MASK0_SENSOR(n) (~BIT(n))
+#define REG_RBCPR_SENSOR_BYPASS0 0x30
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4C
+
+#define RBIF_TIMER_ADJ_CONS_UP_BITS 4
+#define RBIF_TIMER_ADJ_CONS_UP_MASK ((1<<RBIF_TIMER_ADJ_CONS_UP_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_BITS 4
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK ((1<<RBIF_TIMER_ADJ_CONS_DOWN_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_BITS 8
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK ((1<<RBIF_TIMER_ADJ_CLAMP_INT_BITS)-1)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define REG_RBCPR_STEP_QUOT 0x80
+#define REG_RBIF_SW_VLEVEL 0x94
+
+#define RBIF_LIMIT_CEILING_BITS 6
+#define RBIF_LIMIT_CEILING_MASK ((1<<RBIF_LIMIT_CEILING_BITS)-1)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK ((1<<RBIF_LIMIT_FLOOR_BITS)-1)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define RBCPR_STEP_QUOT_STEPQUOT_BITS 8
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK ((1<<RBCPR_STEP_QUOT_STEPQUOT_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_BITS 4
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK ((1<<RBCPR_STEP_QUOT_IDLE_CLK_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_BITS 4
+#define RBCPR_CTL_UP_THRESHOLD_MASK ((1<<RBCPR_CTL_UP_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_BITS 4
+#define RBCPR_CTL_DN_THRESHOLD_MASK ((1<<RBCPR_CTL_DN_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9C
+
+/* RBCPR Result status Registers */
+#define REG_RBCPR_RESULT_0 0xA0
+#define REG_RBCPR_RESULT_1 0xA4
+
+#define RBCPR_RESULT_1_SEL_FAST_BITS 3
+#define RBCPR_RESULT_1_SEL_FAST(val) (val & \
+ ((1<<RBCPR_RESULT_1_SEL_FAST_BITS) - 1))
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_BITS 12
+#define RBCPR_RESULT0_ERROR_MASK ((1<<RBCPR_RESULT0_ERROR_BITS)-1)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_BITS 4
+#define RBCPR_RESULT0_ERROR_STEPS_MASK ((1<<RBCPR_RESULT0_ERROR_STEPS_BITS)-1)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * n)
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* RBCPR Debug Register */
+#define REG_RBCPR_DEBUG1 0x120
+#define RBCPR_DEBUG1_QUOT_FAST_BITS 12
+#define RBCPR_DEBUG1_QUOT_SLOW_BITS 12
+#define RBCPR_DEBUG1_QUOT_SLOW_SHIFT 12
+
+#define RBCPR_DEBUG1_QUOT_FAST(val) (val & \
+ ((1<<RBCPR_DEBUG1_QUOT_FAST_BITS)-1))
+
+#define RBCPR_DEBUG1_QUOT_SLOW(val) ((val>>RBCPR_DEBUG1_QUOT_SLOW_SHIFT) & \
+ ((1<<RBCPR_DEBUG1_QUOT_SLOW_BITS)-1))
+
+/* RBCPR Aging Register */
+#define REG_RBCPR_HTOL_AGE 0x160
+#define RBCPR_HTOL_AGE_PAGE BIT(1)
+#define RBCPR_AGE_DATA_STATUS BIT(2)
+
+/* RBCPR Clock Control Register */
+#define RBCPR_CLK_SEL_MASK BIT(0)
+#define RBCPR_CLK_SEL_19P2_MHZ 0
+#define RBCPR_CLK_SEL_AHB_CLK BIT(0)
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS 12
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK ((1<<CPR_FUSE_TARGET_QUOT_BITS)-1)
+#define CPR_FUSE_RO_SEL_BITS 3
+#define CPR_FUSE_RO_SEL_BITS_MASK ((1<<CPR_FUSE_RO_SEL_BITS)-1)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define BYTES_PER_FUSE_ROW 8
+
+#define SPEED_BIN_NONE UINT_MAX
+
+#define FUSE_REVISION_UNKNOWN (-1)
+#define FUSE_MAP_NO_MATCH (-1)
+#define FUSE_PARAM_MATCH_ANY 0xFFFFFFFF
+
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+#define FLAGS_SET_MIN_VOLTAGE BIT(1)
+#define FLAGS_UPLIFT_QUOT_VOLT BIT(2)
+
+/*
+ * The number of individual aging measurements to perform which are then
+ * averaged together in order to determine the final aging adjustment value.
+ */
+#define CPR_AGING_MEASUREMENT_ITERATIONS 16
+
+/*
+ * Aging measurements for the aged and unaged ring oscillators take place a few
+ * microseconds apart. If the vdd-supply voltage fluctuates between the two
+ * measurements, then the difference between them will be incorrect. The
+ * difference could end up too high or too low. This constant defines the
+ * number of lowest and highest measurements to ignore when averaging.
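+ * For example, with 16 measurement iterations and a filter of 3, the three
+ * smallest and three largest quotient deltas are dropped and the middle ten
+ * values are averaged.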
+ */
+#define CPR_AGING_MEASUREMENT_FILTER 3
+
+#define CPR_REGULATOR_DRIVER_NAME "qcom,cpr-regulator"
+
+/**
+ * enum vdd_mx_vmin_method - Method to determine vmin for vdd-mx
+ * %VDD_MX_VMIN_APC: Equal to APC voltage
+ * %VDD_MX_VMIN_APC_CORNER_CEILING: Equal to PVS corner ceiling voltage
+ * %VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ * Equal to slow speed corner ceiling
+ * %VDD_MX_VMIN_MX_VMAX:		Equal to specified vdd-mx-vmax voltage
+ * %VDD_MX_VMIN_APC_FUSE_CORNER_MAP:	Equal to the APC fuse corner mapped MX
+ *					voltage
+ * %VDD_MX_VMIN_APC_CORNER_MAP:	Equal to the APC corner mapped MX
+ *					voltage
+ */
+enum vdd_mx_vmin_method {
+ VDD_MX_VMIN_APC,
+ VDD_MX_VMIN_APC_CORNER_CEILING,
+ VDD_MX_VMIN_APC_SLOW_CORNER_CEILING,
+ VDD_MX_VMIN_MX_VMAX,
+ VDD_MX_VMIN_APC_FUSE_CORNER_MAP,
+ VDD_MX_VMIN_APC_CORNER_MAP,
+};
+
+#define CPR_CORNER_MIN 1
+#define CPR_FUSE_CORNER_MIN 1
+/*
+ * This is an arbitrary upper limit which is used in a sanity check in order to
+ * avoid excessive memory allocation due to bad device tree data.
+ */
+#define CPR_FUSE_CORNER_LIMIT 100
+
+struct quot_adjust_info {
+ int speed_bin;
+ int virtual_corner;
+ int quot_adjust;
+};
+
+struct cpr_quot_scale {
+ u32 offset;
+ u32 multiplier;
+};
+
+struct cpr_aging_sensor_info {
+ u32 sensor_id;
+ int initial_quot_diff;
+ int current_quot_diff;
+};
+
+struct cpr_aging_info {
+ struct cpr_aging_sensor_info *sensor_info;
+ int num_aging_sensors;
+ int aging_corner;
+ u32 aging_ro_kv;
+ u32 *aging_derate;
+ u32 aging_sensor_bypass;
+ u32 max_aging_margin;
+ u32 aging_ref_voltage;
+ u32 cpr_ro_kv[CPR_NUM_RING_OSC];
+ int *voltage_adjust;
+
+ bool cpr_aging_error;
+ bool cpr_aging_done;
+};
+
+static const char * const vdd_apc_name[] = {"vdd-apc-optional-prim",
+ "vdd-apc-optional-sec",
+ "vdd-apc"};
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct cpr_regulator {
+ struct list_head list;
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ bool vreg_enabled;
+ int corner;
+ int ceiling_max;
+ struct dentry *debugfs;
+ struct device *dev;
+
+ /* eFuse parameters */
+ phys_addr_t efuse_addr;
+ void __iomem *efuse_base;
+ u64 *remapped_row;
+ u32 remapped_row_base;
+ int num_remapped_rows;
+
+ /* Process voltage parameters */
+ u32 *pvs_corner_v;
+ /* Process voltage variables */
+ u32 pvs_bin;
+ u32 speed_bin;
+ u32 pvs_version;
+
+ /* APC voltage regulator */
+ struct regulator *vdd_apc;
+
+ /* Dependency parameters */
+ struct regulator *vdd_mx;
+ int vdd_mx_vmax;
+ int vdd_mx_vmin_method;
+ int vdd_mx_vmin;
+ int *vdd_mx_corner_map;
+
+ struct regulator *rpm_apc_vreg;
+ int *rpm_apc_corner_map;
+
+ /* mem-acc regulator */
+ struct regulator *mem_acc_vreg;
+
+ /* thermal monitor */
+ int tsens_id;
+ int cpr_disable_temp_threshold;
+ int cpr_enable_temp_threshold;
+ bool cpr_disable_on_temperature;
+ bool cpr_thermal_disable;
+ struct threshold_info tsens_threshold_config;
+
+ /* CPR parameters */
+ u32 num_fuse_corners;
+ u64 cpr_fuse_bits;
+ bool cpr_fuse_disable;
+ bool cpr_fuse_local;
+ bool cpr_fuse_redundant;
+ int cpr_fuse_revision;
+ int cpr_fuse_map_count;
+ int cpr_fuse_map_match;
+ int *cpr_fuse_target_quot;
+ int *cpr_fuse_ro_sel;
+ int *fuse_quot_offset;
+ int gcnt;
+
+ unsigned int cpr_irq;
+ void __iomem *rbcpr_base;
+ phys_addr_t rbcpr_clk_addr;
+ struct mutex cpr_mutex;
+
+ int *cpr_max_ceiling;
+ int *ceiling_volt;
+ int *floor_volt;
+ int *fuse_ceiling_volt;
+ int *fuse_floor_volt;
+ int *last_volt;
+ int *open_loop_volt;
+ int step_volt;
+
+ int *save_ctl;
+ int *save_irq;
+
+ int *vsens_corner_map;
+ /* vsens status */
+ bool vsens_enabled;
+ /* vsens regulators */
+ struct regulator *vdd_vsens_corner;
+ struct regulator *vdd_vsens_voltage;
+
+ /* Config parameters */
+ bool enable;
+ u32 ref_clk_khz;
+ u32 timer_delay_us;
+ u32 timer_cons_up;
+ u32 timer_cons_down;
+ u32 irq_line;
+ u32 *step_quotient;
+ u32 up_threshold;
+ u32 down_threshold;
+ u32 idle_clocks;
+ u32 gcnt_time_us;
+ u32 clamp_timer_interval;
+ u32 vdd_apc_step_up_limit;
+ u32 vdd_apc_step_down_limit;
+ u32 flags;
+ int *corner_map;
+ u32 num_corners;
+ int *quot_adjust;
+ int *mem_acc_corner_map;
+
+ int num_adj_cpus;
+ int online_cpus;
+ int *adj_cpus;
+ int **adj_cpus_save_ctl;
+ int **adj_cpus_save_irq;
+ int **adj_cpus_last_volt;
+ int **adj_cpus_quot_adjust;
+ int **adj_cpus_open_loop_volt;
+ bool adj_cpus_open_loop_volt_as_ceiling;
+ struct notifier_block cpu_notifier;
+ cpumask_t cpu_mask;
+ bool cpr_disabled_in_pc;
+ struct notifier_block pm_notifier;
+
+ bool is_cpr_suspended;
+ bool skip_voltage_change_during_suspend;
+
+ struct cpr_aging_info *aging_info;
+};
+
+#define CPR_DEBUG_MASK_IRQ BIT(0)
+#define CPR_DEBUG_MASK_API BIT(1)
+
+static int cpr_debug_enable;
+#if defined(CONFIG_DEBUG_FS)
+static struct dentry *cpr_debugfs_base;
+#endif
+
+static DEFINE_MUTEX(cpr_regulator_list_mutex);
+static LIST_HEAD(cpr_regulator_list);
+
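+/*
+ * debug_enable is a runtime-writable bitmask: CPR_DEBUG_MASK_IRQ (bit 0)
+ * promotes IRQ handler tracing to pr_info() and CPR_DEBUG_MASK_API (bit 1)
+ * enables API-level tracing through cpr_debug().
+ */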
+module_param_named(debug_enable, cpr_debug_enable, int, S_IRUGO | S_IWUSR);
+#define cpr_debug(cpr_vreg, message, ...) \
+ do { \
+ if (cpr_debug_enable & CPR_DEBUG_MASK_API) \
+ pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
+ ##__VA_ARGS__); \
+ } while (0)
+#define cpr_debug_irq(cpr_vreg, message, ...) \
+ do { \
+ if (cpr_debug_enable & CPR_DEBUG_MASK_IRQ) \
+ pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
+ ##__VA_ARGS__); \
+ else \
+ pr_debug("%s: " message, (cpr_vreg)->rdesc.name, \
+ ##__VA_ARGS__); \
+ } while (0)
+#define cpr_info(cpr_vreg, message, ...) \
+ pr_info("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)
+#define cpr_err(cpr_vreg, message, ...) \
+ pr_err("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)
+
+static u64 cpr_read_remapped_efuse_row(struct cpr_regulator *cpr_vreg,
+ u32 row_num)
+{
+ if (row_num - cpr_vreg->remapped_row_base
+ >= cpr_vreg->num_remapped_rows) {
+ cpr_err(cpr_vreg, "invalid row=%u, max remapped row=%u\n",
+ row_num, cpr_vreg->remapped_row_base
+ + cpr_vreg->num_remapped_rows - 1);
+ return 0;
+ }
+
+ return cpr_vreg->remapped_row[row_num - cpr_vreg->remapped_row_base];
+}
+
+static u64 cpr_read_efuse_row(struct cpr_regulator *cpr_vreg, u32 row_num,
+ bool use_tz_api)
+{
+ int rc;
+ u64 efuse_bits;
+ struct scm_desc desc = {0};
+ struct cpr_read_req {
+ u32 row_address;
+ int addr_type;
+ } req;
+
+ struct cpr_read_rsp {
+ u32 row_data[2];
+ u32 status;
+ } rsp;
+
+ if (cpr_vreg->remapped_row && row_num >= cpr_vreg->remapped_row_base)
+ return cpr_read_remapped_efuse_row(cpr_vreg, row_num);
+
+ if (!use_tz_api) {
+ efuse_bits = readq_relaxed(cpr_vreg->efuse_base
+ + row_num * BYTES_PER_FUSE_ROW);
+ return efuse_bits;
+ }
+
+ desc.args[0] = req.row_address = cpr_vreg->efuse_addr +
+ row_num * BYTES_PER_FUSE_ROW;
+ desc.args[1] = req.addr_type = 0;
+ desc.arginfo = SCM_ARGS(2);
+ efuse_bits = 0;
+
+ if (!is_scm_armv8()) {
+ rc = scm_call(SCM_SVC_FUSE, SCM_FUSE_READ,
+ &req, sizeof(req), &rsp, sizeof(rsp));
+ } else {
+ rc = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE, SCM_FUSE_READ),
+ &desc);
+ rsp.row_data[0] = desc.ret[0];
+ rsp.row_data[1] = desc.ret[1];
+ rsp.status = desc.ret[2];
+ }
+
+ if (rc) {
+ cpr_err(cpr_vreg, "read row %d failed, err code = %d",
+ row_num, rc);
+ } else {
+ efuse_bits = ((u64)(rsp.row_data[1]) << 32) +
+ (u64)rsp.row_data[0];
+ }
+
+ return efuse_bits;
+}
+
+/**
+ * cpr_read_efuse_param() - read a parameter from one or two eFuse rows
+ * @cpr_vreg: Pointer to cpr_regulator struct for this regulator.
+ * @row_start: Fuse row number to start reading from.
+ * @bit_start: The LSB of the parameter to read from the fuse.
+ * @bit_len: The length of the parameter in bits.
+ * @use_tz_api: Flag to indicate if an SCM call should be used to read the fuse.
+ *
+ * This function reads a parameter of specified offset and bit size out of one
+ * or two consecutive eFuse rows. This allows for the reading of parameters
+ * that happen to be split between two eFuse rows.
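+ *
+ * For example, row_start = 5, bit_start = 60 and bit_len = 8 return a value
+ * whose low 4 bits come from bits 63:60 of row 5 and whose high 4 bits come
+ * from bits 3:0 of row 6.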
+ *
+ * Returns the fuse parameter on success or 0 on failure.
+ */
+static u64 cpr_read_efuse_param(struct cpr_regulator *cpr_vreg, int row_start,
+ int bit_start, int bit_len, bool use_tz_api)
+{
+ u64 fuse[2];
+ u64 param = 0;
+ int bits_first, bits_second;
+
+ if (bit_start < 0) {
+ cpr_err(cpr_vreg, "Invalid LSB = %d specified\n", bit_start);
+ return 0;
+ }
+
+ if (bit_len < 0 || bit_len > 64) {
+ cpr_err(cpr_vreg, "Invalid bit length = %d specified\n",
+ bit_len);
+ return 0;
+ }
+
+ /* Allow bit indexing to start beyond the end of the start row. */
+ if (bit_start >= 64) {
+ row_start += bit_start >> 6; /* equivalent to bit_start / 64 */
+ bit_start &= 0x3F;
+ }
+
+ fuse[0] = cpr_read_efuse_row(cpr_vreg, row_start, use_tz_api);
+
+ if (bit_start == 0 && bit_len == 64) {
+ param = fuse[0];
+ } else if (bit_start + bit_len <= 64) {
+ param = (fuse[0] >> bit_start) & ((1ULL << bit_len) - 1);
+ } else {
+ fuse[1] = cpr_read_efuse_row(cpr_vreg, row_start + 1,
+ use_tz_api);
+ bits_first = 64 - bit_start;
+ bits_second = bit_len - bits_first;
+ param = (fuse[0] >> bit_start) & ((1ULL << bits_first) - 1);
+ param |= (fuse[1] & ((1ULL << bits_second) - 1)) << bits_first;
+ }
+
+ return param;
+}
+
+static bool cpr_is_allowed(struct cpr_regulator *cpr_vreg)
+{
+ if (cpr_vreg->cpr_fuse_disable || !cpr_vreg->enable ||
+ cpr_vreg->cpr_thermal_disable)
+ return false;
+ else
+ return true;
+}
+
+static void cpr_write(struct cpr_regulator *cpr_vreg, u32 offset, u32 value)
+{
+ writel_relaxed(value, cpr_vreg->rbcpr_base + offset);
+}
+
+static u32 cpr_read(struct cpr_regulator *cpr_vreg, u32 offset)
+{
+ return readl_relaxed(cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_masked_write(struct cpr_regulator *cpr_vreg, u32 offset,
+ u32 mask, u32 value)
+{
+ u32 reg_val;
+
+ reg_val = readl_relaxed(cpr_vreg->rbcpr_base + offset);
+ reg_val &= ~mask;
+ reg_val |= value & mask;
+ writel_relaxed(reg_val, cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_regulator *cpr_vreg)
+{
+ cpr_write(cpr_vreg, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_regulator *cpr_vreg)
+{
+ cpr_irq_clr(cpr_vreg);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_regulator *cpr_vreg)
+{
+ cpr_irq_clr(cpr_vreg);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_regulator *cpr_vreg, u32 int_bits)
+{
+ cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_regulator *cpr_vreg, u32 mask, u32 value)
+{
+ cpr_masked_write(cpr_vreg, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_regulator *cpr_vreg, int corner)
+{
+ u32 val;
+
+ if (cpr_vreg->is_cpr_suspended)
+ return;
+
+ /* Program Consecutive Up & Down */
+ val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+ << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+ (cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK);
+ cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, val);
+ cpr_masked_write(cpr_vreg, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ cpr_vreg->save_ctl[corner]);
+ cpr_irq_set(cpr_vreg, cpr_vreg->save_irq[corner]);
+
+ if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled &&
+ (cpr_vreg->ceiling_volt[corner] >
+ cpr_vreg->floor_volt[corner]))
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_regulator *cpr_vreg)
+{
+ if (cpr_vreg->is_cpr_suspended)
+ return;
+
+ cpr_irq_set(cpr_vreg, 0);
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(cpr_vreg);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_regulator *cpr_vreg)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_regulator *cpr_vreg)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_regulator *cpr_vreg, int corner)
+{
+ cpr_vreg->save_ctl[corner] = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ cpr_vreg->save_irq[corner] =
+ cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
+}
+
+static void cpr_corner_restore(struct cpr_regulator *cpr_vreg, int corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+ int i;
+
+ ro_sel = cpr_vreg->cpr_fuse_ro_sel[fuse_corner];
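+	/*
+	 * cpr_vreg->gcnt is assumed to already hold the gate count shifted
+	 * into the upper bits of the GCNT_TARGET register; OR in the
+	 * corner-adjusted target quotient, which occupies the low 12 bits.
+	 */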
+ gcnt = cpr_vreg->gcnt | (cpr_vreg->cpr_fuse_target_quot[fuse_corner] -
+ cpr_vreg->quot_adjust[corner]);
+
+ /* Program the step quotient and idle clocks */
+ step_quot = ((cpr_vreg->idle_clocks & RBCPR_STEP_QUOT_IDLE_CLK_MASK)
+ << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT) |
+ (cpr_vreg->step_quotient[fuse_corner]
+ & RBCPR_STEP_QUOT_STEPQUOT_MASK);
+ cpr_write(cpr_vreg, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = cpr_vreg->save_ctl[corner];
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, ctl);
+ irq = cpr_vreg->save_irq[corner];
+ cpr_irq_set(cpr_vreg, irq);
+ cpr_debug(cpr_vreg, "gcnt = 0x%08x, ctl = 0x%08x, irq = 0x%08x\n",
+ gcnt, ctl, irq);
+}
+
+static void cpr_corner_switch(struct cpr_regulator *cpr_vreg, int corner)
+{
+ if (cpr_vreg->corner == corner)
+ return;
+
+ cpr_corner_restore(cpr_vreg, corner);
+}
+
+static int cpr_apc_set(struct cpr_regulator *cpr_vreg, u32 new_volt)
+{
+ int max_volt, rc;
+
+ max_volt = cpr_vreg->ceiling_max;
+ rc = regulator_set_voltage(cpr_vreg->vdd_apc, new_volt, max_volt);
+ if (rc)
+ cpr_err(cpr_vreg, "set: vdd_apc = %d uV: rc=%d\n",
+ new_volt, rc);
+ return rc;
+}
+
+static int cpr_mx_get(struct cpr_regulator *cpr_vreg, int corner, int apc_volt)
+{
+ int vdd_mx;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+ int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+
+ switch (cpr_vreg->vdd_mx_vmin_method) {
+ case VDD_MX_VMIN_APC:
+ vdd_mx = apc_volt;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_CEILING:
+ vdd_mx = cpr_vreg->fuse_ceiling_volt[fuse_corner];
+ break;
+ case VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ vdd_mx = cpr_vreg->fuse_ceiling_volt[highest_fuse_corner];
+ break;
+ case VDD_MX_VMIN_MX_VMAX:
+ vdd_mx = cpr_vreg->vdd_mx_vmax;
+ break;
+ case VDD_MX_VMIN_APC_FUSE_CORNER_MAP:
+ vdd_mx = cpr_vreg->vdd_mx_corner_map[fuse_corner];
+ break;
+ case VDD_MX_VMIN_APC_CORNER_MAP:
+ vdd_mx = cpr_vreg->vdd_mx_corner_map[corner];
+ break;
+ default:
+ vdd_mx = 0;
+ break;
+ }
+
+ return vdd_mx;
+}
+
+static int cpr_mx_set(struct cpr_regulator *cpr_vreg, int corner,
+ int vdd_mx_vmin)
+{
+ int rc;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+
+ rc = regulator_set_voltage(cpr_vreg->vdd_mx, vdd_mx_vmin,
+ cpr_vreg->vdd_mx_vmax);
+ cpr_debug(cpr_vreg, "[corner:%d, fuse_corner:%d] %d uV\n", corner,
+ fuse_corner, vdd_mx_vmin);
+
+ if (!rc) {
+ cpr_vreg->vdd_mx_vmin = vdd_mx_vmin;
+ } else {
+ cpr_err(cpr_vreg, "set: vdd_mx [corner:%d, fuse_corner:%d] = %d uV failed: rc=%d\n",
+ corner, fuse_corner, vdd_mx_vmin, rc);
+ }
+ return rc;
+}
+
+static int cpr_scale_voltage(struct cpr_regulator *cpr_vreg, int corner,
+ int new_apc_volt, enum voltage_change_dir dir)
+{
+ int rc = 0, vdd_mx_vmin = 0;
+ int mem_acc_corner = cpr_vreg->mem_acc_corner_map[corner];
+ int fuse_corner = cpr_vreg->corner_map[corner];
+ int apc_corner, vsens_corner;
+
+ /* Determine the vdd_mx voltage */
+ if (dir != NO_CHANGE && cpr_vreg->vdd_mx != NULL)
+ vdd_mx_vmin = cpr_mx_get(cpr_vreg, corner, new_apc_volt);
+
+
+ if (cpr_vreg->vdd_vsens_voltage && cpr_vreg->vsens_enabled) {
+ rc = regulator_disable(cpr_vreg->vdd_vsens_voltage);
+ if (!rc)
+ cpr_vreg->vsens_enabled = false;
+ }
+
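+	/*
+	 * Ordering of the dependent rails: the mem-acc and RPM APC corner
+	 * votes are updated while vdd-apc is still at the higher of the old
+	 * and new voltages (before lowering it, after raising it), and vdd-mx
+	 * is raised before vdd-apc goes up but lowered only after vdd-apc has
+	 * gone down.
+	 */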
+ if (dir == DOWN) {
+ if (!rc && cpr_vreg->mem_acc_vreg)
+ rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
+ mem_acc_corner, mem_acc_corner);
+ if (!rc && cpr_vreg->rpm_apc_vreg) {
+ apc_corner = cpr_vreg->rpm_apc_corner_map[corner];
+ rc = regulator_set_voltage(cpr_vreg->rpm_apc_vreg,
+ apc_corner, apc_corner);
+ if (rc)
+ cpr_err(cpr_vreg, "apc_corner voting failed rc=%d\n",
+ rc);
+ }
+ }
+
+ if (!rc && vdd_mx_vmin && dir == UP) {
+ if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+ rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+ }
+
+ if (!rc)
+ rc = cpr_apc_set(cpr_vreg, new_apc_volt);
+
+ if (dir == UP) {
+ if (!rc && cpr_vreg->mem_acc_vreg)
+ rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
+ mem_acc_corner, mem_acc_corner);
+ if (!rc && cpr_vreg->rpm_apc_vreg) {
+ apc_corner = cpr_vreg->rpm_apc_corner_map[corner];
+ rc = regulator_set_voltage(cpr_vreg->rpm_apc_vreg,
+ apc_corner, apc_corner);
+ if (rc)
+ cpr_err(cpr_vreg, "apc_corner voting failed rc=%d\n",
+ rc);
+ }
+ }
+
+ if (!rc && vdd_mx_vmin && dir == DOWN) {
+ if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+ rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+ }
+
+ if (!rc && cpr_vreg->vdd_vsens_corner) {
+ vsens_corner = cpr_vreg->vsens_corner_map[fuse_corner];
+ rc = regulator_set_voltage(cpr_vreg->vdd_vsens_corner,
+ vsens_corner, vsens_corner);
+ }
+ if (!rc && cpr_vreg->vdd_vsens_voltage) {
+ rc = regulator_set_voltage(cpr_vreg->vdd_vsens_voltage,
+ cpr_vreg->floor_volt[corner],
+ cpr_vreg->ceiling_volt[corner]);
+ if (!rc && !cpr_vreg->vsens_enabled) {
+ rc = regulator_enable(cpr_vreg->vdd_vsens_voltage);
+ if (!rc)
+ cpr_vreg->vsens_enabled = true;
+ }
+ }
+
+ return rc;
+}
+
+static void cpr_scale(struct cpr_regulator *cpr_vreg,
+ enum voltage_change_dir dir)
+{
+ u32 reg_val, error_steps, reg_mask;
+ int last_volt, new_volt, corner, fuse_corner;
+ u32 gcnt, quot;
+
+ corner = cpr_vreg->corner;
+ fuse_corner = cpr_vreg->corner_map[corner];
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+
+ error_steps = (reg_val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_volt = cpr_vreg->last_volt[corner];
+
+ cpr_debug_irq(cpr_vreg,
+ "last_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+ corner, fuse_corner, last_volt);
+
+ gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET
+ (cpr_vreg->cpr_fuse_ro_sel[fuse_corner]));
+ quot = gcnt & ((1 << RBCPR_GCNT_TARGET_GCNT_SHIFT) - 1);
+
+ if (dir == UP) {
+ if (cpr_vreg->clamp_timer_interval
+ && error_steps < cpr_vreg->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(cpr_vreg->up_threshold,
+ cpr_vreg->vdd_apc_step_up_limit);
+ }
+ cpr_debug_irq(cpr_vreg,
+ "Up: cpr status = 0x%08x (error_steps=%d)\n",
+ reg_val, error_steps);
+
+ if (last_volt >= cpr_vreg->ceiling_volt[corner]) {
+ cpr_debug_irq(cpr_vreg,
+ "[corn:%d, fuse_corn:%d] @ ceiling: %d >= %d: NACK\n",
+ corner, fuse_corner, last_volt,
+ cpr_vreg->ceiling_volt[corner]);
+ cpr_irq_clr_nack(cpr_vreg);
+
+ cpr_debug_irq(cpr_vreg, "gcnt = 0x%08x (quot = %d)\n",
+ gcnt, quot);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+ RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ reg_val = reg_mask;
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return;
+ }
+
+ if (error_steps > cpr_vreg->vdd_apc_step_up_limit) {
+ cpr_debug_irq(cpr_vreg,
+ "%d is over up-limit(%d): Clamp\n",
+ error_steps,
+ cpr_vreg->vdd_apc_step_up_limit);
+ error_steps = cpr_vreg->vdd_apc_step_up_limit;
+ }
+
+ /* Calculate new voltage */
+ new_volt = last_volt + (error_steps * cpr_vreg->step_volt);
+ if (new_volt > cpr_vreg->ceiling_volt[corner]) {
+ cpr_debug_irq(cpr_vreg,
+ "new_volt(%d) >= ceiling(%d): Clamp\n",
+ new_volt,
+ cpr_vreg->ceiling_volt[corner]);
+
+ new_volt = cpr_vreg->ceiling_volt[corner];
+ }
+
+ if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+ cpr_irq_clr_nack(cpr_vreg);
+ return;
+ }
+ cpr_vreg->last_volt[corner] = new_volt;
+
+ /* Disable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ reg_val = 0;
+
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(cpr_vreg);
+
+ cpr_debug_irq(cpr_vreg,
+ "UP: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+ corner, fuse_corner, new_volt);
+ } else if (dir == DOWN) {
+ if (cpr_vreg->clamp_timer_interval
+ && error_steps < cpr_vreg->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(cpr_vreg->down_threshold,
+ cpr_vreg->vdd_apc_step_down_limit);
+ }
+ cpr_debug_irq(cpr_vreg,
+ "Down: cpr status = 0x%08x (error_steps=%d)\n",
+ reg_val, error_steps);
+
+ if (last_volt <= cpr_vreg->floor_volt[corner]) {
+ cpr_debug_irq(cpr_vreg,
+ "[corn:%d, fuse_corner:%d] @ floor: %d <= %d: NACK\n",
+ corner, fuse_corner, last_volt,
+ cpr_vreg->floor_volt[corner]);
+ cpr_irq_clr_nack(cpr_vreg);
+
+ cpr_debug_irq(cpr_vreg, "gcnt = 0x%08x (quot = %d)\n",
+ gcnt, quot);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ reg_val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return;
+ }
+
+ if (error_steps > cpr_vreg->vdd_apc_step_down_limit) {
+ cpr_debug_irq(cpr_vreg,
+ "%d is over down-limit(%d): Clamp\n",
+ error_steps,
+ cpr_vreg->vdd_apc_step_down_limit);
+ error_steps = cpr_vreg->vdd_apc_step_down_limit;
+ }
+
+		/* Calculate new voltage */
+ new_volt = last_volt - (error_steps * cpr_vreg->step_volt);
+ if (new_volt < cpr_vreg->floor_volt[corner]) {
+ cpr_debug_irq(cpr_vreg,
+ "new_volt(%d) < floor(%d): Clamp\n",
+ new_volt,
+ cpr_vreg->floor_volt[corner]);
+ new_volt = cpr_vreg->floor_volt[corner];
+ }
+
+ if (cpr_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+ cpr_irq_clr_nack(cpr_vreg);
+ return;
+ }
+ cpr_vreg->last_volt[corner] = new_volt;
+
+ /* Restore default threshold for UP */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+ RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ reg_val = cpr_vreg->up_threshold <<
+ RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(cpr_vreg);
+
+ cpr_debug_irq(cpr_vreg,
+ "DOWN: -> new_volt[corner:%d, fuse_corner:%d] = %d uV\n",
+ corner, fuse_corner, new_volt);
+ }
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_regulator *cpr_vreg = dev;
+ u32 reg_val;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+ if (cpr_vreg->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+
+ cpr_debug_irq(cpr_vreg, "IRQ_STATUS = 0x%02X\n", reg_val);
+
+ if (!cpr_ctl_is_enabled(cpr_vreg)) {
+ cpr_debug_irq(cpr_vreg, "CPR is disabled\n");
+ goto _exit;
+ } else if (cpr_ctl_is_busy(cpr_vreg)
+ && !cpr_vreg->clamp_timer_interval) {
+ cpr_debug_irq(cpr_vreg, "CPR measurement is not ready\n");
+ goto _exit;
+ } else if (!cpr_is_allowed(cpr_vreg)) {
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ cpr_err(cpr_vreg, "Interrupt broken? RBCPR_CTL = 0x%02X\n",
+ reg_val);
+ goto _exit;
+ }
+
+ /* Following sequence of handling is as per each IRQ's priority */
+ if (reg_val & CPR_INT_UP) {
+ cpr_scale(cpr_vreg, UP);
+ } else if (reg_val & CPR_INT_DOWN) {
+ cpr_scale(cpr_vreg, DOWN);
+ } else if (reg_val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(cpr_vreg);
+ } else if (reg_val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(cpr_vreg);
+ } else if (reg_val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ cpr_debug_irq(cpr_vreg, "IRQ occurred for Mid Flag\n");
+ } else {
+ cpr_debug_irq(cpr_vreg,
+ "IRQ occurred for unknown flag (0x%08x)\n", reg_val);
+ }
+
+ /* Save register values for the corner */
+ cpr_corner_save(cpr_vreg, cpr_vreg->corner);
+
+_exit:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return IRQ_HANDLED;
+}
+
+/**
+ * cmp_int() - int comparison function to be passed into the sort() function
+ * which leads to ascending sorting
+ * @a: First int value
+ * @b: Second int value
+ *
+ * Return: >0 if a > b, 0 if a == b, <0 if a < b
+ */
+static int cmp_int(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static int cpr_get_aging_quot_delta(struct cpr_regulator *cpr_vreg,
+ struct cpr_aging_sensor_info *aging_sensor_info)
+{
+ int quot_min, quot_max, is_aging_measurement, aging_measurement_count;
+ int quot_min_scaled, quot_max_scaled, quot_delta_scaled_sum;
+ int retries, rc = 0, sel_fast = 0, i, quot_delta_scaled;
+ u32 val, gcnt_ref, gcnt;
+ int *quot_delta_results, filtered_count;
+
+
+ quot_delta_results = kcalloc(CPR_AGING_MEASUREMENT_ITERATIONS,
+ sizeof(*quot_delta_results), GFP_ATOMIC);
+ if (!quot_delta_results)
+ return -ENOMEM;
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Program GCNT0/1 for getting aging data */
+ gcnt_ref = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
+ gcnt = gcnt_ref * 3 / 2;
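+	/*
+	 * The aging measurement uses a gate count 1.5 times the reference
+	 * value; the measured quotients are scaled back to the reference
+	 * interval below before being compared with the fused values.
+	 */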
+ val = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
+ RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(0), val);
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(1), val);
+
+ val = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(0));
+ cpr_debug(cpr_vreg, "RBCPR_GCNT_TARGET0 = 0x%08x\n", val);
+
+ val = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(1));
+ cpr_debug(cpr_vreg, "RBCPR_GCNT_TARGET1 = 0x%08x\n", val);
+
+ /* Program TIMER_INTERVAL to zero */
+ cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, 0);
+
+ /* Bypass sensors in collapsible domain */
+ if (cpr_vreg->aging_info->aging_sensor_bypass)
+ cpr_write(cpr_vreg, REG_RBCPR_SENSOR_BYPASS0,
+ (cpr_vreg->aging_info->aging_sensor_bypass &
+ RBCPR_SENSOR_MASK0_SENSOR(aging_sensor_info->sensor_id)));
+
+ /* Mask other sensors */
+ cpr_write(cpr_vreg, REG_RBCPR_SENSOR_MASK0,
+ RBCPR_SENSOR_MASK0_SENSOR(aging_sensor_info->sensor_id));
+ val = cpr_read(cpr_vreg, REG_RBCPR_SENSOR_MASK0);
+ cpr_debug(cpr_vreg, "RBCPR_SENSOR_MASK0 = 0x%08x\n", val);
+
+ /* Enable cpr controller */
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, RBCPR_CTL_LOOP_EN);
+
+ /* Make sure cpr starts measurement with toggling busy bit */
+ mb();
+
+ /* Wait and Ignore the first measurement. Time-out after 5ms */
+ retries = 50;
+ while (retries-- && cpr_ctl_is_busy(cpr_vreg))
+ udelay(100);
+
+ if (retries < 0) {
+ cpr_err(cpr_vreg, "Aging calibration failed\n");
+ rc = -EBUSY;
+ goto _exit;
+ }
+
+ /* Set age page mode */
+ cpr_write(cpr_vreg, REG_RBCPR_HTOL_AGE, RBCPR_HTOL_AGE_PAGE);
+
+ aging_measurement_count = 0;
+ quot_delta_scaled_sum = 0;
+
+ for (i = 0; i < CPR_AGING_MEASUREMENT_ITERATIONS; i++) {
+ /* Send cont nack */
+ cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+
+ /*
+ * Make sure cpr starts next measurement with
+ * toggling busy bit
+ */
+ mb();
+
+ /*
+ * Wait for controller to finish measurement
+ * and time-out after 5ms
+ */
+ retries = 50;
+ while (retries-- && cpr_ctl_is_busy(cpr_vreg))
+ udelay(100);
+
+ if (retries < 0) {
+ cpr_err(cpr_vreg, "Aging calibration failed\n");
+ rc = -EBUSY;
+ goto _exit;
+ }
+
+ /* Check for PAGE_IS_AGE flag in status register */
+ val = cpr_read(cpr_vreg, REG_RBCPR_HTOL_AGE);
+ is_aging_measurement = val & RBCPR_AGE_DATA_STATUS;
+
+ val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_1);
+ sel_fast = RBCPR_RESULT_1_SEL_FAST(val);
+ cpr_debug(cpr_vreg, "RBCPR_RESULT_1 = 0x%08x\n", val);
+
+ val = cpr_read(cpr_vreg, REG_RBCPR_DEBUG1);
+ cpr_debug(cpr_vreg, "RBCPR_DEBUG1 = 0x%08x\n", val);
+
+ if (sel_fast == 1) {
+ quot_min = RBCPR_DEBUG1_QUOT_FAST(val);
+ quot_max = RBCPR_DEBUG1_QUOT_SLOW(val);
+ } else {
+ quot_min = RBCPR_DEBUG1_QUOT_SLOW(val);
+ quot_max = RBCPR_DEBUG1_QUOT_FAST(val);
+ }
+
+ /*
+ * Scale the quotients so that they are equivalent to the fused
+ * values. This accounts for the difference in measurement
+ * interval times.
+ */
+
+ quot_min_scaled = quot_min * (gcnt_ref + 1) / (gcnt + 1);
+ quot_max_scaled = quot_max * (gcnt_ref + 1) / (gcnt + 1);
+
+ quot_delta_scaled = 0;
+ if (is_aging_measurement) {
+ quot_delta_scaled = quot_min_scaled - quot_max_scaled;
+ quot_delta_results[aging_measurement_count++] =
+ quot_delta_scaled;
+ }
+
+ cpr_debug(cpr_vreg,
+ "Age sensor[%d]: measurement[%d]: page_is_age=%u quot_min = %d, quot_max = %d quot_min_scaled = %d, quot_max_scaled = %d quot_delta_scaled = %d\n",
+ aging_sensor_info->sensor_id, i, is_aging_measurement,
+ quot_min, quot_max, quot_min_scaled, quot_max_scaled,
+ quot_delta_scaled);
+ }
+
+ filtered_count
+ = aging_measurement_count - CPR_AGING_MEASUREMENT_FILTER * 2;
+ if (filtered_count > 0) {
+ sort(quot_delta_results, aging_measurement_count,
+ sizeof(*quot_delta_results), cmp_int, NULL);
+
+ quot_delta_scaled_sum = 0;
+ for (i = 0; i < filtered_count; i++)
+ quot_delta_scaled_sum
+ += quot_delta_results[i
+ + CPR_AGING_MEASUREMENT_FILTER];
+
+ aging_sensor_info->current_quot_diff
+ = quot_delta_scaled_sum / filtered_count;
+ cpr_debug(cpr_vreg,
+ "Age sensor[%d]: average aging quotient delta = %d (count = %d)\n",
+ aging_sensor_info->sensor_id,
+ aging_sensor_info->current_quot_diff, filtered_count);
+ } else {
+ cpr_err(cpr_vreg, "%d aging measurements completed after %d iterations\n",
+ aging_measurement_count,
+ CPR_AGING_MEASUREMENT_ITERATIONS);
+ rc = -EBUSY;
+ }
+
+_exit:
+ /* Clear age page bit */
+ cpr_write(cpr_vreg, REG_RBCPR_HTOL_AGE, 0x0);
+
+ /* Disable the CPR controller after aging procedure */
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0x0);
+
+ /* Clear the sensor bypass */
+ if (cpr_vreg->aging_info->aging_sensor_bypass)
+ cpr_write(cpr_vreg, REG_RBCPR_SENSOR_BYPASS0, 0x0);
+
+ /* Unmask all sensors */
+ cpr_write(cpr_vreg, REG_RBCPR_SENSOR_MASK0, 0x0);
+
+ /* Clear gcnt0/1 registers */
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(0), 0x0);
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(1), 0x0);
+
+ /* Program the delay count for the timer */
+ val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
+ cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
+
+ return rc;
+}
+
+static void cpr_de_aging_adjustment(void *data)
+{
+ struct cpr_regulator *cpr_vreg = (struct cpr_regulator *)data;
+ struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+ struct cpr_aging_sensor_info *aging_sensor_info;
+ int i, num_aging_sensors, retries, rc = 0;
+ int max_quot_diff = 0, ro_sel = 0;
+ u32 voltage_adjust, aging_voltage_adjust = 0;
+
+ aging_sensor_info = aging_info->sensor_info;
+ num_aging_sensors = aging_info->num_aging_sensors;
+
+ for (i = 0; i < num_aging_sensors; i++, aging_sensor_info++) {
+ retries = 2;
+ while (retries--) {
+ rc = cpr_get_aging_quot_delta(cpr_vreg,
+ aging_sensor_info);
+ if (!rc)
+ break;
+ }
+ if (rc && retries < 0) {
+ cpr_err(cpr_vreg, "error in age calibration: rc = %d\n",
+ rc);
+ aging_info->cpr_aging_error = true;
+ return;
+ }
+
+ max_quot_diff = max(max_quot_diff,
+ (aging_sensor_info->current_quot_diff -
+ aging_sensor_info->initial_quot_diff));
+ }
+
+ cpr_debug(cpr_vreg, "Max aging quot delta = %d\n",
+ max_quot_diff);
+ aging_voltage_adjust = DIV_ROUND_UP(max_quot_diff * 1000000,
+ aging_info->aging_ro_kv);
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+ /* Remove initial max aging adjustment */
+ ro_sel = cpr_vreg->cpr_fuse_ro_sel[i];
+ cpr_vreg->cpr_fuse_target_quot[i] -=
+ (aging_info->cpr_ro_kv[ro_sel]
+ * aging_info->max_aging_margin) / 1000000;
+ aging_info->voltage_adjust[i] = 0;
+
+ if (aging_voltage_adjust > 0) {
+ /* Add required aging adjustment */
+ voltage_adjust = (aging_voltage_adjust
+ * aging_info->aging_derate[i]) / 1000;
+ voltage_adjust = min(voltage_adjust,
+ aging_info->max_aging_margin);
+ cpr_vreg->cpr_fuse_target_quot[i] +=
+ (aging_info->cpr_ro_kv[ro_sel]
+ * voltage_adjust) / 1000000;
+ aging_info->voltage_adjust[i] = voltage_adjust;
+ }
+ }
+}
+
+static int cpr_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+ return cpr_vreg->vreg_enabled;
+}
+
+static int cpr_regulator_enable(struct regulator_dev *rdev)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ int rc = 0;
+
+ /* Enable dependency power before vdd_apc */
+ if (cpr_vreg->vdd_mx) {
+ rc = regulator_enable(cpr_vreg->vdd_mx);
+ if (rc) {
+ cpr_err(cpr_vreg, "regulator_enable: vdd_mx: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = regulator_enable(cpr_vreg->vdd_apc);
+ if (rc) {
+ cpr_err(cpr_vreg, "regulator_enable: vdd_apc: rc=%d\n", rc);
+ return rc;
+ }
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+ cpr_vreg->vreg_enabled = true;
+ if (cpr_is_allowed(cpr_vreg) && cpr_vreg->corner) {
+ cpr_irq_clr(cpr_vreg);
+ cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+ }
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ return rc;
+}
+
+static int cpr_regulator_disable(struct regulator_dev *rdev)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = regulator_disable(cpr_vreg->vdd_apc);
+ if (!rc) {
+ if (cpr_vreg->vdd_mx)
+ rc = regulator_disable(cpr_vreg->vdd_mx);
+
+ if (rc) {
+ cpr_err(cpr_vreg, "regulator_disable: vdd_mx: rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+ cpr_vreg->vreg_enabled = false;
+ if (cpr_is_allowed(cpr_vreg))
+ cpr_ctl_disable(cpr_vreg);
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ } else {
+ cpr_err(cpr_vreg, "regulator_disable: vdd_apc: rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+static int cpr_calculate_de_aging_margin(struct cpr_regulator *cpr_vreg)
+{
+ struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+ enum voltage_change_dir change_dir = NO_CHANGE;
+ u32 save_ctl, save_irq;
+ cpumask_t tmp_mask;
+ int rc = 0, i;
+
+ save_ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ save_irq = cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
+
+ /* Disable interrupt and CPR */
+ cpr_irq_set(cpr_vreg, 0);
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
+
+ if (aging_info->aging_corner > cpr_vreg->corner)
+ change_dir = UP;
+ else if (aging_info->aging_corner < cpr_vreg->corner)
+ change_dir = DOWN;
+
+ /* set selected reference voltage for de-aging */
+ rc = cpr_scale_voltage(cpr_vreg,
+ aging_info->aging_corner,
+ aging_info->aging_ref_voltage,
+ change_dir);
+ if (rc) {
+ cpr_err(cpr_vreg, "Unable to set aging reference voltage, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Force PWM mode */
+ rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_NORMAL);
+ if (rc) {
+ cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+ REGULATOR_MODE_NORMAL, rc);
+ return rc;
+ }
+
+ get_online_cpus();
+ cpumask_and(&tmp_mask, &cpr_vreg->cpu_mask, cpu_online_mask);
+ if (!cpumask_empty(&tmp_mask)) {
+ smp_call_function_any(&tmp_mask,
+ cpr_de_aging_adjustment,
+ cpr_vreg, true);
+ aging_info->cpr_aging_done = true;
+ if (!aging_info->cpr_aging_error)
+ for (i = CPR_FUSE_CORNER_MIN;
+ i <= cpr_vreg->num_fuse_corners; i++)
+ cpr_info(cpr_vreg, "Corner[%d]: age adjusted target quot = %d\n",
+ i, cpr_vreg->cpr_fuse_target_quot[i]);
+ }
+
+ put_online_cpus();
+
+ /* Set to initial mode */
+ rc = regulator_set_mode(cpr_vreg->vdd_apc, REGULATOR_MODE_IDLE);
+ if (rc) {
+ cpr_err(cpr_vreg, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+ REGULATOR_MODE_IDLE, rc);
+ return rc;
+ }
+
+ /* Clear interrupts */
+ cpr_irq_clr(cpr_vreg);
+
+ /* Restore register values */
+ cpr_irq_set(cpr_vreg, save_irq);
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, save_ctl);
+
+ return rc;
+}
+
+/* Note that cpr_vreg->cpr_mutex must be held by the caller. */
+static int cpr_regulator_set_voltage(struct regulator_dev *rdev,
+ int corner, bool reset_quot)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+ int rc;
+ int new_volt;
+ enum voltage_change_dir change_dir = NO_CHANGE;
+ int fuse_corner = cpr_vreg->corner_map[corner];
+
+ if (cpr_is_allowed(cpr_vreg)) {
+ cpr_ctl_disable(cpr_vreg);
+ new_volt = cpr_vreg->last_volt[corner];
+ } else {
+ new_volt = cpr_vreg->open_loop_volt[corner];
+ }
+
+ cpr_debug(cpr_vreg, "[corner:%d, fuse_corner:%d] = %d uV\n",
+ corner, fuse_corner, new_volt);
+
+ if (corner > cpr_vreg->corner)
+ change_dir = UP;
+ else if (corner < cpr_vreg->corner)
+ change_dir = DOWN;
+
+ /* Read age sensor data and apply de-aging adjustments */
+ if (cpr_vreg->vreg_enabled && aging_info && !aging_info->cpr_aging_done
+ && (corner <= aging_info->aging_corner)) {
+ rc = cpr_calculate_de_aging_margin(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "failed in de-aging calibration: rc=%d\n",
+ rc);
+ } else {
+ change_dir = NO_CHANGE;
+ if (corner > aging_info->aging_corner)
+ change_dir = UP;
+ else if (corner < aging_info->aging_corner)
+ change_dir = DOWN;
+ }
+ reset_quot = true;
+ }
+
+ rc = cpr_scale_voltage(cpr_vreg, corner, new_volt, change_dir);
+ if (rc)
+ return rc;
+
+ if (cpr_is_allowed(cpr_vreg) && cpr_vreg->vreg_enabled) {
+ cpr_irq_clr(cpr_vreg);
+ if (reset_quot)
+ cpr_corner_restore(cpr_vreg, corner);
+ else
+ cpr_corner_switch(cpr_vreg, corner);
+ cpr_ctl_enable(cpr_vreg, corner);
+ }
+
+ cpr_vreg->corner = corner;
+
+ return rc;
+}
+
+static int cpr_regulator_set_voltage_op(struct regulator_dev *rdev,
+ int corner, int corner_max, unsigned *selector)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+ rc = cpr_regulator_set_voltage(rdev, corner, false);
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ return rc;
+}
+
+static int cpr_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+ return cpr_vreg->corner;
+}
+
+/**
+ * cpr_regulator_list_corner_voltage() - return the ceiling voltage mapped to
+ * the specified voltage corner
+ * @rdev: Regulator device pointer for the cpr-regulator
+ * @corner: Voltage corner
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr-regulator device.
+ *
+ * Return: voltage value in microvolts or -EINVAL if the corner is out of range
+ */
+static int cpr_regulator_list_corner_voltage(struct regulator_dev *rdev,
+ int corner)
+{
+ struct cpr_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+ if (corner >= CPR_CORNER_MIN && corner <= cpr_vreg->num_corners)
+ return cpr_vreg->ceiling_volt[corner];
+ else
+ return -EINVAL;
+}
+
+static struct regulator_ops cpr_corner_ops = {
+ .enable = cpr_regulator_enable,
+ .disable = cpr_regulator_disable,
+ .is_enabled = cpr_regulator_is_enabled,
+ .set_voltage = cpr_regulator_set_voltage_op,
+ .get_voltage = cpr_regulator_get_voltage,
+ .list_corner_voltage = cpr_regulator_list_corner_voltage,
+};
+
+#ifdef CONFIG_PM
+static int cpr_suspend(struct cpr_regulator *cpr_vreg)
+{
+ cpr_debug(cpr_vreg, "suspend\n");
+
+ cpr_ctl_disable(cpr_vreg);
+
+ cpr_irq_clr(cpr_vreg);
+
+ return 0;
+}
+
+static int cpr_resume(struct cpr_regulator *cpr_vreg)
+{
+ cpr_debug(cpr_vreg, "resume\n");
+
+ cpr_irq_clr(cpr_vreg);
+
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+
+ return 0;
+}
+
+static int cpr_regulator_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
+ int rc = 0;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ if (cpr_is_allowed(cpr_vreg))
+ rc = cpr_suspend(cpr_vreg);
+
+ cpr_vreg->is_cpr_suspended = true;
+
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ return rc;
+}
+
+static int cpr_regulator_resume(struct platform_device *pdev)
+{
+ struct cpr_regulator *cpr_vreg = platform_get_drvdata(pdev);
+ int rc = 0;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ cpr_vreg->is_cpr_suspended = false;
+
+ if (cpr_is_allowed(cpr_vreg))
+ rc = cpr_resume(cpr_vreg);
+
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ return rc;
+}
+#else
+#define cpr_regulator_suspend NULL
+#define cpr_regulator_resume NULL
+#endif
+
+static int cpr_config(struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+ int i;
+ u32 val, gcnt, reg;
+ void __iomem *rbcpr_clk;
+ int size;
+
+ if (cpr_vreg->rbcpr_clk_addr) {
+ /* Use 19.2 MHz clock for CPR. */
+ rbcpr_clk = ioremap(cpr_vreg->rbcpr_clk_addr, 4);
+ if (!rbcpr_clk) {
+ cpr_err(cpr_vreg, "Unable to map rbcpr_clk\n");
+ return -EINVAL;
+ }
+ reg = readl_relaxed(rbcpr_clk);
+ reg &= ~RBCPR_CLK_SEL_MASK;
+ reg |= RBCPR_CLK_SEL_19P2_MHZ & RBCPR_CLK_SEL_MASK;
+ writel_relaxed(reg, rbcpr_clk);
+ iounmap(rbcpr_clk);
+ }
+
+ /* Disable interrupt and CPR */
+ cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), 0);
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW Ceiling, Floor and vlevel */
+ val = ((RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+ << RBIF_LIMIT_CEILING_SHIFT)
+ | (RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK);
+ cpr_write(cpr_vreg, REG_RBIF_LIMIT, val);
+ cpr_write(cpr_vreg, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
+ gcnt = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
+ RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ cpr_vreg->gcnt = gcnt;
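+	/*
+	 * Illustrative example (hypothetical values): with a 19200 kHz
+	 * reference clock and gcnt_time_us = 1, the gate count above is
+	 * 19200 * 1 / 1000 = 19 reference clock cycles before being masked
+	 * and shifted into the GCNT_TARGET register field.
+	 */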
+
+ /* Program the delay count for the timer */
+ val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
+ cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
+ cpr_info(cpr_vreg, "Timer count: 0x%0x (for %d us)\n", val,
+ cpr_vreg->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+ << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+ (cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK) |
+ ((cpr_vreg->clamp_timer_interval & RBIF_TIMER_ADJ_CLAMP_INT_MASK)
+ << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT);
+ cpr_write(cpr_vreg, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ cpr_vreg->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK;
+ cpr_vreg->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK;
+ val = (cpr_vreg->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT)
+ | (cpr_vreg->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT);
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, val);
+
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+ val = cpr_read(cpr_vreg, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ cpr_vreg->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ size = cpr_vreg->num_corners + 1;
+ cpr_vreg->save_ctl = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+ cpr_vreg->save_irq = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+ if (!cpr_vreg->save_ctl || !cpr_vreg->save_irq)
+ return -ENOMEM;
+
+ for (i = 1; i < size; i++)
+ cpr_corner_save(cpr_vreg, i);
+
+ return 0;
+}
+
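+/*
+ * sel_array layout, as used below: [0] = fuse row, [1] = bit offset within
+ * the row, [2] = width of the field in bits, [3] = expected field value,
+ * [4] = flag passed straight through to cpr_read_efuse_row().  The function
+ * returns 1 if the fused field equals the expected value, 0 otherwise.
+ */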
+static int cpr_fuse_is_setting_expected(struct cpr_regulator *cpr_vreg,
+ u32 sel_array[5])
+{
+ u64 fuse_bits;
+ u32 ret;
+
+ fuse_bits = cpr_read_efuse_row(cpr_vreg, sel_array[0], sel_array[4]);
+ ret = (fuse_bits >> sel_array[1]) & ((1 << sel_array[2]) - 1);
+ if (ret == sel_array[3])
+ ret = 1;
+ else
+ ret = 0;
+
+ cpr_info(cpr_vreg, "[row:%d] = 0x%llx @%d:%d == %d ?: %s\n",
+ sel_array[0], fuse_bits,
+ sel_array[1], sel_array[2],
+ sel_array[3],
+ (ret == 1) ? "yes" : "no");
+ return ret;
+}
+
+static int cpr_voltage_uplift_wa_inc_volt(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ u32 uplift_voltage;
+ u32 uplift_max_volt = 0;
+ int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+ int rc;
+
+ rc = of_property_read_u32(of_node,
+ "qcom,cpr-uplift-voltage", &uplift_voltage);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "cpr-uplift-voltage is missing, rc = %d", rc);
+ return rc;
+ }
+ rc = of_property_read_u32(of_node,
+ "qcom,cpr-uplift-max-volt", &uplift_max_volt);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "cpr-uplift-max-volt is missing, rc = %d",
+ rc);
+ return rc;
+ }
+
+ cpr_vreg->pvs_corner_v[highest_fuse_corner] += uplift_voltage;
+ if (cpr_vreg->pvs_corner_v[highest_fuse_corner] > uplift_max_volt)
+ cpr_vreg->pvs_corner_v[highest_fuse_corner] = uplift_max_volt;
+
+ return rc;
+}
+
+static int cpr_adjust_init_voltages(struct device_node *of_node,
+ struct cpr_regulator *cpr_vreg)
+{
+ int tuple_count, tuple_match, i;
+ u32 index;
+ u32 volt_adjust = 0;
+ int len = 0;
+ int rc = 0;
+
+ if (!of_find_property(of_node, "qcom,cpr-init-voltage-adjustment",
+ &len)) {
+ /* No initial voltage adjustment needed. */
+ return 0;
+ }
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /*
+ * No matching index to use for initial voltage
+ * adjustment.
+ */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+ cpr_err(cpr_vreg, "qcom,cpr-init-voltage-adjustment length=%d is invalid\n",
+ len);
+ return -EINVAL;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+ index = tuple_match * cpr_vreg->num_fuse_corners
+ + i - CPR_FUSE_CORNER_MIN;
+ rc = of_property_read_u32_index(of_node,
+ "qcom,cpr-init-voltage-adjustment", index,
+ &volt_adjust);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,cpr-init-voltage-adjustment index %u, rc=%d\n",
+ index, rc);
+ return rc;
+ }
+
+ if (volt_adjust) {
+ cpr_vreg->pvs_corner_v[i] += volt_adjust;
+ cpr_info(cpr_vreg, "adjusted initial voltage[%d]: %d -> %d uV\n",
+ i, cpr_vreg->pvs_corner_v[i] - volt_adjust,
+ cpr_vreg->pvs_corner_v[i]);
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Property qcom,cpr-fuse-init-voltage specifies the fuse position of the
+ * initial voltage for each fuse corner.  The MSB of the fuse value is a sign
+ * bit, and the remaining bits define the number of steps of the offset.  Each
+ * step has units of microvolts defined in the qcom,cpr-init-voltage-step
+ * property.  The initial voltages are calculated using the formula:
+ *	pvs_corner_v[corner] = ref_volt[corner] + (sign * steps * step_size_uv)
+ * where ref_volt is read from the qcom,cpr-init-voltage-ref property.
+ */
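+/*
+ * Worked example (hypothetical values): with ref_volt = 1050000 uV,
+ * step_size_uv = 10000, and a 5-bit fuse field of 0b10011 (sign bit set,
+ * 3 steps), the initial voltage is 1050000 - 3 * 10000 = 1020000 uV, which
+ * is then rounded up to a multiple of the PMIC step size defined by the
+ * qcom,cpr-apc-volt-step property.
+ */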
+static int cpr_pvs_per_corner_init(struct device_node *of_node,
+ struct cpr_regulator *cpr_vreg)
+{
+ u64 efuse_bits;
+ int i, size, sign, steps, step_size_uv, rc;
+ u32 *fuse_sel, *tmp, *ref_uv;
+ struct property *prop;
+ char *init_volt_str;
+
+ init_volt_str = cpr_vreg->cpr_fuse_redundant
+ ? "qcom,cpr-fuse-redun-init-voltage"
+ : "qcom,cpr-fuse-init-voltage";
+
+ prop = of_find_property(of_node, init_volt_str, NULL);
+ if (!prop) {
+ cpr_err(cpr_vreg, "%s is missing\n", init_volt_str);
+ return -EINVAL;
+ }
+ size = prop->length / sizeof(u32);
+ if (size != cpr_vreg->num_fuse_corners * 4) {
+ cpr_err(cpr_vreg,
+ "fuse position for init voltages is invalid\n");
+ return -EINVAL;
+ }
+ fuse_sel = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+ if (!fuse_sel) {
+ cpr_err(cpr_vreg, "memory alloc failed.\n");
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, init_volt_str,
+ fuse_sel, size);
+ if (rc < 0) {
+ cpr_err(cpr_vreg,
+ "read cpr-fuse-init-voltage failed, rc = %d\n", rc);
+ kfree(fuse_sel);
+ return rc;
+ }
+ rc = of_property_read_u32(of_node, "qcom,cpr-init-voltage-step",
+ &step_size_uv);
+ if (rc < 0) {
+ cpr_err(cpr_vreg,
+ "read cpr-init-voltage-step failed, rc = %d\n", rc);
+ kfree(fuse_sel);
+ return rc;
+ }
+
+ ref_uv = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*ref_uv),
+ GFP_KERNEL);
+ if (!ref_uv) {
+ cpr_err(cpr_vreg,
+ "Could not allocate memory for reference voltages\n");
+ kfree(fuse_sel);
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-init-voltage-ref",
+ &ref_uv[CPR_FUSE_CORNER_MIN], cpr_vreg->num_fuse_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg,
+ "read qcom,cpr-init-voltage-ref failed, rc = %d\n", rc);
+ kfree(fuse_sel);
+ kfree(ref_uv);
+ return rc;
+ }
+
+ tmp = fuse_sel;
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+ efuse_bits = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+ fuse_sel[1], fuse_sel[2], fuse_sel[3]);
+ sign = (efuse_bits & (1 << (fuse_sel[2] - 1))) ? -1 : 1;
+ steps = efuse_bits & ((1 << (fuse_sel[2] - 1)) - 1);
+ cpr_vreg->pvs_corner_v[i] =
+ ref_uv[i] + sign * steps * step_size_uv;
+ cpr_vreg->pvs_corner_v[i] = DIV_ROUND_UP(
+ cpr_vreg->pvs_corner_v[i],
+ cpr_vreg->step_volt) *
+ cpr_vreg->step_volt;
+ cpr_debug(cpr_vreg, "corner %d: sign = %d, steps = %d, volt = %d uV\n",
+ i, sign, steps, cpr_vreg->pvs_corner_v[i]);
+ fuse_sel += 4;
+ }
+
+ rc = cpr_adjust_init_voltages(of_node, cpr_vreg);
+ if (rc)
+ goto done;
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+ if (cpr_vreg->pvs_corner_v[i]
+ > cpr_vreg->fuse_ceiling_volt[i]) {
+ cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d above ceiling %d\n",
+ i, cpr_vreg->pvs_corner_v[i],
+ cpr_vreg->fuse_ceiling_volt[i]);
+ cpr_vreg->pvs_corner_v[i]
+ = cpr_vreg->fuse_ceiling_volt[i];
+ } else if (cpr_vreg->pvs_corner_v[i] <
+ cpr_vreg->fuse_floor_volt[i]) {
+ cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d below floor %d\n",
+ i, cpr_vreg->pvs_corner_v[i],
+ cpr_vreg->fuse_floor_volt[i]);
+ cpr_vreg->pvs_corner_v[i]
+ = cpr_vreg->fuse_floor_volt[i];
+ }
+ }
+
+done:
+ kfree(tmp);
+ kfree(ref_uv);
+
+ return rc;
+}
+
+/*
+ * A single PVS bin is stored in a fuse whose position is defined either
+ * in the qcom,pvs-fuse-redun property or in the qcom,pvs-fuse property.
+ * The fuse value defined in the qcom,pvs-fuse-redun-sel property is used
+ * to select between the primary and redundant PVS fuse positions.
+ * After the PVS bin value is read out successfully, it is used as the row
+ * index to get initial voltages for each fuse corner from the voltage table
+ * defined in the qcom,pvs-voltage-table property.
+ */
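+/*
+ * Illustrative example (hypothetical values): with a 3-bit PVS fuse there
+ * are 8 possible bins, so qcom,pvs-voltage-table must hold 8 rows of
+ * num_fuse_corners voltages each; a fused bin value of 5 selects the sixth
+ * row of the table as the per-fuse-corner initial voltages.
+ */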
+static int cpr_pvs_single_bin_init(struct device_node *of_node,
+ struct cpr_regulator *cpr_vreg)
+{
+ u64 efuse_bits;
+ u32 pvs_fuse[4], pvs_fuse_redun_sel[5];
+ int rc, i, stripe_size;
+ bool redundant;
+ size_t pvs_bins;
+ u32 *tmp;
+
+ rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun-sel",
+ pvs_fuse_redun_sel, 5);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "pvs-fuse-redun-sel missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ redundant = cpr_fuse_is_setting_expected(cpr_vreg, pvs_fuse_redun_sel);
+ if (redundant) {
+ rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse-redun",
+ pvs_fuse, 4);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "pvs-fuse-redun missing: rc=%d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ rc = of_property_read_u32_array(of_node, "qcom,pvs-fuse",
+ pvs_fuse, 4);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "pvs-fuse missing: rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Construct PVS process # from the efuse bits */
+ efuse_bits = cpr_read_efuse_row(cpr_vreg, pvs_fuse[0], pvs_fuse[3]);
+ cpr_vreg->pvs_bin = (efuse_bits >> pvs_fuse[1]) &
+ ((1 << pvs_fuse[2]) - 1);
+ pvs_bins = 1 << pvs_fuse[2];
+ stripe_size = cpr_vreg->num_fuse_corners;
+ tmp = kzalloc(sizeof(u32) * pvs_bins * stripe_size, GFP_KERNEL);
+ if (!tmp) {
+ cpr_err(cpr_vreg, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,pvs-voltage-table",
+ tmp, pvs_bins * stripe_size);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "pvs-voltage-table missing: rc=%d\n", rc);
+ kfree(tmp);
+ return rc;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+ cpr_vreg->pvs_corner_v[i] = tmp[cpr_vreg->pvs_bin *
+ stripe_size + i - 1];
+ kfree(tmp);
+
+ rc = cpr_adjust_init_voltages(of_node, cpr_vreg);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * This function reads the VDD_MX dependency parameters from the device node.
+ * The expected qcom,vdd-mx-corner-map length is either num_fuse_corners or
+ * num_corners, depending upon the selected vdd-mx-vmin-method.
+ */
+static int cpr_parse_vdd_mx_parameters(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ u32 corner_map_len;
+ int rc, len, size;
+
+ rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmax",
+ &cpr_vreg->vdd_mx_vmax);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "vdd-mx-vmax missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,vdd-mx-vmin-method",
+ &cpr_vreg->vdd_mx_vmin_method);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "vdd-mx-vmin-method missing: rc=%d\n",
+ rc);
+ return rc;
+ }
+ if (cpr_vreg->vdd_mx_vmin_method > VDD_MX_VMIN_APC_CORNER_MAP) {
+ cpr_err(cpr_vreg, "Invalid vdd-mx-vmin-method(%d)\n",
+ cpr_vreg->vdd_mx_vmin_method);
+ return -EINVAL;
+ }
+
+ switch (cpr_vreg->vdd_mx_vmin_method) {
+ case VDD_MX_VMIN_APC_FUSE_CORNER_MAP:
+ corner_map_len = cpr_vreg->num_fuse_corners;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_MAP:
+ corner_map_len = cpr_vreg->num_corners;
+ break;
+ default:
+ cpr_vreg->vdd_mx_corner_map = NULL;
+ return 0;
+ }
+
+ if (!of_find_property(of_node, "qcom,vdd-mx-corner-map", &len)) {
+ cpr_err(cpr_vreg, "qcom,vdd-mx-corner-map missing");
+ return -EINVAL;
+ }
+
+ size = len / sizeof(u32);
+ if (size != corner_map_len) {
+ cpr_err(cpr_vreg,
+ "qcom,vdd-mx-corner-map length=%d is invalid: required:%u\n",
+ size, corner_map_len);
+ return -EINVAL;
+ }
+
+ cpr_vreg->vdd_mx_corner_map = devm_kzalloc(&pdev->dev,
+ (corner_map_len + 1) * sizeof(*cpr_vreg->vdd_mx_corner_map),
+ GFP_KERNEL);
+ if (!cpr_vreg->vdd_mx_corner_map) {
+ cpr_err(cpr_vreg,
+ "Can't allocate memory for cpr_vreg->vdd_mx_corner_map\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,vdd-mx-corner-map",
+ &cpr_vreg->vdd_mx_corner_map[1],
+ corner_map_len);
+ if (rc)
+ cpr_err(cpr_vreg,
+ "read qcom,vdd-mx-corner-map failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define MAX_CHARS_PER_INT 10
+
+/*
+ * The initial voltage for each fuse corner may be determined by one of two
+ * possible styles of fuse. If qcom,cpr-fuse-init-voltage is present, then
+ * the initial voltages are encoded in a fuse for each fuse corner. If it is
+ * not present, then the initial voltages are all determined using a single
+ * PVS bin fuse value.
+ */
+static int cpr_pvs_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+ int i, rc, pos;
+ size_t buflen;
+ char *buf;
+
+ rc = of_property_read_u32(of_node, "qcom,cpr-apc-volt-step",
+ &cpr_vreg->step_volt);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "read cpr-apc-volt-step failed, rc = %d\n",
+ rc);
+ return rc;
+ } else if (cpr_vreg->step_volt == 0) {
+ cpr_err(cpr_vreg, "apc voltage step size can't be set to 0.\n");
+ return -EINVAL;
+ }
+
+ if (of_find_property(of_node, "qcom,cpr-fuse-init-voltage", NULL)) {
+ rc = cpr_pvs_per_corner_init(of_node, cpr_vreg);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "get pvs per corner failed, rc = %d",
+ rc);
+ return rc;
+ }
+ } else {
+ rc = cpr_pvs_single_bin_init(of_node, cpr_vreg);
+ if (rc < 0) {
+ cpr_err(cpr_vreg,
+ "get pvs from single bin failed, rc = %d", rc);
+ return rc;
+ }
+ }
+
+ if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
+ rc = cpr_voltage_uplift_wa_inc_volt(cpr_vreg, of_node);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "pvs volt uplift wa apply failed: %d",
+ rc);
+ return rc;
+ }
+ }
+
+ /*
+ * Allow the highest fuse corner's PVS voltage to define the ceiling
+ * voltage for that corner in order to support SoC's in which variable
+ * ceiling values are required.
+ */
+ if (cpr_vreg->pvs_corner_v[highest_fuse_corner] >
+ cpr_vreg->fuse_ceiling_volt[highest_fuse_corner])
+ cpr_vreg->fuse_ceiling_volt[highest_fuse_corner] =
+ cpr_vreg->pvs_corner_v[highest_fuse_corner];
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
+ */
+ for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
+ if (cpr_vreg->pvs_corner_v[i] > cpr_vreg->fuse_ceiling_volt[i])
+ cpr_vreg->pvs_corner_v[i]
+ = cpr_vreg->fuse_ceiling_volt[i];
+ else if (cpr_vreg->pvs_corner_v[i]
+ < cpr_vreg->fuse_floor_volt[i])
+ cpr_vreg->pvs_corner_v[i]
+ = cpr_vreg->fuse_floor_volt[i];
+
+ cpr_vreg->ceiling_max
+ = cpr_vreg->fuse_ceiling_volt[highest_fuse_corner];
+
+ /*
+	 * Log ceiling, floor, and initial voltages since they are critical for
+ * all CPR debugging.
+ */
+ buflen = cpr_vreg->num_fuse_corners * (MAX_CHARS_PER_INT + 2)
+ * sizeof(*buf);
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (buf == NULL) {
+ cpr_err(cpr_vreg, "Could not allocate memory for corner voltage logging\n");
+ return 0;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
+ pos += scnprintf(buf + pos, buflen - pos, "%u%s",
+ cpr_vreg->pvs_corner_v[i],
+ i < highest_fuse_corner ? " " : "");
+ cpr_info(cpr_vreg, "pvs voltage: [%s] uV\n", buf);
+
+ for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
+ pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+ cpr_vreg->fuse_ceiling_volt[i],
+ i < highest_fuse_corner ? " " : "");
+ cpr_info(cpr_vreg, "ceiling voltage: [%s] uV\n", buf);
+
+ for (i = CPR_FUSE_CORNER_MIN, pos = 0; i <= highest_fuse_corner; i++)
+ pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+ cpr_vreg->fuse_floor_volt[i],
+ i < highest_fuse_corner ? " " : "");
+ cpr_info(cpr_vreg, "floor voltage: [%s] uV\n", buf);
+
+ kfree(buf);
+ return 0;
+}
+
+#define CPR_PROP_READ_U32(cpr_vreg, of_node, cpr_property, cpr_config, rc) \
+do { \
+ if (!rc) { \
+ rc = of_property_read_u32(of_node, \
+ "qcom," cpr_property, \
+ cpr_config); \
+ if (rc) { \
+ cpr_err(cpr_vreg, "Missing " #cpr_property \
+ ": rc = %d\n", rc); \
+ } \
+ } \
+} while (0)
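+/*
+ * Hypothetical usage example: CPR_PROP_READ_U32(cpr_vreg, of_node,
+ * "cpr-timer-delay", &cpr_vreg->timer_delay_us, rc) would read the
+ * "qcom,cpr-timer-delay" u32 property into timer_delay_us, but only if no
+ * earlier read in the same sequence has already failed (rc != 0).
+ */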
+
+static int cpr_apc_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int i, rc = 0;
+
+ for (i = 0; i < ARRAY_SIZE(vdd_apc_name); i++) {
+ cpr_vreg->vdd_apc = devm_regulator_get_optional(&pdev->dev,
+ vdd_apc_name[i]);
+ rc = PTR_RET(cpr_vreg->vdd_apc);
+ if (!IS_ERR_OR_NULL(cpr_vreg->vdd_apc))
+ break;
+ }
+
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg, "devm_regulator_get: rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Check dependencies */
+ if (of_find_property(of_node, "vdd-mx-supply", NULL)) {
+ cpr_vreg->vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx");
+ if (IS_ERR_OR_NULL(cpr_vreg->vdd_mx)) {
+ rc = PTR_RET(cpr_vreg->vdd_mx);
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg,
+ "devm_regulator_get: vdd-mx: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void cpr_apc_exit(struct cpr_regulator *cpr_vreg)
+{
+ if (cpr_vreg->vreg_enabled) {
+ regulator_disable(cpr_vreg->vdd_apc);
+
+ if (cpr_vreg->vdd_mx)
+ regulator_disable(cpr_vreg->vdd_mx);
+ }
+}
+
+static int cpr_voltage_uplift_wa_inc_quot(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ u32 delta_quot[3];
+ int rc, i;
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-uplift-quotient", delta_quot, 3);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "cpr-uplift-quotient is missing: %d", rc);
+ return rc;
+ }
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+ cpr_vreg->cpr_fuse_target_quot[i] += delta_quot[i-1];
+ return rc;
+}
+
+static void cpr_parse_pvs_version_fuse(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ int rc;
+ u64 fuse_bits;
+ u32 fuse_sel[4];
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,pvs-version-fuse-sel", fuse_sel, 4);
+ if (!rc) {
+ fuse_bits = cpr_read_efuse_row(cpr_vreg,
+ fuse_sel[0], fuse_sel[3]);
+ cpr_vreg->pvs_version = (fuse_bits >> fuse_sel[1]) &
+ ((1 << fuse_sel[2]) - 1);
+ cpr_info(cpr_vreg, "[row: %d]: 0x%llx, pvs_version = %d\n",
+ fuse_sel[0], fuse_bits, cpr_vreg->pvs_version);
+ } else {
+ cpr_vreg->pvs_version = 0;
+ }
+}
+
+/**
+ * cpr_get_open_loop_voltage() - fill the open_loop_volt array with linearly
+ * interpolated open-loop CPR voltage values.
+ * @cpr_vreg: Handle to the cpr-regulator device
+ * @dev: Device pointer for the cpr-regulator device
+ * @corner_max: Array of length (cpr_vreg->num_fuse_corners + 1) which maps from
+ * fuse corners to the highest virtual corner corresponding to a
+ * given fuse corner
+ * @freq_map: Array of length (cpr_vreg->num_corners + 1) which maps from
+ * virtual corners to frequencies in Hz.
+ * @maps_valid: Boolean which indicates if the values in corner_max and freq_map
+ * are valid. If they are not valid, then the open_loop_volt
+ * values are not interpolated.
+ */
+static int cpr_get_open_loop_voltage(struct cpr_regulator *cpr_vreg,
+ struct device *dev, const u32 *corner_max, const u32 *freq_map,
+ bool maps_valid)
+{
+ int rc = 0;
+ int i, j;
+ u64 volt_high, volt_low, freq_high, freq_low, freq, temp, temp_limit;
+ u32 *max_factor = NULL;
+
+ cpr_vreg->open_loop_volt = devm_kzalloc(dev,
+ sizeof(int) * (cpr_vreg->num_corners + 1), GFP_KERNEL);
+ if (!cpr_vreg->open_loop_volt) {
+ cpr_err(cpr_vreg,
+ "Can't allocate memory for cpr_vreg->open_loop_volt\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Set open loop voltage to be equal to per-fuse-corner initial voltage
+ * by default. This ensures that the open loop voltage is valid for
+ * all virtual corners even if some virtual corner to frequency mappings
+ * are missing. It also ensures that the voltage is valid for the
+ * higher corners not utilized by a given speed-bin.
+ */
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+ cpr_vreg->open_loop_volt[i]
+ = cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]];
+
+ if (!maps_valid || !corner_max || !freq_map
+ || !of_find_property(dev->of_node,
+ "qcom,cpr-voltage-scaling-factor-max", NULL)) {
+ /* Not using interpolation */
+ return 0;
+ }
+
+ max_factor
+ = kzalloc(sizeof(*max_factor) * (cpr_vreg->num_fuse_corners + 1),
+ GFP_KERNEL);
+ if (!max_factor) {
+ cpr_err(cpr_vreg, "Could not allocate memory for max_factor array\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-voltage-scaling-factor-max",
+ &max_factor[CPR_FUSE_CORNER_MIN],
+ cpr_vreg->num_fuse_corners);
+ if (rc) {
+ cpr_debug(cpr_vreg, "failed to read qcom,cpr-voltage-scaling-factor-max; initial voltage interpolation not possible\n");
+ kfree(max_factor);
+ return 0;
+ }
+
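+	/*
+	 * Interpolate the open-loop voltage for each virtual corner between
+	 * adjacent fuse corners:
+	 *	volt = volt_high - min((freq_high - freq) * (volt_high - volt_low)
+	 *					/ (freq_high - freq_low),
+	 *			       (freq_high - freq) * max_factor / 1000000)
+	 * For illustration (hypothetical values): fuse corners at 800 MHz /
+	 * 900000 uV and 1200 MHz / 1000000 uV with a 1000 MHz virtual corner
+	 * give 1000000 - (200 * 100000 / 400) = 950000 uV before the
+	 * max_factor clamp and step-size rounding below.
+	 */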
+ for (j = CPR_FUSE_CORNER_MIN + 1; j <= cpr_vreg->num_fuse_corners;
+ j++) {
+ freq_high = freq_map[corner_max[j]];
+ freq_low = freq_map[corner_max[j - 1]];
+ volt_high = cpr_vreg->pvs_corner_v[j];
+ volt_low = cpr_vreg->pvs_corner_v[j - 1];
+ if (freq_high <= freq_low || volt_high <= volt_low)
+ continue;
+
+ for (i = corner_max[j - 1] + 1; i < corner_max[j]; i++) {
+ freq = freq_map[i];
+ if (freq_high <= freq)
+ continue;
+
+ temp = (freq_high - freq) * (volt_high - volt_low);
+ do_div(temp, (u32)(freq_high - freq_low));
+
+ /*
+ * max_factor[j] has units of uV/MHz while freq values
+ * have units of Hz. Divide by 1000000 to convert.
+ */
+ temp_limit = (freq_high - freq) * max_factor[j];
+ do_div(temp_limit, 1000000);
+
+ cpr_vreg->open_loop_volt[i]
+ = volt_high - min(temp, temp_limit);
+ cpr_vreg->open_loop_volt[i]
+ = DIV_ROUND_UP(cpr_vreg->open_loop_volt[i],
+ cpr_vreg->step_volt)
+ * cpr_vreg->step_volt;
+ }
+ }
+
+ kfree(max_factor);
+ return 0;
+}
+
+/*
+ * Limit the per-virtual-corner open-loop voltages using the per-virtual-corner
+ * ceiling and floor voltage values. This must be called only after the
+ * open_loop_volt, ceiling, and floor arrays have all been initialized.
+ */
+static int cpr_limit_open_loop_voltage(struct cpr_regulator *cpr_vreg)
+{
+ int i;
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ if (cpr_vreg->open_loop_volt[i] > cpr_vreg->ceiling_volt[i])
+ cpr_vreg->open_loop_volt[i] = cpr_vreg->ceiling_volt[i];
+ else if (cpr_vreg->open_loop_volt[i] < cpr_vreg->floor_volt[i])
+ cpr_vreg->open_loop_volt[i] = cpr_vreg->floor_volt[i];
+ }
+
+ return 0;
+}
+
+/*
+ * Fill an OPP table for the cpr-regulator device struct with pairs of
+ * <virtual voltage corner number, open loop voltage> tuples.
+ */
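+/*
+ * Note: since the virtual corner number is stored in the OPP frequency field,
+ * a consumer could, for example, look up the open-loop voltage of corner 3 via
+ * dev_pm_opp_find_freq_exact(dev, 3, true) followed by dev_pm_opp_get_voltage()
+ * (illustrative usage only, not part of this driver).
+ */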
+static int cpr_populate_opp_table(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ int i, rc = 0;
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ rc |= dev_pm_opp_add(dev, i, cpr_vreg->open_loop_volt[i]);
+ if (rc)
+ cpr_debug(cpr_vreg, "could not add OPP entry <%d, %d>, rc=%d\n",
+ i, cpr_vreg->open_loop_volt[i], rc);
+ }
+ if (rc)
+ cpr_err(cpr_vreg, "adding OPP entry failed - OPP may not be enabled, rc=%d\n",
+ rc);
+
+ return 0;
+}
+
+/*
+ * Conditionally reduce the per-virtual-corner ceiling voltages if certain
+ * device tree flags are present. This must be called only after the ceiling
+ * array has been initialized and the open_loop_volt array values have been
+ * initialized and limited to the existing floor to ceiling voltage range.
+ */
+static int cpr_reduce_ceiling_voltage(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ bool reduce_to_fuse_open_loop, reduce_to_interpolated_open_loop;
+ int i;
+
+ reduce_to_fuse_open_loop = of_property_read_bool(dev->of_node,
+ "qcom,cpr-init-voltage-as-ceiling");
+ reduce_to_interpolated_open_loop = of_property_read_bool(dev->of_node,
+ "qcom,cpr-scaled-init-voltage-as-ceiling");
+
+ if (!reduce_to_fuse_open_loop && !reduce_to_interpolated_open_loop)
+ return 0;
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ if (reduce_to_interpolated_open_loop &&
+ cpr_vreg->open_loop_volt[i] < cpr_vreg->ceiling_volt[i])
+ cpr_vreg->ceiling_volt[i] = cpr_vreg->open_loop_volt[i];
+ else if (reduce_to_fuse_open_loop &&
+ cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]]
+ < cpr_vreg->ceiling_volt[i])
+ cpr_vreg->ceiling_volt[i]
+ = max((u32)cpr_vreg->floor_volt[i],
+ cpr_vreg->pvs_corner_v[cpr_vreg->corner_map[i]]);
+ cpr_debug(cpr_vreg, "lowered ceiling[%d] = %d uV\n",
+ i, cpr_vreg->ceiling_volt[i]);
+ }
+
+ return 0;
+}
+
+static int cpr_adjust_target_quot_offsets(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int tuple_count, tuple_match, i;
+ u32 index;
+ u32 quot_offset_adjust = 0;
+ int len = 0;
+ int rc = 0;
+ char *quot_offset_str;
+
+ quot_offset_str = "qcom,cpr-quot-offset-adjustment";
+ if (!of_find_property(of_node, quot_offset_str, &len)) {
+ /* No static quotient adjustment needed. */
+ return 0;
+ }
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /* No matching index to use for quotient adjustment. */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+ cpr_err(cpr_vreg, "%s length=%d is invalid\n", quot_offset_str,
+ len);
+ return -EINVAL;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+ index = tuple_match * cpr_vreg->num_fuse_corners
+ + i - CPR_FUSE_CORNER_MIN;
+ rc = of_property_read_u32_index(of_node, quot_offset_str, index,
+ &quot_offset_adjust);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+ quot_offset_str, index, rc);
+ return rc;
+ }
+
+ if (quot_offset_adjust) {
+ cpr_vreg->fuse_quot_offset[i] += quot_offset_adjust;
+ cpr_info(cpr_vreg, "Corner[%d]: adjusted target quot = %d\n",
+ i, cpr_vreg->fuse_quot_offset[i]);
+ }
+ }
+
+ return rc;
+}
+
+static int cpr_get_fuse_quot_offset(struct cpr_regulator *cpr_vreg,
+ struct platform_device *pdev,
+ struct cpr_quot_scale *quot_scale)
+{
+ struct device *dev = &pdev->dev;
+ struct property *prop;
+ u32 *fuse_sel, *tmp, *offset_multiplier = NULL;
+ int rc = 0, i, size, len;
+ char *quot_offset_str;
+
+ quot_offset_str = cpr_vreg->cpr_fuse_redundant
+ ? "qcom,cpr-fuse-redun-quot-offset"
+ : "qcom,cpr-fuse-quot-offset";
+
+ prop = of_find_property(dev->of_node, quot_offset_str, NULL);
+ if (!prop) {
+ cpr_debug(cpr_vreg, "%s not present\n", quot_offset_str);
+ return 0;
+ } else {
+ size = prop->length / sizeof(u32);
+ if (size != cpr_vreg->num_fuse_corners * 4) {
+ cpr_err(cpr_vreg, "fuse position for quot offset is invalid\n");
+ return -EINVAL;
+ }
+ }
+
+ fuse_sel = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+ if (!fuse_sel) {
+ cpr_err(cpr_vreg, "memory alloc failed.\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(dev->of_node, quot_offset_str,
+ fuse_sel, size);
+
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "read %s failed, rc = %d\n", quot_offset_str,
+ rc);
+ kfree(fuse_sel);
+ return rc;
+ }
+
+ cpr_vreg->fuse_quot_offset = devm_kzalloc(dev,
+ sizeof(u32) * (cpr_vreg->num_fuse_corners + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->fuse_quot_offset) {
+ cpr_err(cpr_vreg, "Can't allocate memory for cpr_vreg->fuse_quot_offset\n");
+ kfree(fuse_sel);
+ return -ENOMEM;
+ }
+
+ if (!of_find_property(dev->of_node,
+ "qcom,cpr-fuse-quot-offset-scale", &len)) {
+ cpr_debug(cpr_vreg, "qcom,cpr-fuse-quot-offset-scale not present\n");
+ } else {
+ if (len != cpr_vreg->num_fuse_corners * sizeof(u32)) {
+ cpr_err(cpr_vreg, "the size of qcom,cpr-fuse-quot-offset-scale is invalid\n");
+ kfree(fuse_sel);
+ return -EINVAL;
+ }
+
+ offset_multiplier = kzalloc(sizeof(*offset_multiplier)
+ * (cpr_vreg->num_fuse_corners + 1),
+ GFP_KERNEL);
+ if (!offset_multiplier) {
+ cpr_err(cpr_vreg, "memory alloc failed.\n");
+ kfree(fuse_sel);
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-fuse-quot-offset-scale",
+ &offset_multiplier[1],
+ cpr_vreg->num_fuse_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "read qcom,cpr-fuse-quot-offset-scale failed, rc = %d\n",
+ rc);
+ kfree(fuse_sel);
+ goto out;
+ }
+ }
+
+ tmp = fuse_sel;
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+ cpr_vreg->fuse_quot_offset[i] = cpr_read_efuse_param(cpr_vreg,
+ fuse_sel[0], fuse_sel[1], fuse_sel[2],
+ fuse_sel[3]);
+ if (offset_multiplier)
+ cpr_vreg->fuse_quot_offset[i] *= offset_multiplier[i];
+ fuse_sel += 4;
+ }
+
+ rc = cpr_adjust_target_quot_offsets(pdev, cpr_vreg);
+ kfree(tmp);
+out:
+ kfree(offset_multiplier);
+ return rc;
+}
+
+/*
+ * Adjust the per-virtual-corner open loop voltage with an offset specified by a
+ * device-tree property. This must be called after open-loop voltage scaling.
+ */
+static int cpr_virtual_corner_voltage_adjust(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ char *prop_name = "qcom,cpr-virtual-corner-init-voltage-adjustment";
+ int i, rc, tuple_count, tuple_match, index, len;
+ u32 voltage_adjust;
+
+ if (!of_find_property(dev->of_node, prop_name, &len)) {
+ cpr_debug(cpr_vreg, "%s not specified\n", prop_name);
+ return 0;
+ }
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /* No matching index to use for voltage adjustment. */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+ cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_name,
+ len);
+ return -EINVAL;
+ }
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ index = tuple_match * cpr_vreg->num_corners
+ + i - CPR_CORNER_MIN;
+ rc = of_property_read_u32_index(dev->of_node, prop_name,
+ index, &voltage_adjust);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+ prop_name, index, rc);
+ return rc;
+ }
+
+ if (voltage_adjust) {
+ cpr_vreg->open_loop_volt[i] += (int)voltage_adjust;
+ cpr_info(cpr_vreg, "corner=%d adjusted open-loop voltage=%d\n",
+ i, cpr_vreg->open_loop_volt[i]);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Adjust the per-virtual-corner quot with an offset specified by a
+ * device-tree property. This must be called after the quot-scaling adjustments
+ * are completed.
+ */
+static int cpr_virtual_corner_quot_adjust(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ char *prop_name = "qcom,cpr-virtual-corner-quotient-adjustment";
+ int i, rc, tuple_count, tuple_match, index, len;
+ u32 quot_adjust;
+
+ if (!of_find_property(dev->of_node, prop_name, &len)) {
+ cpr_debug(cpr_vreg, "%s not specified\n", prop_name);
+ return 0;
+ }
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /* No matching index to use for quotient adjustment. */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+ cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_name,
+ len);
+ return -EINVAL;
+ }
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ index = tuple_match * cpr_vreg->num_corners
+ + i - CPR_CORNER_MIN;
+ rc = of_property_read_u32_index(dev->of_node, prop_name,
+ index, &quot_adjust);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+ prop_name, index, rc);
+ return rc;
+ }
+
+ if (quot_adjust) {
+ cpr_vreg->quot_adjust[i] -= (int)quot_adjust;
+ cpr_info(cpr_vreg, "corner=%d adjusted quotient=%d\n",
+ i,
+ cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
+ - cpr_vreg->quot_adjust[i]);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * cpr_get_corner_quot_adjustment() -- get the quot_adjust for each corner.
+ *
+ * Get the virtual corner to fuse corner mapping and virtual corner to APC clock
+ * frequency mapping from device tree.
+ * Calculate a quotient adjustment scaling factor for each fuse corner above
+ * the lowest one, capped at the constant max scaling factor defined for that
+ * fuse corner in device tree.
+ * Use linear interpolation with this scaling factor to calculate the quotient
+ * adjustment for each virtual corner mapped to those fuse corners.
+ */
+static int cpr_get_corner_quot_adjustment(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ int rc = 0;
+ int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+ int i, j, size;
+ struct property *prop;
+ bool corners_mapped, match_found;
+ u32 *tmp, *freq_map = NULL;
+ u32 corner, freq_corner;
+ u32 *freq_max = NULL;
+ u32 *scaling = NULL;
+ u32 *max_factor = NULL;
+ u32 *corner_max = NULL;
+ bool maps_valid = false;
+
+ prop = of_find_property(dev->of_node, "qcom,cpr-corner-map", NULL);
+
+ if (prop) {
+ size = prop->length / sizeof(u32);
+ corners_mapped = true;
+ } else {
+ size = cpr_vreg->num_fuse_corners;
+ corners_mapped = false;
+ }
+
+ cpr_vreg->corner_map = devm_kzalloc(dev, sizeof(int) * (size + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->corner_map) {
+ cpr_err(cpr_vreg,
+ "Can't allocate memory for cpr_vreg->corner_map\n");
+ return -ENOMEM;
+ }
+ cpr_vreg->num_corners = size;
+
+ cpr_vreg->quot_adjust = devm_kzalloc(dev,
+ sizeof(u32) * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->quot_adjust) {
+ cpr_err(cpr_vreg,
+ "Can't allocate memory for cpr_vreg->quot_adjust\n");
+ return -ENOMEM;
+ }
+
+ if (!corners_mapped) {
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+ i++)
+ cpr_vreg->corner_map[i] = i;
+ goto free_arrays;
+ } else {
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-corner-map", &cpr_vreg->corner_map[1], size);
+
+ if (rc) {
+ cpr_err(cpr_vreg,
+ "qcom,cpr-corner-map missing, rc = %d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Verify that the virtual corner to fuse corner mapping is
+ * valid.
+ */
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ if (cpr_vreg->corner_map[i] > cpr_vreg->num_fuse_corners
+ || cpr_vreg->corner_map[i] < CPR_FUSE_CORNER_MIN) {
+ cpr_err(cpr_vreg, "qcom,cpr-corner-map contains an element %d which isn't in the allowed range [%d, %d]\n",
+ cpr_vreg->corner_map[i],
+ CPR_FUSE_CORNER_MIN,
+ cpr_vreg->num_fuse_corners);
+ return -EINVAL;
+ }
+ }
+ }
+
+ prop = of_find_property(dev->of_node,
+ "qcom,cpr-speed-bin-max-corners", NULL);
+ if (!prop) {
+ cpr_debug(cpr_vreg, "qcom,cpr-speed-bin-max-corner missing\n");
+ goto free_arrays;
+ }
+
+ size = prop->length / sizeof(u32);
+ tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp) {
+ cpr_err(cpr_vreg, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-speed-bin-max-corners", tmp, size);
+ if (rc < 0) {
+ kfree(tmp);
+ cpr_err(cpr_vreg,
+ "get cpr-speed-bin-max-corners failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ corner_max = kzalloc((cpr_vreg->num_fuse_corners + 1)
+ * sizeof(*corner_max), GFP_KERNEL);
+ freq_max = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*freq_max),
+ GFP_KERNEL);
+ if (corner_max == NULL || freq_max == NULL) {
+ cpr_err(cpr_vreg, "Could not allocate memory for quotient scaling arrays\n");
+ kfree(tmp);
+ rc = -ENOMEM;
+ goto free_arrays;
+ }
+
+ /*
+ * Get the maximum virtual corner for each fuse corner based upon the
+ * speed_bin and pvs_version values.
+ */
+ match_found = false;
+ for (i = 0; i < size; i += cpr_vreg->num_fuse_corners + 2) {
+ if (tmp[i] != cpr_vreg->speed_bin &&
+ tmp[i] != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (tmp[i + 1] != cpr_vreg->pvs_version &&
+ tmp[i + 1] != FUSE_PARAM_MATCH_ANY)
+ continue;
+ for (j = CPR_FUSE_CORNER_MIN;
+ j <= cpr_vreg->num_fuse_corners; j++)
+ corner_max[j] = tmp[i + 2 + j - CPR_FUSE_CORNER_MIN];
+ match_found = true;
+ break;
+ }
+ kfree(tmp);
+
+ if (!match_found) {
+ cpr_debug(cpr_vreg, "No quotient adjustment possible for speed bin=%u, pvs version=%u\n",
+ cpr_vreg->speed_bin, cpr_vreg->pvs_version);
+ goto free_arrays;
+ }
+
+ /* Verify that fuse corner to max virtual corner mapping is valid. */
+ for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++) {
+ if (corner_max[i] < CPR_CORNER_MIN
+ || corner_max[i] > cpr_vreg->num_corners) {
+ cpr_err(cpr_vreg, "Invalid corner=%d in qcom,cpr-speed-bin-max-corners\n",
+ corner_max[i]);
+ goto free_arrays;
+ }
+ }
+
+ /*
+	 * If the virtual corner values read from the
+	 * qcom,cpr-speed-bin-max-corners property are not monotonically
+	 * increasing, return success so that the driver can continue to run
+	 * without quotient scaling.
+ */
+ for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
+ if (corner_max[i] <= corner_max[i - 1]) {
+ cpr_err(cpr_vreg, "fuse corner=%d (%u) should be larger than the fuse corner=%d (%u)\n",
+ i, corner_max[i], i - 1, corner_max[i - 1]);
+ goto free_arrays;
+ }
+ }
+
+ prop = of_find_property(dev->of_node,
+ "qcom,cpr-corner-frequency-map", NULL);
+ if (!prop) {
+ cpr_debug(cpr_vreg, "qcom,cpr-corner-frequency-map missing\n");
+ goto free_arrays;
+ }
+
+ size = prop->length / sizeof(u32);
+ tmp = kzalloc(sizeof(u32) * size, GFP_KERNEL);
+ if (!tmp) {
+ cpr_err(cpr_vreg, "memory alloc failed\n");
+ rc = -ENOMEM;
+ goto free_arrays;
+ }
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-corner-frequency-map", tmp, size);
+ if (rc < 0) {
+ cpr_err(cpr_vreg,
+ "get cpr-corner-frequency-map failed, rc = %d\n", rc);
+ kfree(tmp);
+ goto free_arrays;
+ }
+ freq_map = kzalloc(sizeof(u32) * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ if (!freq_map) {
+ cpr_err(cpr_vreg, "memory alloc for freq_map failed!\n");
+ kfree(tmp);
+ rc = -ENOMEM;
+ goto free_arrays;
+ }
+ for (i = 0; i < size; i += 2) {
+ corner = tmp[i];
+ if ((corner < 1) || (corner > cpr_vreg->num_corners)) {
+ cpr_err(cpr_vreg,
+ "corner should be in 1~%d range: %d\n",
+ cpr_vreg->num_corners, corner);
+ continue;
+ }
+ freq_map[corner] = tmp[i + 1];
+ cpr_debug(cpr_vreg,
+ "Frequency at virtual corner %d is %d Hz.\n",
+ corner, freq_map[corner]);
+ }
+ kfree(tmp);
+
+ prop = of_find_property(dev->of_node,
+ "qcom,cpr-quot-adjust-scaling-factor-max", NULL);
+ if (!prop) {
+ cpr_debug(cpr_vreg, "qcom,cpr-quot-adjust-scaling-factor-max missing\n");
+ rc = 0;
+ goto free_arrays;
+ }
+
+ size = prop->length / sizeof(u32);
+ if ((size != 1) && (size != cpr_vreg->num_fuse_corners)) {
+ cpr_err(cpr_vreg, "The size of qcom,cpr-quot-adjust-scaling-factor-max should be 1 or %d\n",
+ cpr_vreg->num_fuse_corners);
+ rc = 0;
+ goto free_arrays;
+ }
+
+ max_factor = kzalloc(sizeof(u32) * (cpr_vreg->num_fuse_corners + 1),
+ GFP_KERNEL);
+ if (!max_factor) {
+ cpr_err(cpr_vreg, "Could not allocate memory for max_factor array\n");
+ rc = -ENOMEM;
+ goto free_arrays;
+ }
+ /*
+ * Leave max_factor[CPR_FUSE_CORNER_MIN ... highest_fuse_corner-1] = 0
+ * if cpr-quot-adjust-scaling-factor-max is a single value in order to
+ * maintain backward compatibility.
+ */
+ i = (size == cpr_vreg->num_fuse_corners) ? CPR_FUSE_CORNER_MIN
+ : highest_fuse_corner;
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-quot-adjust-scaling-factor-max",
+ &max_factor[i], size);
+ if (rc < 0) {
+ cpr_debug(cpr_vreg, "could not read qcom,cpr-quot-adjust-scaling-factor-max, rc=%d\n",
+ rc);
+ rc = 0;
+ goto free_arrays;
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ */
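+	/*
+	 * Worked example (hypothetical values): QUOT(corner_N) = 1400,
+	 * QUOT(corner_N-1) = 1200, freq(corner_N) = 1200 MHz, and
+	 * freq(corner_N-1) = 800 MHz give scaling = 1000 * 200 / 400 = 500,
+	 * i.e. 0.500 target quotient units per MHz, subject to the max_factor
+	 * clamp below.
+	 */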
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
+ freq_max[i] = freq_map[corner_max[i]];
+ for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
+ if (freq_max[i] <= freq_max[i - 1] || freq_max[i - 1] == 0) {
+ cpr_err(cpr_vreg, "fuse corner %d freq=%u should be larger than fuse corner %d freq=%u\n",
+ i, freq_max[i], i - 1, freq_max[i - 1]);
+ rc = -EINVAL;
+ goto free_arrays;
+ }
+ }
+ scaling = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*scaling),
+ GFP_KERNEL);
+ if (!scaling) {
+ cpr_err(cpr_vreg, "Could not allocate memory for scaling array\n");
+ rc = -ENOMEM;
+ goto free_arrays;
+ }
+ /* Convert corner max frequencies from Hz to MHz. */
+ for (i = CPR_FUSE_CORNER_MIN; i <= highest_fuse_corner; i++)
+ freq_max[i] /= 1000000;
+
+ for (i = CPR_FUSE_CORNER_MIN + 1; i <= highest_fuse_corner; i++) {
+ if (cpr_vreg->fuse_quot_offset &&
+ (cpr_vreg->cpr_fuse_ro_sel[i] !=
+ cpr_vreg->cpr_fuse_ro_sel[i - 1])) {
+ scaling[i] = 1000 * cpr_vreg->fuse_quot_offset[i]
+ / (freq_max[i] - freq_max[i - 1]);
+ } else {
+ scaling[i] = 1000 * (cpr_vreg->cpr_fuse_target_quot[i]
+ - cpr_vreg->cpr_fuse_target_quot[i - 1])
+ / (freq_max[i] - freq_max[i - 1]);
+ if (cpr_vreg->cpr_fuse_target_quot[i]
+ < cpr_vreg->cpr_fuse_target_quot[i - 1])
+ scaling[i] = 0;
+ }
+ scaling[i] = min(scaling[i], max_factor[i]);
+ cpr_info(cpr_vreg, "fuse corner %d quotient adjustment scaling factor: %d.%03d\n",
+ i, scaling[i] / 1000, scaling[i] % 1000);
+ }
+
+ /*
+ * Walk through the virtual corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * @freq_max: max frequency in MHz supported by the fuse corner
+ * @freq_corner: frequency in MHz corresponding to the virtual corner
+ */
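+	/*
+	 * Worked example (hypothetical values): with scaling = 500, a fuse
+	 * corner max frequency of 1200 MHz, and a virtual corner at 1000 MHz,
+	 * quot_adjust = (1200 - 1000) * 500 / 1000 = 100, so the target
+	 * quotient for that virtual corner is lowered by 100.
+	 */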
+ for (j = CPR_FUSE_CORNER_MIN + 1; j <= highest_fuse_corner; j++) {
+ for (i = corner_max[j - 1] + 1; i < corner_max[j]; i++) {
+ freq_corner = freq_map[i] / 1000000; /* MHz */
+ if (freq_corner > 0) {
+ cpr_vreg->quot_adjust[i] = scaling[j] *
+ (freq_max[j] - freq_corner) / 1000;
+ }
+ }
+ }
+
+ rc = cpr_virtual_corner_quot_adjust(cpr_vreg, dev);
+ if (rc) {
+ cpr_err(cpr_vreg, "count not adjust virtual-corner quot rc=%d\n",
+ rc);
+ goto free_arrays;
+ }
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+ cpr_info(cpr_vreg, "adjusted quotient[%d] = %d\n", i,
+ cpr_vreg->cpr_fuse_target_quot[cpr_vreg->corner_map[i]]
+ - cpr_vreg->quot_adjust[i]);
+
+ maps_valid = true;
+
+free_arrays:
+ if (!rc) {
+
+ rc = cpr_get_open_loop_voltage(cpr_vreg, dev, corner_max,
+ freq_map, maps_valid);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not fill open loop voltage array, rc=%d\n",
+ rc);
+ goto free_arrays_1;
+ }
+
+ rc = cpr_virtual_corner_voltage_adjust(cpr_vreg, dev);
+ if (rc)
+ cpr_err(cpr_vreg, "count not adjust virtual-corner voltage rc=%d\n",
+ rc);
+ }
+
+free_arrays_1:
+ kfree(max_factor);
+ kfree(scaling);
+ kfree(freq_map);
+ kfree(corner_max);
+ kfree(freq_max);
+ return rc;
+}
+
+/*
+ * Check if the redundant set of CPR fuses should be used in place of the
+ * primary set and configure the cpr_fuse_redundant element accordingly.
+ */
+static int cpr_check_redundant(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ u32 cpr_fuse_redun_sel[5];
+ int rc;
+
+ if (of_find_property(of_node, "qcom,cpr-fuse-redun-sel", NULL)) {
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-redun-sel", cpr_fuse_redun_sel, 5);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "qcom,cpr-fuse-redun-sel missing: rc=%d\n",
+ rc);
+ return rc;
+ }
+ cpr_vreg->cpr_fuse_redundant
+ = cpr_fuse_is_setting_expected(cpr_vreg,
+ cpr_fuse_redun_sel);
+ } else {
+ cpr_vreg->cpr_fuse_redundant = false;
+ }
+
+ if (cpr_vreg->cpr_fuse_redundant)
+ cpr_info(cpr_vreg, "using redundant fuse parameters\n");
+
+ return 0;
+}
+
+static int cpr_read_fuse_revision(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ u32 fuse_sel[4];
+ int rc;
+
+ if (of_find_property(of_node, "qcom,cpr-fuse-revision", NULL)) {
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-revision", fuse_sel, 4);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "qcom,cpr-fuse-revision read failed: rc=%d\n",
+ rc);
+ return rc;
+ }
+ cpr_vreg->cpr_fuse_revision
+ = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+ fuse_sel[1], fuse_sel[2], fuse_sel[3]);
+ cpr_info(cpr_vreg, "fuse revision = %d\n",
+ cpr_vreg->cpr_fuse_revision);
+ } else {
+ cpr_vreg->cpr_fuse_revision = FUSE_REVISION_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int cpr_read_ro_select(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int rc = 0;
+ u32 cpr_fuse_row[2];
+ char *ro_sel_str;
+ int *bp_ro_sel;
+ int i;
+
+ bp_ro_sel
+ = kzalloc((cpr_vreg->num_fuse_corners + 1) * sizeof(*bp_ro_sel),
+ GFP_KERNEL);
+ if (!bp_ro_sel) {
+ cpr_err(cpr_vreg, "could not allocate memory for temp array\n");
+ return -ENOMEM;
+ }
+
+ if (cpr_vreg->cpr_fuse_redundant) {
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-redun-row",
+ cpr_fuse_row, 2);
+ ro_sel_str = "qcom,cpr-fuse-redun-ro-sel";
+ } else {
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-row",
+ cpr_fuse_row, 2);
+ ro_sel_str = "qcom,cpr-fuse-ro-sel";
+ }
+ if (rc)
+ goto error;
+
+ rc = of_property_read_u32_array(of_node, ro_sel_str,
+ &bp_ro_sel[CPR_FUSE_CORNER_MIN], cpr_vreg->num_fuse_corners);
+ if (rc) {
+ cpr_err(cpr_vreg, "%s read error, rc=%d\n", ro_sel_str, rc);
+ goto error;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+ cpr_vreg->cpr_fuse_ro_sel[i]
+ = cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
+ bp_ro_sel[i], CPR_FUSE_RO_SEL_BITS,
+ cpr_fuse_row[1]);
+
+error:
+ kfree(bp_ro_sel);
+
+ return rc;
+}
+
+static int cpr_find_fuse_map_match(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int i, j, rc, tuple_size;
+ int len = 0;
+ u32 *tmp, val, ro;
+
+ /* Specify default no match case. */
+ cpr_vreg->cpr_fuse_map_match = FUSE_MAP_NO_MATCH;
+ cpr_vreg->cpr_fuse_map_count = 0;
+
+ if (!of_find_property(of_node, "qcom,cpr-fuse-version-map", &len)) {
+ /* No mapping present. */
+ return 0;
+ }
+
+ tuple_size = cpr_vreg->num_fuse_corners + 3;
+ cpr_vreg->cpr_fuse_map_count = len / (sizeof(u32) * tuple_size);
+
+ if (len == 0 || len % (sizeof(u32) * tuple_size)) {
+ cpr_err(cpr_vreg, "qcom,cpr-fuse-version-map length=%d is invalid\n",
+ len);
+ return -EINVAL;
+ }
+
+ tmp = kzalloc(len, GFP_KERNEL);
+ if (!tmp) {
+ cpr_err(cpr_vreg, "could not allocate memory for temp array\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-version-map",
+ tmp, cpr_vreg->cpr_fuse_map_count * tuple_size);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,cpr-fuse-version-map, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ /*
+ * qcom,cpr-fuse-version-map tuple format:
+ * <speed_bin, pvs_version, cpr_fuse_revision, ro_sel[1], ...,
+ * ro_sel[n]> for n == number of fuse corners
+ */
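+	/*
+	 * Illustrative example (hypothetical values): with 3 fuse corners,
+	 * the tuple <1 2 0 0 1 1> matches devices with speed_bin 1,
+	 * pvs_version 2, cpr_fuse_revision 0, and fused RO selects 0, 1, 1;
+	 * FUSE_PARAM_MATCH_ANY may be used as a wildcard in any position.
+	 */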
+ for (i = 0; i < cpr_vreg->cpr_fuse_map_count; i++) {
+ if (tmp[i * tuple_size] != cpr_vreg->speed_bin
+ && tmp[i * tuple_size] != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (tmp[i * tuple_size + 1] != cpr_vreg->pvs_version
+ && tmp[i * tuple_size + 1] != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (tmp[i * tuple_size + 2] != cpr_vreg->cpr_fuse_revision
+ && tmp[i * tuple_size + 2] != FUSE_PARAM_MATCH_ANY)
+ continue;
+ for (j = 0; j < cpr_vreg->num_fuse_corners; j++) {
+ val = tmp[i * tuple_size + 3 + j];
+ ro = cpr_vreg->cpr_fuse_ro_sel[j + CPR_FUSE_CORNER_MIN];
+ if (val != ro && val != FUSE_PARAM_MATCH_ANY)
+ break;
+ }
+ if (j == cpr_vreg->num_fuse_corners) {
+ cpr_vreg->cpr_fuse_map_match = i;
+ break;
+ }
+ }
+
+ if (cpr_vreg->cpr_fuse_map_match != FUSE_MAP_NO_MATCH)
+ cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match found: %d\n",
+ cpr_vreg->cpr_fuse_map_match);
+ else
+ cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match not found\n");
+
+done:
+ kfree(tmp);
+ return rc;
+}
+
+static int cpr_minimum_quot_difference_adjustment(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int tuple_count, tuple_match;
+ int rc, i, len = 0;
+ u32 index, adjust_quot = 0;
+ u32 *min_diff_quot;
+
+ if (!of_find_property(of_node, "qcom,cpr-fuse-min-quot-diff", NULL))
+ /* No conditional adjustment needed on revised quotients. */
+ return 0;
+
+ if (!of_find_property(of_node, "qcom,cpr-min-quot-diff-adjustment",
+ &len)) {
+ cpr_err(cpr_vreg, "qcom,cpr-min-quot-diff-adjustment not specified\n");
+ return -ENODEV;
+ }
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
+ /* No matching index to use for quotient adjustment. */
+ return 0;
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+ cpr_err(cpr_vreg, "qcom,cpr-min-quot-diff-adjustment length=%d is invalid\n",
+ len);
+ return -EINVAL;
+ }
+
+ min_diff_quot = kzalloc(cpr_vreg->num_fuse_corners * sizeof(u32),
+ GFP_KERNEL);
+ if (!min_diff_quot) {
+ cpr_err(cpr_vreg, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-min-quot-diff",
+ min_diff_quot,
+ cpr_vreg->num_fuse_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "qcom,cpr-fuse-min-quot-diff reading failed, rc = %d\n",
+ rc);
+ goto error;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN + 1;
+ i <= cpr_vreg->num_fuse_corners; i++) {
+ if ((cpr_vreg->cpr_fuse_target_quot[i]
+ - cpr_vreg->cpr_fuse_target_quot[i - 1])
+ <= (int)min_diff_quot[i - CPR_FUSE_CORNER_MIN]) {
+ index = tuple_match * cpr_vreg->num_fuse_corners
+ + i - CPR_FUSE_CORNER_MIN;
+ rc = of_property_read_u32_index(of_node,
+ "qcom,cpr-min-quot-diff-adjustment",
+ index, &adjust_quot);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,cpr-min-quot-diff-adjustment index %u, rc=%d\n",
+ index, rc);
+ goto error;
+ }
+
+ cpr_vreg->cpr_fuse_target_quot[i]
+ = cpr_vreg->cpr_fuse_target_quot[i - 1]
+ + adjust_quot;
+ cpr_info(cpr_vreg, "Corner[%d]: revised adjusted quotient = %d\n",
+ i, cpr_vreg->cpr_fuse_target_quot[i]);
+		}
+ }
+
+error:
+ kfree(min_diff_quot);
+ return rc;
+}
+
+static int cpr_adjust_target_quots(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int tuple_count, tuple_match, i;
+ u32 index;
+ u32 quot_adjust = 0;
+ int len = 0;
+ int rc = 0;
+
+ if (!of_find_property(of_node, "qcom,cpr-quotient-adjustment", &len)) {
+ /* No static quotient adjustment needed. */
+ return 0;
+ }
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /* No matching index to use for quotient adjustment. */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != cpr_vreg->num_fuse_corners * tuple_count * sizeof(u32)) {
+ cpr_err(cpr_vreg, "qcom,cpr-quotient-adjustment length=%d is invalid\n",
+ len);
+ return -EINVAL;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+ index = tuple_match * cpr_vreg->num_fuse_corners
+ + i - CPR_FUSE_CORNER_MIN;
+ rc = of_property_read_u32_index(of_node,
+ "qcom,cpr-quotient-adjustment", index, &quot_adjust);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,cpr-quotient-adjustment index %u, rc=%d\n",
+ index, rc);
+ return rc;
+ }
+
+ if (quot_adjust) {
+ cpr_vreg->cpr_fuse_target_quot[i] += quot_adjust;
+ cpr_info(cpr_vreg, "Corner[%d]: adjusted target quot = %d\n",
+ i, cpr_vreg->cpr_fuse_target_quot[i]);
+ }
+ }
+
+ rc = cpr_minimum_quot_difference_adjustment(pdev, cpr_vreg);
+ if (rc)
+ cpr_err(cpr_vreg, "failed to apply minimum quot difference rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static int cpr_check_allowed(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ char *allow_str = "qcom,cpr-allowed";
+ int rc = 0, count;
+ int tuple_count, tuple_match;
+ u32 allow_status;
+
+ if (!of_find_property(of_node, allow_str, &count))
+ /* CPR is allowed for all fuse revisions. */
+ return 0;
+
+ count /= sizeof(u32);
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
+ /* No matching index to use for CPR allowed. */
+ return 0;
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (count != tuple_count) {
+ cpr_err(cpr_vreg, "%s count=%d is invalid\n", allow_str,
+ count);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_index(of_node, allow_str, tuple_match,
+ &allow_status);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+ allow_str, tuple_match, rc);
+ return rc;
+ }
+
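+	/*
+	 * Keep closed-loop CPR enabled only if this fuse revision allows it
+	 * and it has not already been disabled by fuse; otherwise disable it.
+	 */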
+ if (allow_status && !cpr_vreg->cpr_fuse_disable)
+ cpr_vreg->cpr_fuse_disable = false;
+ else
+ cpr_vreg->cpr_fuse_disable = true;
+
+ cpr_info(cpr_vreg, "CPR closed loop is %s for fuse revision %d\n",
+ cpr_vreg->cpr_fuse_disable ? "disabled" : "enabled",
+ cpr_vreg->cpr_fuse_revision);
+
+ return rc;
+}
+
+static int cpr_check_de_aging_allowed(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ struct device_node *of_node = dev->of_node;
+ char *allow_str = "qcom,cpr-de-aging-allowed";
+ int rc = 0, count;
+ int tuple_count, tuple_match;
+ u32 allow_status = 0;
+
+ if (!of_find_property(of_node, allow_str, &count)) {
+		/* CPR de-aging is not allowed for any fuse revision. */
+ return allow_status;
+ }
+
+ count /= sizeof(u32);
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH)
+ /* No matching index to use for CPR de-aging allowed. */
+ return 0;
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (count != tuple_count) {
+ cpr_err(cpr_vreg, "%s count=%d is invalid\n", allow_str,
+ count);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_index(of_node, allow_str, tuple_match,
+ &allow_status);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+ allow_str, tuple_match, rc);
+ return rc;
+ }
+
+ cpr_info(cpr_vreg, "CPR de-aging is %s for fuse revision %d\n",
+ allow_status ? "allowed" : "not allowed",
+ cpr_vreg->cpr_fuse_revision);
+
+ return allow_status;
+}
+
+static int cpr_aging_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ struct cpr_aging_info *aging_info;
+ struct cpr_aging_sensor_info *sensor_info;
+ int num_fuse_corners = cpr_vreg->num_fuse_corners;
+ int i, rc = 0, len = 0, num_aging_sensors, ro_sel, bits;
+ u32 *aging_sensor_id, *fuse_sel, *fuse_sel_orig;
+ u32 sensor = 0, non_collapsible_sensor_mask = 0;
+ u64 efuse_val;
+ struct property *prop;
+
+ if (!of_find_property(of_node, "qcom,cpr-aging-sensor-id", &len)) {
+ /* No CPR de-aging adjustments needed */
+ return 0;
+ }
+
+ if (len == 0) {
+ cpr_err(cpr_vreg, "qcom,cpr-aging-sensor-id property format is invalid\n");
+ return -EINVAL;
+ }
+ num_aging_sensors = len / sizeof(u32);
+ cpr_debug(cpr_vreg, "No of aging sensors = %d\n", num_aging_sensors);
+
+ if (cpumask_empty(&cpr_vreg->cpu_mask)) {
+ cpr_err(cpr_vreg, "qcom,cpr-cpus property missing\n");
+ return -EINVAL;
+ }
+
+ rc = cpr_check_de_aging_allowed(cpr_vreg, &pdev->dev);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "cpr_check_de_aging_allowed failed: rc=%d\n",
+ rc);
+ return rc;
+ } else if (rc == 0) {
+ /* CPR de-aging is not allowed for the current fuse combo */
+ return 0;
+ }
+
+ aging_info = devm_kzalloc(&pdev->dev, sizeof(*aging_info),
+ GFP_KERNEL);
+ if (!aging_info)
+ return -ENOMEM;
+
+ cpr_vreg->aging_info = aging_info;
+ aging_info->num_aging_sensors = num_aging_sensors;
+
+ rc = of_property_read_u32(of_node, "qcom,cpr-aging-ref-corner",
+ &aging_info->aging_corner);
+ if (rc) {
+ cpr_err(cpr_vreg, "qcom,cpr-aging-ref-corner missing rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-aging-ref-voltage",
+ &aging_info->aging_ref_voltage, rc);
+ if (rc)
+ return rc;
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-max-aging-margin",
+ &aging_info->max_aging_margin, rc);
+ if (rc)
+ return rc;
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-aging-ro-scaling-factor",
+ &aging_info->aging_ro_kv, rc);
+ if (rc)
+ return rc;
+
+ /* Check for DIV by 0 error */
+ if (aging_info->aging_ro_kv == 0) {
+ cpr_err(cpr_vreg, "invalid cpr-aging-ro-scaling-factor value: %u\n",
+ aging_info->aging_ro_kv);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-ro-scaling-factor",
+ aging_info->cpr_ro_kv, CPR_NUM_RING_OSC);
+ if (rc) {
+ cpr_err(cpr_vreg, "qcom,cpr-ro-scaling-factor property read failed, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ if (of_find_property(of_node, "qcom,cpr-non-collapsible-sensors",
+ &len)) {
+ len = len / sizeof(u32);
+ if (len <= 0 || len > 32) {
+ cpr_err(cpr_vreg, "qcom,cpr-non-collapsible-sensors has an incorrect size\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++) {
+ rc = of_property_read_u32_index(of_node,
+ "qcom,cpr-non-collapsible-sensors",
+ i, &sensor);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,cpr-non-collapsible-sensors index %u, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+
+ if (sensor > 31) {
+ cpr_err(cpr_vreg, "invalid non-collapsible sensor = %u\n",
+ sensor);
+ return -EINVAL;
+ }
+
+ non_collapsible_sensor_mask |= BIT(sensor);
+ }
+
+ /*
+ * Bypass the sensors in collapsible domain for
+ * de-aging measurements
+ */
+ aging_info->aging_sensor_bypass =
+ ~(non_collapsible_sensor_mask);
+ cpr_debug(cpr_vreg, "sensor bypass mask for aging = 0x%08x\n",
+ aging_info->aging_sensor_bypass);
+ }
+
+ prop = of_find_property(pdev->dev.of_node, "qcom,cpr-aging-derate",
+ NULL);
+ if ((!prop) ||
+ (prop->length != num_fuse_corners * sizeof(u32))) {
+ cpr_err(cpr_vreg, "qcom,cpr-aging-derate incorrectly configured\n");
+ return -EINVAL;
+ }
+
+ aging_sensor_id = kcalloc(num_aging_sensors, sizeof(*aging_sensor_id),
+ GFP_KERNEL);
+ fuse_sel = kcalloc(num_aging_sensors * 4, sizeof(*fuse_sel),
+ GFP_KERNEL);
+ aging_info->voltage_adjust = devm_kcalloc(&pdev->dev,
+ num_fuse_corners + 1,
+ sizeof(*aging_info->voltage_adjust),
+ GFP_KERNEL);
+ aging_info->sensor_info = devm_kcalloc(&pdev->dev, num_aging_sensors,
+ sizeof(*aging_info->sensor_info),
+ GFP_KERNEL);
+ aging_info->aging_derate = devm_kcalloc(&pdev->dev,
+ num_fuse_corners + 1,
+ sizeof(*aging_info->aging_derate),
+ GFP_KERNEL);
+
+	if (!aging_info->aging_derate || !aging_sensor_id
+			|| !aging_info->sensor_info || !fuse_sel
+			|| !aging_info->voltage_adjust) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-aging-sensor-id",
+ aging_sensor_id, num_aging_sensors);
+ if (rc) {
+ cpr_err(cpr_vreg, "qcom,cpr-aging-sensor-id property read failed, rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ for (i = 0; i < num_aging_sensors; i++)
+		if (aging_sensor_id[i] > 31) {
+ cpr_err(cpr_vreg, "Invalid aging sensor id: %u\n",
+ aging_sensor_id[i]);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-aging-derate",
+ &aging_info->aging_derate[CPR_FUSE_CORNER_MIN],
+ num_fuse_corners);
+ if (rc) {
+ cpr_err(cpr_vreg, "qcom,cpr-aging-derate property read failed, rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-aging-init-quot-diff",
+ fuse_sel, (num_aging_sensors * 4));
+ if (rc) {
+ cpr_err(cpr_vreg, "qcom,cpr-fuse-aging-init-quot-diff read failed, rc = %d\n",
+ rc);
+ goto err;
+ }
+
+ fuse_sel_orig = fuse_sel;
+ sensor_info = aging_info->sensor_info;
+ for (i = 0; i < num_aging_sensors; i++, sensor_info++) {
+ sensor_info->sensor_id = aging_sensor_id[i];
+ efuse_val = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+ fuse_sel[1], fuse_sel[2], fuse_sel[3]);
+ bits = fuse_sel[2];
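+		/*
+		 * The fused value is stored in sign + magnitude format: the
+		 * most significant bit of the field holds the sign and the
+		 * remaining bits hold the magnitude of the quotient
+		 * difference.
+		 */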
+ sensor_info->initial_quot_diff = ((efuse_val & BIT(bits - 1)) ?
+ -1 : 1) * (efuse_val & (BIT(bits - 1) - 1));
+
+ cpr_debug(cpr_vreg, "Age sensor[%d] Initial quot diff = %d\n",
+ sensor_info->sensor_id,
+ sensor_info->initial_quot_diff);
+ fuse_sel += 4;
+ }
+
+ /*
+ * Add max aging margin here. This can be adjusted later in
+ * de-aging algorithm.
+ */
+ for (i = CPR_FUSE_CORNER_MIN; i <= num_fuse_corners; i++) {
+ ro_sel = cpr_vreg->cpr_fuse_ro_sel[i];
+ cpr_vreg->cpr_fuse_target_quot[i] +=
+ (aging_info->cpr_ro_kv[ro_sel]
+ * aging_info->max_aging_margin) / 1000000;
+ aging_info->voltage_adjust[i] = aging_info->max_aging_margin;
+ cpr_info(cpr_vreg, "Corner[%d]: age margin adjusted quotient = %d\n",
+ i, cpr_vreg->cpr_fuse_target_quot[i]);
+ }
+
+err:
+ kfree(fuse_sel_orig);
+ kfree(aging_sensor_id);
+ return rc;
+}
+
+static int cpr_cpu_map_init(struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+ struct device_node *cpu_node;
+ int i, cpu;
+
+ if (!of_find_property(dev->of_node, "qcom,cpr-cpus",
+ &cpr_vreg->num_adj_cpus)) {
+ /* No adjustments based on online cores */
+ return 0;
+ }
+ cpr_vreg->num_adj_cpus /= sizeof(u32);
+
+ cpr_vreg->adj_cpus = devm_kcalloc(dev, cpr_vreg->num_adj_cpus,
+ sizeof(int), GFP_KERNEL);
+ if (!cpr_vreg->adj_cpus)
+ return -ENOMEM;
+
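+	/*
+	 * Map each qcom,cpr-cpus phandle to a logical CPU number by matching
+	 * it against the device tree nodes of all possible CPUs.
+	 */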
+ for (i = 0; i < cpr_vreg->num_adj_cpus; i++) {
+ cpu_node = of_parse_phandle(dev->of_node, "qcom,cpr-cpus", i);
+ if (!cpu_node) {
+ cpr_err(cpr_vreg, "could not find CPU node %d\n", i);
+ return -EINVAL;
+ }
+ cpr_vreg->adj_cpus[i] = -1;
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ cpr_vreg->adj_cpus[i] = cpu;
+ cpumask_set_cpu(cpu, &cpr_vreg->cpu_mask);
+ break;
+ }
+ }
+ of_node_put(cpu_node);
+ }
+
+ return 0;
+}
+
+static int cpr_init_cpr_efuse(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int i, rc = 0;
+ bool scheme_fuse_valid = false;
+ bool disable_fuse_valid = false;
+ char *targ_quot_str;
+ u32 cpr_fuse_row[2];
+ u32 bp_cpr_disable, bp_scheme;
+ size_t len;
+ int *bp_target_quot;
+ u64 fuse_bits, fuse_bits_2;
+ u32 *target_quot_size;
+ struct cpr_quot_scale *quot_scale;
+
+ len = cpr_vreg->num_fuse_corners + 1;
+
+ bp_target_quot = kzalloc(len * sizeof(*bp_target_quot), GFP_KERNEL);
+ target_quot_size = kzalloc(len * sizeof(*target_quot_size), GFP_KERNEL);
+ quot_scale = kzalloc(len * sizeof(*quot_scale), GFP_KERNEL);
+
+ if (!bp_target_quot || !target_quot_size || !quot_scale) {
+ cpr_err(cpr_vreg,
+ "Could not allocate memory for fuse parsing arrays\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ if (cpr_vreg->cpr_fuse_redundant) {
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-redun-row",
+ cpr_fuse_row, 2);
+ targ_quot_str = "qcom,cpr-fuse-redun-target-quot";
+ } else {
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-row",
+ cpr_fuse_row, 2);
+ targ_quot_str = "qcom,cpr-fuse-target-quot";
+ }
+ if (rc)
+ goto error;
+
+ rc = of_property_read_u32_array(of_node, targ_quot_str,
+ &bp_target_quot[CPR_FUSE_CORNER_MIN],
+ cpr_vreg->num_fuse_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "missing %s: rc=%d\n", targ_quot_str, rc);
+ goto error;
+ }
+
+ if (of_find_property(of_node, "qcom,cpr-fuse-target-quot-size", NULL)) {
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-target-quot-size",
+ &target_quot_size[CPR_FUSE_CORNER_MIN],
+ cpr_vreg->num_fuse_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-size: rc=%d\n",
+ rc);
+ goto error;
+ }
+ } else {
+ /*
+ * Default fuse quotient parameter size to match target register
+ * size.
+ */
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+ i++)
+ target_quot_size[i] = CPR_FUSE_TARGET_QUOT_BITS;
+ }
+
+ if (of_find_property(of_node, "qcom,cpr-fuse-target-quot-scale",
+ NULL)) {
+ for (i = 0; i < cpr_vreg->num_fuse_corners; i++) {
+ rc = of_property_read_u32_index(of_node,
+ "qcom,cpr-fuse-target-quot-scale", i * 2,
+ &quot_scale[i + CPR_FUSE_CORNER_MIN].offset);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-scale: rc=%d\n",
+ rc);
+ goto error;
+ }
+
+ rc = of_property_read_u32_index(of_node,
+ "qcom,cpr-fuse-target-quot-scale", i * 2 + 1,
+ &quot_scale[i + CPR_FUSE_CORNER_MIN].multiplier);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "error while reading qcom,cpr-fuse-target-quot-scale: rc=%d\n",
+ rc);
+ goto error;
+ }
+ }
+ } else {
+ /*
+ * In the default case, target quotients require no scaling so
+ * use offset = 0, multiplier = 1.
+ */
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+ i++) {
+ quot_scale[i].offset = 0;
+ quot_scale[i].multiplier = 1;
+ }
+ }
+
+ /* Read the control bits of eFuse */
+ fuse_bits = cpr_read_efuse_row(cpr_vreg, cpr_fuse_row[0],
+ cpr_fuse_row[1]);
+ cpr_info(cpr_vreg, "[row:%d] = 0x%llx\n", cpr_fuse_row[0], fuse_bits);
+
+ if (cpr_vreg->cpr_fuse_redundant) {
+ if (of_find_property(of_node,
+ "qcom,cpr-fuse-redun-bp-cpr-disable", NULL)) {
+ CPR_PROP_READ_U32(cpr_vreg, of_node,
+ "cpr-fuse-redun-bp-cpr-disable",
+ &bp_cpr_disable, rc);
+ disable_fuse_valid = true;
+ if (of_find_property(of_node,
+ "qcom,cpr-fuse-redun-bp-scheme",
+ NULL)) {
+ CPR_PROP_READ_U32(cpr_vreg, of_node,
+ "cpr-fuse-redun-bp-scheme",
+ &bp_scheme, rc);
+ scheme_fuse_valid = true;
+ }
+ if (rc)
+ goto error;
+ fuse_bits_2 = fuse_bits;
+ } else {
+ u32 temp_row[2];
+
+ /* Use original fuse if no optional property */
+ if (of_find_property(of_node,
+ "qcom,cpr-fuse-bp-cpr-disable", NULL)) {
+ CPR_PROP_READ_U32(cpr_vreg, of_node,
+ "cpr-fuse-bp-cpr-disable",
+ &bp_cpr_disable, rc);
+ disable_fuse_valid = true;
+ }
+ if (of_find_property(of_node,
+ "qcom,cpr-fuse-bp-scheme",
+ NULL)) {
+ CPR_PROP_READ_U32(cpr_vreg, of_node,
+ "cpr-fuse-bp-scheme",
+ &bp_scheme, rc);
+ scheme_fuse_valid = true;
+ }
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-row",
+ temp_row, 2);
+ if (rc)
+ goto error;
+
+ fuse_bits_2 = cpr_read_efuse_row(cpr_vreg, temp_row[0],
+ temp_row[1]);
+ cpr_info(cpr_vreg, "[original row:%d] = 0x%llx\n",
+ temp_row[0], fuse_bits_2);
+ }
+ } else {
+ if (of_find_property(of_node, "qcom,cpr-fuse-bp-cpr-disable",
+ NULL)) {
+ CPR_PROP_READ_U32(cpr_vreg, of_node,
+ "cpr-fuse-bp-cpr-disable", &bp_cpr_disable, rc);
+ disable_fuse_valid = true;
+ }
+ if (of_find_property(of_node, "qcom,cpr-fuse-bp-scheme",
+ NULL)) {
+ CPR_PROP_READ_U32(cpr_vreg, of_node,
+ "cpr-fuse-bp-scheme", &bp_scheme, rc);
+ scheme_fuse_valid = true;
+ }
+ if (rc)
+ goto error;
+ fuse_bits_2 = fuse_bits;
+ }
+
+ if (disable_fuse_valid) {
+ cpr_vreg->cpr_fuse_disable =
+ (fuse_bits_2 >> bp_cpr_disable) & 0x01;
+ cpr_info(cpr_vreg, "CPR disable fuse = %d\n",
+ cpr_vreg->cpr_fuse_disable);
+ } else {
+ cpr_vreg->cpr_fuse_disable = false;
+ }
+
+ if (scheme_fuse_valid) {
+ cpr_vreg->cpr_fuse_local = (fuse_bits_2 >> bp_scheme) & 0x01;
+ cpr_info(cpr_vreg, "local = %d\n", cpr_vreg->cpr_fuse_local);
+ } else {
+ cpr_vreg->cpr_fuse_local = true;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+ cpr_vreg->cpr_fuse_target_quot[i]
+ = cpr_read_efuse_param(cpr_vreg, cpr_fuse_row[0],
+ bp_target_quot[i], target_quot_size[i],
+ cpr_fuse_row[1]);
+ /* Unpack the target quotient by scaling. */
+ cpr_vreg->cpr_fuse_target_quot[i] *= quot_scale[i].multiplier;
+ cpr_vreg->cpr_fuse_target_quot[i] += quot_scale[i].offset;
+ cpr_info(cpr_vreg,
+ "Corner[%d]: ro_sel = %d, target quot = %d\n", i,
+ cpr_vreg->cpr_fuse_ro_sel[i],
+ cpr_vreg->cpr_fuse_target_quot[i]);
+ }
+
+ rc = cpr_cpu_map_init(cpr_vreg, &pdev->dev);
+ if (rc) {
+ cpr_err(cpr_vreg, "CPR cpu map init failed: rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = cpr_aging_init(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "CPR aging init failed: rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = cpr_adjust_target_quots(pdev, cpr_vreg);
+ if (rc)
+ goto error;
+
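+	/*
+	 * Sanity check: target quotients must not decrease between fuse
+	 * corners that use the same ring oscillator. If they do, the fuse
+	 * data is invalid and CPR is permanently disabled.
+	 */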
+ for (i = CPR_FUSE_CORNER_MIN + 1;
+ i <= cpr_vreg->num_fuse_corners; i++) {
+ if (cpr_vreg->cpr_fuse_target_quot[i]
+ < cpr_vreg->cpr_fuse_target_quot[i - 1] &&
+ cpr_vreg->cpr_fuse_ro_sel[i] ==
+ cpr_vreg->cpr_fuse_ro_sel[i - 1]) {
+ cpr_vreg->cpr_fuse_disable = true;
+ cpr_err(cpr_vreg, "invalid quotient values; permanently disabling CPR\n");
+ }
+ }
+
+ if (cpr_vreg->flags & FLAGS_UPLIFT_QUOT_VOLT) {
+ cpr_voltage_uplift_wa_inc_quot(cpr_vreg, of_node);
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+ i++) {
+ cpr_info(cpr_vreg,
+ "Corner[%d]: uplifted target quot = %d\n",
+ i, cpr_vreg->cpr_fuse_target_quot[i]);
+ }
+ }
+
+ /*
+ * Check whether the fuse-quot-offset is defined per fuse corner.
+ * If it is defined, use it (quot_offset) in the calculation
+ * below for obtaining scaling factor per fuse corner.
+ */
+ rc = cpr_get_fuse_quot_offset(cpr_vreg, pdev, quot_scale);
+ if (rc < 0)
+ goto error;
+
+ rc = cpr_get_corner_quot_adjustment(cpr_vreg, &pdev->dev);
+ if (rc)
+ goto error;
+
+ cpr_vreg->cpr_fuse_bits = fuse_bits;
+ if (!cpr_vreg->cpr_fuse_bits) {
+ cpr_vreg->cpr_fuse_disable = true;
+ cpr_err(cpr_vreg,
+ "cpr_fuse_bits == 0; permanently disabling CPR\n");
+ } else if (!cpr_vreg->fuse_quot_offset) {
+ /*
+ * Check if the target quotients for the highest two fuse
+ * corners are too close together.
+ */
+ int *quot = cpr_vreg->cpr_fuse_target_quot;
+ int highest_fuse_corner = cpr_vreg->num_fuse_corners;
+ u32 min_diff_quot;
+ bool valid_fuse = true;
+
+ min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF;
+ of_property_read_u32(of_node, "qcom,cpr-quot-min-diff",
+ &min_diff_quot);
+
+ if (quot[highest_fuse_corner] > quot[highest_fuse_corner - 1]) {
+ if ((quot[highest_fuse_corner]
+ - quot[highest_fuse_corner - 1])
+ <= min_diff_quot)
+ valid_fuse = false;
+ } else {
+ valid_fuse = false;
+ }
+
+ if (!valid_fuse) {
+ cpr_vreg->cpr_fuse_disable = true;
+ cpr_err(cpr_vreg, "invalid quotient values; permanently disabling CPR\n");
+ }
+ }
+ rc = cpr_check_allowed(pdev, cpr_vreg);
+
+error:
+ kfree(bp_target_quot);
+ kfree(target_quot_size);
+ kfree(quot_scale);
+
+ return rc;
+}
+
+static int cpr_init_cpr_voltages(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ int i;
+ int size = cpr_vreg->num_corners + 1;
+
+ cpr_vreg->last_volt = devm_kzalloc(dev, sizeof(int) * size, GFP_KERNEL);
+ if (!cpr_vreg->last_volt)
+		return -ENOMEM;
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+ cpr_vreg->last_volt[i] = cpr_vreg->open_loop_volt[i];
+
+ return 0;
+}
+
+/*
+ * This function fills the virtual_limit array with voltages read from the
+ * prop_name device tree property if a given tuple in the property matches
+ * the speed bin and PVS version fuses found on the chip. Otherwise, it
+ * fills the virtual_limit array with the corresponding values from the
+ * fuse_limit array.
+ */
+static int cpr_fill_override_voltage(struct cpr_regulator *cpr_vreg,
+ struct device *dev, const char *prop_name, const char *label,
+ int *virtual_limit, int *fuse_limit)
+{
+ int rc = 0;
+ int i, j, size, pos;
+ struct property *prop;
+ bool match_found = false;
+ size_t buflen;
+ char *buf;
+ u32 *tmp;
+
+ prop = of_find_property(dev->of_node, prop_name, NULL);
+ if (!prop)
+ goto use_fuse_corner_limits;
+
+ size = prop->length / sizeof(u32);
+ if (size == 0 || size % (cpr_vreg->num_corners + 2)) {
+ cpr_err(cpr_vreg, "%s property format is invalid; reusing per-fuse-corner limits\n",
+ prop_name);
+ goto use_fuse_corner_limits;
+ }
+
+ tmp = kzalloc(size * sizeof(u32), GFP_KERNEL);
+ if (!tmp) {
+ cpr_err(cpr_vreg, "memory alloc failed\n");
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(dev->of_node, prop_name, tmp, size);
+ if (rc < 0) {
+ kfree(tmp);
+ cpr_err(cpr_vreg, "%s reading failed, rc = %d\n", prop_name,
+ rc);
+ return rc;
+ }
+
+ /*
+ * Get limit voltage for each virtual corner based upon the speed_bin
+ * and pvs_version values.
+ */
+ for (i = 0; i < size; i += cpr_vreg->num_corners + 2) {
+ if (tmp[i] != cpr_vreg->speed_bin &&
+ tmp[i] != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (tmp[i + 1] != cpr_vreg->pvs_version &&
+ tmp[i + 1] != FUSE_PARAM_MATCH_ANY)
+ continue;
+ for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++)
+ virtual_limit[j] = tmp[i + 2 + j - CPR_FUSE_CORNER_MIN];
+ match_found = true;
+ break;
+ }
+ kfree(tmp);
+
+ if (!match_found)
+ goto use_fuse_corner_limits;
+
+ /*
+ * Log per-virtual-corner voltage limits since they are useful for
+ * baseline CPR debugging.
+ */
+ buflen = cpr_vreg->num_corners * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (buf == NULL) {
+ cpr_err(cpr_vreg, "Could not allocate memory for corner limit voltage logging\n");
+ return 0;
+ }
+
+ for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
+ pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+ virtual_limit[i], i < cpr_vreg->num_corners ? " " : "");
+ cpr_info(cpr_vreg, "%s override voltage: [%s] uV\n", label, buf);
+ kfree(buf);
+
+ return rc;
+
+use_fuse_corner_limits:
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+ virtual_limit[i] = fuse_limit[cpr_vreg->corner_map[i]];
+ return rc;
+}
+
+/*
+ * This function loads per-virtual-corner ceiling and floor voltages from device
+ * tree if their respective device tree properties are present. These limits
+ * override those found in the per-fuse-corner arrays fuse_ceiling_volt and
+ * fuse_floor_volt.
+ */
+static int cpr_init_ceiling_floor_override_voltages(
+ struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+ int rc, i;
+ int size = cpr_vreg->num_corners + 1;
+
+ cpr_vreg->ceiling_volt = devm_kzalloc(dev, sizeof(int) * size,
+ GFP_KERNEL);
+ cpr_vreg->floor_volt = devm_kzalloc(dev, sizeof(int) * size,
+ GFP_KERNEL);
+ cpr_vreg->cpr_max_ceiling = devm_kzalloc(dev, sizeof(int) * size,
+ GFP_KERNEL);
+ if (!cpr_vreg->ceiling_volt || !cpr_vreg->floor_volt ||
+ !cpr_vreg->cpr_max_ceiling)
+ return -ENOMEM;
+
+ rc = cpr_fill_override_voltage(cpr_vreg, dev,
+ "qcom,cpr-voltage-ceiling-override", "ceiling",
+ cpr_vreg->ceiling_volt, cpr_vreg->fuse_ceiling_volt);
+ if (rc)
+ return rc;
+
+ rc = cpr_fill_override_voltage(cpr_vreg, dev,
+ "qcom,cpr-voltage-floor-override", "floor",
+ cpr_vreg->floor_volt, cpr_vreg->fuse_floor_volt);
+ if (rc)
+ return rc;
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ if (cpr_vreg->floor_volt[i] > cpr_vreg->ceiling_volt[i]) {
+ cpr_err(cpr_vreg, "virtual corner %d floor=%d uV > ceiling=%d uV\n",
+ i, cpr_vreg->floor_volt[i],
+ cpr_vreg->ceiling_volt[i]);
+ return -EINVAL;
+ }
+
+ if (cpr_vreg->ceiling_max < cpr_vreg->ceiling_volt[i])
+ cpr_vreg->ceiling_max = cpr_vreg->ceiling_volt[i];
+ cpr_vreg->cpr_max_ceiling[i] = cpr_vreg->ceiling_volt[i];
+ }
+
+ return rc;
+}
+
+/*
+ * This function computes the per-virtual-corner floor voltages from the
+ * per-virtual-corner ceiling voltages using an offset specified by a
+ * device tree property. It must be called after open-loop voltage scaling,
+ * after the floor_volt array has been loaded, and after the ceiling voltage
+ * has been conditionally reduced to the open-loop voltage. For each corner
+ * it stores the maximum of the calculated floor voltage and the existing
+ * floor_volt value back into the floor_volt array.
+ */
+static int cpr_init_floor_to_ceiling_range(
+ struct cpr_regulator *cpr_vreg, struct device *dev)
+{
+ int rc, i, tuple_count, tuple_match, len, pos;
+ u32 index, floor_volt_adjust = 0;
+ char *prop_str, *buf;
+ size_t buflen;
+
+ prop_str = "qcom,cpr-floor-to-ceiling-max-range";
+
+ if (!of_find_property(dev->of_node, prop_str, &len))
+ return 0;
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /*
+ * No matching index to use for floor-to-ceiling
+ * max range.
+ */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+ cpr_err(cpr_vreg, "%s length=%d is invalid\n", prop_str, len);
+ return -EINVAL;
+ }
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ index = tuple_match * cpr_vreg->num_corners
+ + i - CPR_CORNER_MIN;
+ rc = of_property_read_u32_index(dev->of_node, prop_str,
+ index, &floor_volt_adjust);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read %s index %u, rc=%d\n",
+ prop_str, index, rc);
+ return rc;
+ }
+
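+		/*
+		 * A negative adjustment value leaves the corner's floor
+		 * unchanged. Otherwise, raise the floor so it is within the
+		 * specified range below the ceiling, round it up to a
+		 * step_volt multiple, and lift the open-loop voltage if it
+		 * now falls below the new floor.
+		 */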
+ if ((int)floor_volt_adjust >= 0) {
+ cpr_vreg->floor_volt[i] = max(cpr_vreg->floor_volt[i],
+ (cpr_vreg->ceiling_volt[i]
+ - (int)floor_volt_adjust));
+ cpr_vreg->floor_volt[i]
+ = DIV_ROUND_UP(cpr_vreg->floor_volt[i],
+ cpr_vreg->step_volt) *
+ cpr_vreg->step_volt;
+ if (cpr_vreg->open_loop_volt[i]
+ < cpr_vreg->floor_volt[i])
+ cpr_vreg->open_loop_volt[i]
+ = cpr_vreg->floor_volt[i];
+ }
+ }
+
+ /*
+	 * Log the per-virtual-corner floor voltages that result after applying
+	 * the floor-to-ceiling max range since they are useful for baseline
+	 * CPR debugging.
+ */
+ buflen = cpr_vreg->num_corners * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (buf == NULL) {
+ cpr_err(cpr_vreg, "Could not allocate memory for corner limit voltage logging\n");
+ return 0;
+ }
+
+ for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
+ pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+ cpr_vreg->floor_volt[i],
+ i < cpr_vreg->num_corners ? " " : "");
+ cpr_info(cpr_vreg, "Final floor override voltages: [%s] uV\n", buf);
+ kfree(buf);
+
+ return 0;
+}
+
+static int cpr_init_step_quotient(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int len = 0;
+ u32 step_quot[CPR_NUM_RING_OSC];
+ int i, rc;
+
+ if (!of_find_property(of_node, "qcom,cpr-step-quotient", &len)) {
+ cpr_err(cpr_vreg, "qcom,cpr-step-quotient property missing\n");
+ return -EINVAL;
+ }
+
+ if (len == sizeof(u32)) {
+ /* Single step quotient used for all ring oscillators. */
+ rc = of_property_read_u32(of_node, "qcom,cpr-step-quotient",
+ step_quot);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,cpr-step-quotient, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+ i++)
+ cpr_vreg->step_quotient[i] = step_quot[0];
+ } else if (len == sizeof(u32) * CPR_NUM_RING_OSC) {
+ /* Unique step quotient used per ring oscillator. */
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-step-quotient", step_quot, CPR_NUM_RING_OSC);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,cpr-step-quotient, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+ i++)
+ cpr_vreg->step_quotient[i]
+ = step_quot[cpr_vreg->cpr_fuse_ro_sel[i]];
+ } else {
+ cpr_err(cpr_vreg, "qcom,cpr-step-quotient has invalid length=%d\n",
+ len);
+ return -EINVAL;
+ }
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++)
+ cpr_debug(cpr_vreg, "step_quotient[%d]=%u\n", i,
+ cpr_vreg->step_quotient[i]);
+
+ return 0;
+}
+
+static int cpr_init_cpr_parameters(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int rc = 0;
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-ref-clk",
+ &cpr_vreg->ref_clk_khz, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-delay",
+ &cpr_vreg->timer_delay_us, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-cons-up",
+ &cpr_vreg->timer_cons_up, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-timer-cons-down",
+ &cpr_vreg->timer_cons_down, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-irq-line",
+ &cpr_vreg->irq_line, rc);
+ if (rc)
+ return rc;
+
+ rc = cpr_init_step_quotient(pdev, cpr_vreg);
+ if (rc)
+ return rc;
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-up-threshold",
+ &cpr_vreg->up_threshold, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-down-threshold",
+ &cpr_vreg->down_threshold, rc);
+ if (rc)
+ return rc;
+ cpr_info(cpr_vreg, "up threshold = %u, down threshold = %u\n",
+ cpr_vreg->up_threshold, cpr_vreg->down_threshold);
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-idle-clocks",
+ &cpr_vreg->idle_clocks, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-gcnt-time",
+ &cpr_vreg->gcnt_time_us, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "vdd-apc-step-up-limit",
+ &cpr_vreg->vdd_apc_step_up_limit, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "vdd-apc-step-down-limit",
+ &cpr_vreg->vdd_apc_step_down_limit, rc);
+ if (rc)
+ return rc;
+
+ rc = of_property_read_u32(of_node, "qcom,cpr-clamp-timer-interval",
+ &cpr_vreg->clamp_timer_interval);
+ if (rc && rc != -EINVAL) {
+ cpr_err(cpr_vreg,
+ "error reading qcom,cpr-clamp-timer-interval, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr_vreg->clamp_timer_interval = min(cpr_vreg->clamp_timer_interval,
+ (u32)RBIF_TIMER_ADJ_CLAMP_INT_MASK);
+
+ /* Init module parameter with the DT value */
+ cpr_vreg->enable = of_property_read_bool(of_node, "qcom,cpr-enable");
+ cpr_info(cpr_vreg, "CPR is %s by default.\n",
+ cpr_vreg->enable ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static void cpr_regulator_switch_adj_cpus(struct cpr_regulator *cpr_vreg)
+{
+ cpr_vreg->last_volt = cpr_vreg->adj_cpus_last_volt
+ [cpr_vreg->online_cpus];
+ cpr_vreg->save_ctl = cpr_vreg->adj_cpus_save_ctl[cpr_vreg->online_cpus];
+ cpr_vreg->save_irq = cpr_vreg->adj_cpus_save_irq[cpr_vreg->online_cpus];
+
+ if (cpr_vreg->adj_cpus_quot_adjust)
+ cpr_vreg->quot_adjust = cpr_vreg->adj_cpus_quot_adjust
+ [cpr_vreg->online_cpus];
+ if (cpr_vreg->adj_cpus_open_loop_volt)
+ cpr_vreg->open_loop_volt
+ = cpr_vreg->adj_cpus_open_loop_volt
+ [cpr_vreg->online_cpus];
+ if (cpr_vreg->adj_cpus_open_loop_volt_as_ceiling)
+ cpr_vreg->ceiling_volt = cpr_vreg->open_loop_volt;
+}
+
+static void cpr_regulator_set_online_cpus(struct cpr_regulator *cpr_vreg)
+{
+ int i, j;
+
+ cpr_vreg->online_cpus = 0;
+ get_online_cpus();
+ for_each_online_cpu(i)
+ for (j = 0; j < cpr_vreg->num_adj_cpus; j++)
+ if (i == cpr_vreg->adj_cpus[j])
+ cpr_vreg->online_cpus++;
+ put_online_cpus();
+}
+
+static int cpr_regulator_cpu_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct cpr_regulator *cpr_vreg = container_of(nb, struct cpr_regulator,
+ cpu_notifier);
+ int cpu = (long)data;
+ int prev_online_cpus, rc, i;
+
+ action &= ~CPU_TASKS_FROZEN;
+
+ if (action != CPU_UP_PREPARE && action != CPU_UP_CANCELED
+ && action != CPU_DEAD)
+ return NOTIFY_OK;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ if (cpr_vreg->skip_voltage_change_during_suspend
+ && cpr_vreg->is_cpr_suspended) {
+ /* Do nothing during system suspend/resume */
+ goto done;
+ }
+
+ prev_online_cpus = cpr_vreg->online_cpus;
+ cpr_regulator_set_online_cpus(cpr_vreg);
+
+ if (action == CPU_UP_PREPARE)
+ for (i = 0; i < cpr_vreg->num_adj_cpus; i++)
+ if (cpu == cpr_vreg->adj_cpus[i]) {
+ cpr_vreg->online_cpus++;
+ break;
+ }
+
+ if (cpr_vreg->online_cpus == prev_online_cpus)
+ goto done;
+
+ cpr_debug(cpr_vreg, "adjusting corner %d quotient for %d cpus\n",
+ cpr_vreg->corner, cpr_vreg->online_cpus);
+
+ cpr_regulator_switch_adj_cpus(cpr_vreg);
+
+ if (cpr_vreg->corner) {
+ rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
+ cpr_vreg->corner, true);
+ if (rc)
+ cpr_err(cpr_vreg, "could not update quotient, rc=%d\n",
+ rc);
+ }
+
+done:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return NOTIFY_OK;
+}
+
+static void cpr_pm_disable(struct cpr_regulator *cpr_vreg, bool disable)
+{
+ u32 reg_val;
+
+ if (cpr_vreg->is_cpr_suspended)
+ return;
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+
+ if (disable) {
+ /* Proceed only if CPR is enabled */
+ if (!(reg_val & RBCPR_CTL_LOOP_EN))
+ return;
+ cpr_ctl_disable(cpr_vreg);
+ cpr_vreg->cpr_disabled_in_pc = true;
+ } else {
+ /* Proceed only if CPR was disabled in PM_ENTER */
+ if (!cpr_vreg->cpr_disabled_in_pc)
+ return;
+ cpr_vreg->cpr_disabled_in_pc = false;
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+ }
+
+ /* Make sure register write is complete */
+ mb();
+}
+
+static int cpr_pm_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct cpr_regulator *cpr_vreg = container_of(nb,
+ struct cpr_regulator, pm_notifier);
+
+ if (action != CPU_PM_ENTER && action != CPU_PM_ENTER_FAILED &&
+ action != CPU_PM_EXIT)
+ return NOTIFY_OK;
+
+ switch (action) {
+ case CPU_PM_ENTER:
+ cpr_pm_disable(cpr_vreg, true);
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ cpr_pm_disable(cpr_vreg, false);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int cpr_parse_adj_cpus_init_voltage(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ int rc, i, j, k, tuple_count, tuple_match, len, offset;
+ int *temp;
+
+ if (!of_find_property(dev->of_node,
+ "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
+ NULL))
+ return 0;
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /* No matching index to use for voltage adjustment. */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ len = (cpr_vreg->num_adj_cpus + 1) * tuple_count
+ * cpr_vreg->num_corners;
+
+ temp = kzalloc(sizeof(int) * len, GFP_KERNEL);
+ if (!temp) {
+ cpr_err(cpr_vreg, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ cpr_vreg->adj_cpus_open_loop_volt = devm_kzalloc(dev,
+ sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->adj_cpus_open_loop_volt) {
+ cpr_err(cpr_vreg, "Could not allocate memory\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ cpr_vreg->adj_cpus_open_loop_volt[0] = devm_kzalloc(dev,
+ sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+ * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->adj_cpus_open_loop_volt[0]) {
+ cpr_err(cpr_vreg, "Could not allocate memory\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ for (i = 1; i <= cpr_vreg->num_adj_cpus; i++)
+ cpr_vreg->adj_cpus_open_loop_volt[i] =
+ cpr_vreg->adj_cpus_open_loop_volt[0] +
+ i * (cpr_vreg->num_corners + 1);
+
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
+ temp, len);
+ if (rc) {
+ cpr_err(cpr_vreg, "failed to read qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ cpr_debug(cpr_vreg, "Open loop voltage based on number of online CPUs:\n");
+ offset = tuple_match * cpr_vreg->num_corners *
+ (cpr_vreg->num_adj_cpus + 1);
+
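+	/*
+	 * For each online-CPU count and virtual corner, apply the adjustment
+	 * to the open-loop voltage, round the result up to a step_volt
+	 * multiple, and clamp it to the corner's floor and ceiling limits.
+	 */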
+ for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
+ for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
+ k = j - 1 + offset;
+
+ cpr_vreg->adj_cpus_open_loop_volt[i][j]
+ = cpr_vreg->open_loop_volt[j] + temp[k];
+ cpr_vreg->adj_cpus_open_loop_volt[i][j]
+ = DIV_ROUND_UP(cpr_vreg->
+ adj_cpus_open_loop_volt[i][j],
+ cpr_vreg->step_volt) * cpr_vreg->step_volt;
+
+ if (cpr_vreg->adj_cpus_open_loop_volt[i][j]
+ > cpr_vreg->ceiling_volt[j])
+ cpr_vreg->adj_cpus_open_loop_volt[i][j]
+ = cpr_vreg->ceiling_volt[j];
+ if (cpr_vreg->adj_cpus_open_loop_volt[i][j]
+ < cpr_vreg->floor_volt[j])
+ cpr_vreg->adj_cpus_open_loop_volt[i][j]
+ = cpr_vreg->floor_volt[j];
+
+ cpr_debug(cpr_vreg, "cpus=%d, corner=%d, volt=%d\n",
+ i, j, cpr_vreg->adj_cpus_open_loop_volt[i][j]);
+ }
+ offset += cpr_vreg->num_corners;
+ }
+
+ cpr_vreg->adj_cpus_open_loop_volt_as_ceiling
+ = of_property_read_bool(dev->of_node,
+ "qcom,cpr-online-cpu-init-voltage-as-ceiling");
+done:
+ kfree(temp);
+ return rc;
+}
+
+static int cpr_parse_adj_cpus_target_quot(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ int rc, i, j, k, tuple_count, tuple_match, len, offset;
+ int *temp;
+
+ if (!of_find_property(dev->of_node,
+ "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
+ NULL))
+ return 0;
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /* No matching index to use for quotient adjustment. */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ len = (cpr_vreg->num_adj_cpus + 1) * tuple_count
+ * cpr_vreg->num_corners;
+
+ temp = kzalloc(sizeof(int) * len, GFP_KERNEL);
+ if (!temp) {
+ cpr_err(cpr_vreg, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ cpr_vreg->adj_cpus_quot_adjust = devm_kzalloc(dev,
+ sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->adj_cpus_quot_adjust) {
+ cpr_err(cpr_vreg, "Could not allocate memory\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ cpr_vreg->adj_cpus_quot_adjust[0] = devm_kzalloc(dev,
+ sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+ * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->adj_cpus_quot_adjust[0]) {
+ cpr_err(cpr_vreg, "Could not allocate memory\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ for (i = 1; i <= cpr_vreg->num_adj_cpus; i++)
+ cpr_vreg->adj_cpus_quot_adjust[i] =
+ cpr_vreg->adj_cpus_quot_adjust[0] +
+ i * (cpr_vreg->num_corners + 1);
+
+ rc = of_property_read_u32_array(dev->of_node,
+ "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
+ temp, len);
+ if (rc) {
+ cpr_err(cpr_vreg, "failed to read qcom,cpr-online-cpu-virtual-corner-quotient-adjustment, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ cpr_debug(cpr_vreg, "Target quotients based on number of online CPUs:\n");
+ offset = tuple_match * cpr_vreg->num_corners *
+ (cpr_vreg->num_adj_cpus + 1);
+
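+	/*
+	 * Store the quotient adjustment for each online-CPU count and virtual
+	 * corner relative to the baseline quot_adjust value; the effective
+	 * target quotient is the fused quotient minus this adjustment.
+	 */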
+ for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
+ for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
+ k = j - 1 + offset;
+
+ cpr_vreg->adj_cpus_quot_adjust[i][j] =
+ cpr_vreg->quot_adjust[j] - temp[k];
+
+ cpr_debug(cpr_vreg, "cpus=%d, corner=%d, quot=%d\n",
+ i, j,
+ cpr_vreg->cpr_fuse_target_quot[
+ cpr_vreg->corner_map[j]]
+ - cpr_vreg->adj_cpus_quot_adjust[i][j]);
+ }
+ offset += cpr_vreg->num_corners;
+ }
+
+done:
+ kfree(temp);
+ return rc;
+}
+
+static int cpr_init_per_cpu_adjustments(struct cpr_regulator *cpr_vreg,
+ struct device *dev)
+{
+ int rc, i, j;
+
+ if (!of_find_property(dev->of_node,
+ "qcom,cpr-online-cpu-virtual-corner-init-voltage-adjustment",
+ NULL)
+ && !of_find_property(dev->of_node,
+ "qcom,cpr-online-cpu-virtual-corner-quotient-adjustment",
+ NULL)) {
+ /* No per-online CPU adjustment needed */
+ return 0;
+ }
+
+ if (!cpr_vreg->num_adj_cpus) {
+ cpr_err(cpr_vreg, "qcom,cpr-cpus property missing\n");
+ return -EINVAL;
+ }
+
+ rc = cpr_parse_adj_cpus_init_voltage(cpr_vreg, dev);
+ if (rc) {
+		cpr_err(cpr_vreg, "cpr_parse_adj_cpus_init_voltage failed: rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr_parse_adj_cpus_target_quot(cpr_vreg, dev);
+ if (rc) {
+		cpr_err(cpr_vreg, "cpr_parse_adj_cpus_target_quot failed: rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr_vreg->adj_cpus_last_volt = devm_kzalloc(dev,
+ sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+ GFP_KERNEL);
+ cpr_vreg->adj_cpus_save_ctl = devm_kzalloc(dev,
+ sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+ GFP_KERNEL);
+ cpr_vreg->adj_cpus_save_irq = devm_kzalloc(dev,
+ sizeof(int *) * (cpr_vreg->num_adj_cpus + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->adj_cpus_last_volt || !cpr_vreg->adj_cpus_save_ctl ||
+ !cpr_vreg->adj_cpus_save_irq) {
+ cpr_err(cpr_vreg, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ cpr_vreg->adj_cpus_last_volt[0] = devm_kzalloc(dev,
+ sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+ * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ cpr_vreg->adj_cpus_save_ctl[0] = devm_kzalloc(dev,
+ sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+ * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ cpr_vreg->adj_cpus_save_irq[0] = devm_kzalloc(dev,
+ sizeof(int) * (cpr_vreg->num_adj_cpus + 1)
+ * (cpr_vreg->num_corners + 1),
+ GFP_KERNEL);
+ if (!cpr_vreg->adj_cpus_last_volt[0] ||
+ !cpr_vreg->adj_cpus_save_ctl[0] ||
+ !cpr_vreg->adj_cpus_save_irq[0]) {
+ cpr_err(cpr_vreg, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+ for (i = 1; i <= cpr_vreg->num_adj_cpus; i++) {
+ j = i * (cpr_vreg->num_corners + 1);
+ cpr_vreg->adj_cpus_last_volt[i] =
+ cpr_vreg->adj_cpus_last_volt[0] + j;
+ cpr_vreg->adj_cpus_save_ctl[i] =
+ cpr_vreg->adj_cpus_save_ctl[0] + j;
+ cpr_vreg->adj_cpus_save_irq[i] =
+ cpr_vreg->adj_cpus_save_irq[0] + j;
+ }
+
+ for (i = 0; i <= cpr_vreg->num_adj_cpus; i++) {
+ for (j = CPR_CORNER_MIN; j <= cpr_vreg->num_corners; j++) {
+
+ cpr_vreg->adj_cpus_save_ctl[i][j] =
+ cpr_vreg->save_ctl[j];
+ cpr_vreg->adj_cpus_save_irq[i][j] =
+ cpr_vreg->save_irq[j];
+
+ cpr_vreg->adj_cpus_last_volt[i][j]
+ = cpr_vreg->adj_cpus_open_loop_volt
+ ? cpr_vreg->adj_cpus_open_loop_volt[i][j]
+ : cpr_vreg->open_loop_volt[j];
+ }
+ }
+
+ cpr_regulator_set_online_cpus(cpr_vreg);
+ cpr_debug(cpr_vreg, "%d cpus online\n", cpr_vreg->online_cpus);
+
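+	/*
+	 * The flat per-corner arrays are superseded by the per-online-CPU
+	 * tables allocated above, so release them and switch to the tables
+	 * matching the current number of online CPUs.
+	 */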
+ devm_kfree(dev, cpr_vreg->last_volt);
+ devm_kfree(dev, cpr_vreg->save_ctl);
+ devm_kfree(dev, cpr_vreg->save_irq);
+ if (cpr_vreg->adj_cpus_quot_adjust)
+ devm_kfree(dev, cpr_vreg->quot_adjust);
+ if (cpr_vreg->adj_cpus_open_loop_volt)
+ devm_kfree(dev, cpr_vreg->open_loop_volt);
+ if (cpr_vreg->adj_cpus_open_loop_volt_as_ceiling)
+ devm_kfree(dev, cpr_vreg->ceiling_volt);
+
+ cpr_regulator_switch_adj_cpus(cpr_vreg);
+
+ cpr_vreg->skip_voltage_change_during_suspend
+ = of_property_read_bool(dev->of_node,
+ "qcom,cpr-skip-voltage-change-during-suspend");
+
+ cpr_vreg->cpu_notifier.notifier_call = cpr_regulator_cpu_callback;
+ register_hotcpu_notifier(&cpr_vreg->cpu_notifier);
+
+ return rc;
+}
+
+static int cpr_init_pm_notification(struct cpr_regulator *cpr_vreg)
+{
+ int rc;
+
+ /* enabled only for single-core designs */
+ if (cpr_vreg->num_adj_cpus != 1) {
+ pr_warn("qcom,cpr-cpus not defined or invalid %d\n",
+ cpr_vreg->num_adj_cpus);
+ return 0;
+ }
+
+ cpr_vreg->pm_notifier.notifier_call = cpr_pm_callback;
+ rc = cpu_pm_register_notifier(&cpr_vreg->pm_notifier);
+ if (rc)
+ cpr_err(cpr_vreg, "Unable to register pm notifier rc=%d\n", rc);
+
+ return rc;
+}
+
+static int cpr_rpm_apc_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ int rc, len = 0;
+ struct device_node *of_node = pdev->dev.of_node;
+
+ if (!of_find_property(of_node, "rpm-apc-supply", NULL))
+ return 0;
+
+ cpr_vreg->rpm_apc_vreg = devm_regulator_get(&pdev->dev, "rpm-apc");
+ if (IS_ERR_OR_NULL(cpr_vreg->rpm_apc_vreg)) {
+ rc = PTR_RET(cpr_vreg->rpm_apc_vreg);
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg, "devm_regulator_get: rpm-apc: rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (!of_find_property(of_node, "qcom,rpm-apc-corner-map", &len)) {
+ cpr_err(cpr_vreg,
+			"qcom,rpm-apc-corner-map missing\n");
+ return -EINVAL;
+ }
+ if (len != cpr_vreg->num_corners * sizeof(u32)) {
+ cpr_err(cpr_vreg,
+ "qcom,rpm-apc-corner-map length=%d is invalid: required:%d\n",
+ len, cpr_vreg->num_corners);
+ return -EINVAL;
+ }
+
+ cpr_vreg->rpm_apc_corner_map = devm_kzalloc(&pdev->dev,
+ (cpr_vreg->num_corners + 1) *
+ sizeof(*cpr_vreg->rpm_apc_corner_map), GFP_KERNEL);
+ if (!cpr_vreg->rpm_apc_corner_map) {
+ cpr_err(cpr_vreg, "Can't allocate memory for cpr_vreg->rpm_apc_corner_map\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,rpm-apc-corner-map",
+ &cpr_vreg->rpm_apc_corner_map[1], cpr_vreg->num_corners);
+ if (rc)
+ cpr_err(cpr_vreg, "read qcom,rpm-apc-corner-map failed, rc = %d\n",
+ rc);
+
+ return rc;
+}
+
+static int cpr_vsens_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ int rc = 0, len = 0;
+ struct device_node *of_node = pdev->dev.of_node;
+
+ if (of_find_property(of_node, "vdd-vsens-voltage-supply", NULL)) {
+ cpr_vreg->vdd_vsens_voltage = devm_regulator_get(&pdev->dev,
+ "vdd-vsens-voltage");
+ if (IS_ERR_OR_NULL(cpr_vreg->vdd_vsens_voltage)) {
+ rc = PTR_ERR(cpr_vreg->vdd_vsens_voltage);
+ cpr_vreg->vdd_vsens_voltage = NULL;
+ if (rc == -EPROBE_DEFER)
+ return rc;
+ /* device not found */
+ cpr_debug(cpr_vreg, "regulator_get: vdd-vsens-voltage: rc=%d\n",
+ rc);
+ return 0;
+ }
+ }
+
+ if (of_find_property(of_node, "vdd-vsens-corner-supply", NULL)) {
+ cpr_vreg->vdd_vsens_corner = devm_regulator_get(&pdev->dev,
+ "vdd-vsens-corner");
+ if (IS_ERR_OR_NULL(cpr_vreg->vdd_vsens_corner)) {
+ rc = PTR_ERR(cpr_vreg->vdd_vsens_corner);
+ cpr_vreg->vdd_vsens_corner = NULL;
+ if (rc == -EPROBE_DEFER)
+ return rc;
+ /* device not found */
+ cpr_debug(cpr_vreg, "regulator_get: vdd-vsens-corner: rc=%d\n",
+ rc);
+ return 0;
+ }
+
+ if (!of_find_property(of_node, "qcom,vsens-corner-map", &len)) {
+ cpr_err(cpr_vreg, "qcom,vsens-corner-map missing\n");
+ return -EINVAL;
+ }
+
+ if (len != cpr_vreg->num_fuse_corners * sizeof(u32)) {
+ cpr_err(cpr_vreg, "qcom,vsens-corner-map length=%d is invalid: required:%d\n",
+ len, cpr_vreg->num_fuse_corners);
+ return -EINVAL;
+ }
+
+ cpr_vreg->vsens_corner_map = devm_kcalloc(&pdev->dev,
+ (cpr_vreg->num_fuse_corners + 1),
+ sizeof(*cpr_vreg->vsens_corner_map), GFP_KERNEL);
+ if (!cpr_vreg->vsens_corner_map)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,vsens-corner-map",
+ &cpr_vreg->vsens_corner_map[1],
+ cpr_vreg->num_fuse_corners);
+ if (rc)
+ cpr_err(cpr_vreg, "read qcom,vsens-corner-map failed, rc = %d\n",
+ rc);
+ }
+
+ return rc;
+}
+
+static int cpr_disable_on_temp(struct cpr_regulator *cpr_vreg, bool disable)
+{
+ int rc = 0;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ if (cpr_vreg->cpr_fuse_disable ||
+ (cpr_vreg->cpr_thermal_disable == disable))
+ goto out;
+
+ cpr_vreg->cpr_thermal_disable = disable;
+
+ if (cpr_vreg->enable && cpr_vreg->corner) {
+ if (disable) {
+ cpr_debug(cpr_vreg, "Disabling CPR - below temperature threshold [%d]\n",
+ cpr_vreg->cpr_disable_temp_threshold);
+ /* disable CPR and force open-loop */
+ cpr_ctl_disable(cpr_vreg);
+ rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
+ cpr_vreg->corner, false);
+ if (rc < 0)
+ cpr_err(cpr_vreg, "Failed to set voltage, rc=%d\n",
+ rc);
+ } else {
+ /* enable CPR */
+			cpr_debug(cpr_vreg, "Enabling CPR - above temperature threshold [%d]\n",
+ cpr_vreg->cpr_enable_temp_threshold);
+ rc = cpr_regulator_set_voltage(cpr_vreg->rdev,
+ cpr_vreg->corner, true);
+ if (rc < 0)
+ cpr_err(cpr_vreg, "Failed to set voltage, rc=%d\n",
+ rc);
+ }
+ }
+out:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return rc;
+}
+
+static void tsens_threshold_notify(struct therm_threshold *tsens_cb_data)
+{
+ struct threshold_info *info = tsens_cb_data->parent;
+ struct cpr_regulator *cpr_vreg = container_of(info,
+ struct cpr_regulator, tsens_threshold_config);
+ int rc = 0;
+
+ cpr_debug(cpr_vreg, "Triggered tsens-notification trip_type=%d for thermal_zone_id=%d\n",
+ tsens_cb_data->trip_triggered, tsens_cb_data->sensor_id);
+
+ switch (tsens_cb_data->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ rc = cpr_disable_on_temp(cpr_vreg, false);
+ if (rc < 0)
+ cpr_err(cpr_vreg, "Failed to enable CPR, rc=%d\n", rc);
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ rc = cpr_disable_on_temp(cpr_vreg, true);
+ if (rc < 0)
+ cpr_err(cpr_vreg, "Failed to disable CPR, rc=%d\n", rc);
+ break;
+ default:
+ cpr_debug(cpr_vreg, "trip-type %d not supported\n",
+ tsens_cb_data->trip_triggered);
+ break;
+ }
+
+ rc = sensor_mgr_set_threshold(tsens_cb_data->sensor_id,
+ tsens_cb_data->threshold);
+ if (rc < 0)
+ cpr_err(cpr_vreg, "Failed to set temp. threshold, rc=%d\n", rc);
+}
+
+static int cpr_check_tsens(struct cpr_regulator *cpr_vreg)
+{
+ int rc = 0;
+ struct tsens_device tsens_dev;
+ unsigned long temp = 0;
+ bool disable;
+
+ if (tsens_is_ready() > 0) {
+ tsens_dev.sensor_num = cpr_vreg->tsens_id;
+ rc = tsens_get_temp(&tsens_dev, &temp);
+ if (rc < 0) {
+			cpr_err(cpr_vreg, "Failed to read tsens, rc=%d\n", rc);
+ return rc;
+ }
+
+ disable = (int) temp <= cpr_vreg->cpr_disable_temp_threshold;
+ rc = cpr_disable_on_temp(cpr_vreg, disable);
+ if (rc)
+ cpr_err(cpr_vreg, "Failed to %s CPR, rc=%d\n",
+ disable ? "disable" : "enable", rc);
+ }
+
+ return rc;
+}
+
+static int cpr_thermal_init(struct cpr_regulator *cpr_vreg)
+{
+ int rc;
+ struct device_node *of_node = cpr_vreg->dev->of_node;
+
+ if (!of_find_property(of_node, "qcom,cpr-thermal-sensor-id", NULL))
+ return 0;
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-thermal-sensor-id",
+ &cpr_vreg->tsens_id, rc);
+ if (rc < 0)
+ return rc;
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-disable-temp-threshold",
+ &cpr_vreg->cpr_disable_temp_threshold, rc);
+ if (rc < 0)
+ return rc;
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "cpr-enable-temp-threshold",
+ &cpr_vreg->cpr_enable_temp_threshold, rc);
+ if (rc < 0)
+ return rc;
+
+ if (cpr_vreg->cpr_disable_temp_threshold >=
+ cpr_vreg->cpr_enable_temp_threshold) {
+ cpr_err(cpr_vreg, "Invalid temperature threshold cpr_disable_temp[%d] >= cpr_enable_temp[%d]\n",
+ cpr_vreg->cpr_disable_temp_threshold,
+ cpr_vreg->cpr_enable_temp_threshold);
+ return -EINVAL;
+ }
+
+ cpr_vreg->cpr_disable_on_temperature = true;
+
+ return 0;
+}
+
+static int cpr_init_cpr(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct resource *res;
+ int rc = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr_clk");
+ if (res && res->start)
+ cpr_vreg->rbcpr_clk_addr = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr");
+ if (!res || !res->start) {
+ cpr_err(cpr_vreg, "missing rbcpr address: res=%p\n", res);
+ return -EINVAL;
+ }
+ cpr_vreg->rbcpr_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+
+ /* Init CPR configuration parameters */
+ rc = cpr_init_cpr_parameters(pdev, cpr_vreg);
+ if (rc)
+ return rc;
+
+ rc = cpr_init_cpr_efuse(pdev, cpr_vreg);
+ if (rc)
+ return rc;
+
+ /* Load per corner ceiling and floor voltages if they exist. */
+ rc = cpr_init_ceiling_floor_override_voltages(cpr_vreg, &pdev->dev);
+ if (rc)
+ return rc;
+
+ /*
+ * Limit open loop voltages based upon per corner ceiling and floor
+ * voltages.
+ */
+ rc = cpr_limit_open_loop_voltage(cpr_vreg);
+ if (rc)
+ return rc;
+
+ /*
+ * Fill the OPP table for this device with virtual voltage corner to
+ * open-loop voltage pairs.
+ */
+ rc = cpr_populate_opp_table(cpr_vreg, &pdev->dev);
+ if (rc)
+ return rc;
+
+ /* Reduce the ceiling voltage if allowed. */
+ rc = cpr_reduce_ceiling_voltage(cpr_vreg, &pdev->dev);
+ if (rc)
+ return rc;
+
+ /* Load CPR floor to ceiling range if exist. */
+ rc = cpr_init_floor_to_ceiling_range(cpr_vreg, &pdev->dev);
+ if (rc)
+ return rc;
+
+ /* Init all voltage set points of APC regulator for CPR */
+ rc = cpr_init_cpr_voltages(cpr_vreg, &pdev->dev);
+ if (rc)
+ return rc;
+
+ /* Get and Init interrupt */
+ cpr_vreg->cpr_irq = platform_get_irq(pdev, 0);
+	if (cpr_vreg->cpr_irq <= 0) {
+ cpr_err(cpr_vreg, "missing CPR IRQ\n");
+ return -EINVAL;
+ }
+
+ /* Configure CPR HW but keep it disabled */
+ rc = cpr_config(cpr_vreg, &pdev->dev);
+ if (rc)
+ return rc;
+
+ rc = request_threaded_irq(cpr_vreg->cpr_irq, NULL, cpr_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING, "cpr",
+ cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "CPR: request irq failed for IRQ %d\n",
+ cpr_vreg->cpr_irq);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * Create a set of virtual fuse rows if optional device tree properties are
+ * present.
+ */
+static int cpr_remap_efuse_data(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ struct property *prop;
+ u64 fuse_param;
+ u32 *temp;
+ int size, rc, i, bits, in_row, in_bit, out_row, out_bit;
+
+ prop = of_find_property(of_node, "qcom,fuse-remap-source", NULL);
+ if (!prop) {
+ /* No fuse remapping needed. */
+ return 0;
+ }
+
+ size = prop->length / sizeof(u32);
+ if (size == 0 || size % 4) {
+ cpr_err(cpr_vreg, "qcom,fuse-remap-source has invalid size=%d\n",
+ size);
+ return -EINVAL;
+ }
+ size /= 4;
+
+ rc = of_property_read_u32(of_node, "qcom,fuse-remap-base-row",
+ &cpr_vreg->remapped_row_base);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,fuse-remap-base-row, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ temp = kzalloc(sizeof(*temp) * size * 4, GFP_KERNEL);
+ if (!temp) {
+ cpr_err(cpr_vreg, "temp memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,fuse-remap-source", temp,
+ size * 4);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,fuse-remap-source, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ /*
+ * Format of tuples in qcom,fuse-remap-source property:
+ * <row bit-offset bit-count fuse-read-method>
+ */
+ for (i = 0, bits = 0; i < size; i++)
+ bits += temp[i * 4 + 2];
+
+ cpr_vreg->num_remapped_rows = DIV_ROUND_UP(bits, 64);
+ cpr_vreg->remapped_row = devm_kzalloc(&pdev->dev,
+ sizeof(*cpr_vreg->remapped_row) * cpr_vreg->num_remapped_rows,
+ GFP_KERNEL);
+ if (!cpr_vreg->remapped_row) {
+ cpr_err(cpr_vreg, "remapped_row memory allocation failed\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+
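+	/*
+	 * Read each source segment with cpr_read_efuse_param() and pack the
+	 * bits contiguously into the 64-bit virtual rows, spilling into the
+	 * next row whenever a segment crosses a 64-bit boundary.
+	 */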
+ for (i = 0, out_row = 0, out_bit = 0; i < size; i++) {
+ in_row = temp[i * 4];
+ in_bit = temp[i * 4 + 1];
+ bits = temp[i * 4 + 2];
+
+ while (bits > 64) {
+ fuse_param = cpr_read_efuse_param(cpr_vreg, in_row,
+ in_bit, 64, temp[i * 4 + 3]);
+
+ cpr_vreg->remapped_row[out_row++]
+ |= fuse_param << out_bit;
+ if (out_bit > 0)
+ cpr_vreg->remapped_row[out_row]
+ |= fuse_param >> (64 - out_bit);
+
+ bits -= 64;
+ in_bit += 64;
+ }
+
+ fuse_param = cpr_read_efuse_param(cpr_vreg, in_row, in_bit,
+ bits, temp[i * 4 + 3]);
+
+ cpr_vreg->remapped_row[out_row] |= fuse_param << out_bit;
+ if (bits < 64 - out_bit) {
+ out_bit += bits;
+ } else {
+ out_row++;
+ if (out_bit > 0)
+ cpr_vreg->remapped_row[out_row]
+ |= fuse_param >> (64 - out_bit);
+ out_bit = bits - (64 - out_bit);
+ }
+ }
+
+done:
+ kfree(temp);
+ return rc;
+}
+
+static int cpr_efuse_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct resource *res;
+ int len;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+ if (!res || !res->start) {
+ cpr_err(cpr_vreg, "efuse_addr missing: res=%p\n", res);
+ return -EINVAL;
+ }
+
+ cpr_vreg->efuse_addr = res->start;
+ len = res->end - res->start + 1;
+
+ cpr_info(cpr_vreg, "efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+ cpr_vreg->efuse_base = ioremap(cpr_vreg->efuse_addr, len);
+ if (!cpr_vreg->efuse_base) {
+ cpr_err(cpr_vreg, "Unable to map efuse_addr %pa\n",
+ &cpr_vreg->efuse_addr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void cpr_efuse_free(struct cpr_regulator *cpr_vreg)
+{
+ iounmap(cpr_vreg->efuse_base);
+}
+
+static void cpr_parse_cond_min_volt_fuse(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ int rc;
+ u32 fuse_sel[5];
+ /*
+ * Restrict all pvs corner voltages to a minimum value of
+ * qcom,cpr-cond-min-voltage if the fuse defined in
+ * qcom,cpr-fuse-cond-min-volt-sel does not read back with
+ * the expected value.
+ */
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-cond-min-volt-sel", fuse_sel, 5);
+ if (!rc) {
+ if (!cpr_fuse_is_setting_expected(cpr_vreg, fuse_sel))
+ cpr_vreg->flags |= FLAGS_SET_MIN_VOLTAGE;
+ }
+}
+
+static void cpr_parse_speed_bin_fuse(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ int rc;
+ u64 fuse_bits;
+ u32 fuse_sel[4];
+ u32 speed_bits;
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,speed-bin-fuse-sel", fuse_sel, 4);
+
+ if (!rc) {
+ fuse_bits = cpr_read_efuse_row(cpr_vreg,
+ fuse_sel[0], fuse_sel[3]);
+ speed_bits = (fuse_bits >> fuse_sel[1]) &
+ ((1 << fuse_sel[2]) - 1);
+ cpr_info(cpr_vreg, "[row: %d]: 0x%llx, speed_bits = %d\n",
+ fuse_sel[0], fuse_bits, speed_bits);
+ cpr_vreg->speed_bin = speed_bits;
+ } else {
+ cpr_vreg->speed_bin = SPEED_BIN_NONE;
+ }
+}
+
+static int cpr_voltage_uplift_enable_check(struct cpr_regulator *cpr_vreg,
+ struct device_node *of_node)
+{
+ int rc;
+ u32 fuse_sel[5];
+ u32 uplift_speed_bin;
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,cpr-fuse-uplift-sel", fuse_sel, 5);
+ if (!rc) {
+ rc = of_property_read_u32(of_node,
+ "qcom,cpr-uplift-speed-bin",
+ &uplift_speed_bin);
+ if (rc < 0) {
+ cpr_err(cpr_vreg,
+ "qcom,cpr-uplift-speed-bin missing\n");
+ return rc;
+ }
+ if (cpr_fuse_is_setting_expected(cpr_vreg, fuse_sel)
+ && (uplift_speed_bin == cpr_vreg->speed_bin)
+ && !(cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE)) {
+ cpr_vreg->flags |= FLAGS_UPLIFT_QUOT_VOLT;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Read in the number of fuse corners and then allocate memory for arrays that
+ * are sized based upon the number of fuse corners.
+ */
+static int cpr_fuse_corner_array_alloc(struct device *dev,
+ struct cpr_regulator *cpr_vreg)
+{
+ int rc;
+ size_t len;
+
+ rc = of_property_read_u32(dev->of_node, "qcom,cpr-fuse-corners",
+ &cpr_vreg->num_fuse_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "qcom,cpr-fuse-corners missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ if (cpr_vreg->num_fuse_corners < CPR_FUSE_CORNER_MIN
+ || cpr_vreg->num_fuse_corners > CPR_FUSE_CORNER_LIMIT) {
+ cpr_err(cpr_vreg, "corner count=%d is invalid\n",
+ cpr_vreg->num_fuse_corners);
+ return -EINVAL;
+ }
+
+ /*
+ * The arrays sized based on the fuse corner count ignore element 0
+ * in order to simplify indexing throughout the driver since min_uV = 0
+ * cannot be passed into a set_voltage() callback.
+ */
+ len = cpr_vreg->num_fuse_corners + 1;
+
+ cpr_vreg->pvs_corner_v = devm_kzalloc(dev,
+ len * sizeof(*cpr_vreg->pvs_corner_v), GFP_KERNEL);
+ cpr_vreg->cpr_fuse_target_quot = devm_kzalloc(dev,
+ len * sizeof(*cpr_vreg->cpr_fuse_target_quot), GFP_KERNEL);
+ cpr_vreg->cpr_fuse_ro_sel = devm_kzalloc(dev,
+ len * sizeof(*cpr_vreg->cpr_fuse_ro_sel), GFP_KERNEL);
+ cpr_vreg->fuse_ceiling_volt = devm_kzalloc(dev,
+ len * (sizeof(*cpr_vreg->fuse_ceiling_volt)), GFP_KERNEL);
+ cpr_vreg->fuse_floor_volt = devm_kzalloc(dev,
+ len * (sizeof(*cpr_vreg->fuse_floor_volt)), GFP_KERNEL);
+ cpr_vreg->step_quotient = devm_kzalloc(dev,
+ len * sizeof(*cpr_vreg->step_quotient), GFP_KERNEL);
+
+ if (cpr_vreg->pvs_corner_v == NULL || cpr_vreg->cpr_fuse_ro_sel == NULL
+ || cpr_vreg->fuse_ceiling_volt == NULL
+ || cpr_vreg->fuse_floor_volt == NULL
+ || cpr_vreg->cpr_fuse_target_quot == NULL
+ || cpr_vreg->step_quotient == NULL) {
+ cpr_err(cpr_vreg, "Could not allocate memory for CPR arrays\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int cpr_voltage_plan_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int rc, i;
+ u32 min_uv = 0;
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-ceiling",
+ &cpr_vreg->fuse_ceiling_volt[CPR_FUSE_CORNER_MIN],
+ cpr_vreg->num_fuse_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "cpr-voltage-ceiling missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-floor",
+ &cpr_vreg->fuse_floor_volt[CPR_FUSE_CORNER_MIN],
+ cpr_vreg->num_fuse_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "cpr-voltage-floor missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ cpr_parse_cond_min_volt_fuse(cpr_vreg, of_node);
+ rc = cpr_voltage_uplift_enable_check(cpr_vreg, of_node);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "voltage uplift enable check failed, %d\n",
+ rc);
+ return rc;
+ }
+ if (cpr_vreg->flags & FLAGS_SET_MIN_VOLTAGE) {
+ of_property_read_u32(of_node, "qcom,cpr-cond-min-voltage",
+ &min_uv);
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners;
+ i++)
+ if (cpr_vreg->fuse_ceiling_volt[i] < min_uv) {
+ cpr_vreg->fuse_ceiling_volt[i] = min_uv;
+ cpr_vreg->fuse_floor_volt[i] = min_uv;
+ } else if (cpr_vreg->fuse_floor_volt[i] < min_uv) {
+ cpr_vreg->fuse_floor_volt[i] = min_uv;
+ }
+ }
+
+ return 0;
+}
+
+static int cpr_mem_acc_init(struct platform_device *pdev,
+ struct cpr_regulator *cpr_vreg)
+{
+ int rc, size;
+ struct property *prop;
+ char *corner_map_str;
+
+ if (of_find_property(pdev->dev.of_node, "mem-acc-supply", NULL)) {
+ cpr_vreg->mem_acc_vreg = devm_regulator_get(&pdev->dev,
+ "mem-acc");
+ if (IS_ERR_OR_NULL(cpr_vreg->mem_acc_vreg)) {
+ rc = PTR_RET(cpr_vreg->mem_acc_vreg);
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg,
+ "devm_regulator_get: mem-acc: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ corner_map_str = "qcom,mem-acc-corner-map";
+ prop = of_find_property(pdev->dev.of_node, corner_map_str, NULL);
+ if (!prop) {
+ corner_map_str = "qcom,cpr-corner-map";
+ prop = of_find_property(pdev->dev.of_node, corner_map_str,
+ NULL);
+ if (!prop) {
+ cpr_err(cpr_vreg, "qcom,cpr-corner-map missing\n");
+ return -EINVAL;
+ }
+ }
+
+ size = prop->length / sizeof(u32);
+ cpr_vreg->mem_acc_corner_map = devm_kzalloc(&pdev->dev,
+ sizeof(int) * (size + 1),
+ GFP_KERNEL);
+
+ rc = of_property_read_u32_array(pdev->dev.of_node, corner_map_str,
+ &cpr_vreg->mem_acc_corner_map[CPR_FUSE_CORNER_MIN],
+ size);
+ if (rc) {
+ cpr_err(cpr_vreg, "%s missing, rc = %d\n", corner_map_str, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int cpr_enable_set(void *data, u64 val)
+{
+ struct cpr_regulator *cpr_vreg = data;
+ bool old_cpr_enable;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ old_cpr_enable = cpr_vreg->enable;
+ cpr_vreg->enable = val;
+
+ if (old_cpr_enable == cpr_vreg->enable)
+ goto _exit;
+
+ if (cpr_vreg->enable && cpr_vreg->cpr_fuse_disable) {
+ cpr_info(cpr_vreg,
+ "CPR permanently disabled due to fuse values\n");
+ cpr_vreg->enable = false;
+ goto _exit;
+ }
+
+ cpr_debug(cpr_vreg, "%s CPR [corner=%d, fuse_corner=%d]\n",
+ cpr_vreg->enable ? "enabling" : "disabling",
+ cpr_vreg->corner, cpr_vreg->corner_map[cpr_vreg->corner]);
+
+ if (cpr_vreg->corner) {
+ if (cpr_vreg->enable) {
+ cpr_ctl_disable(cpr_vreg);
+ cpr_irq_clr(cpr_vreg);
+ cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+ } else {
+ cpr_ctl_disable(cpr_vreg);
+ cpr_irq_set(cpr_vreg, 0);
+ }
+ }
+
+_exit:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ return 0;
+}
+
+static int cpr_enable_get(void *data, u64 *val)
+{
+ struct cpr_regulator *cpr_vreg = data;
+
+ *val = cpr_vreg->enable;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_enable_fops, cpr_enable_get, cpr_enable_set,
+ "%llu\n");
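+/*
+ * With debugfs mounted (typically at /sys/kernel/debug), the cpr_enable node
+ * created in cpr_debugfs_init() below allows closed-loop CPR to be toggled at
+ * runtime, e.g.:
+ *
+ *	echo 0 > /sys/kernel/debug/cpr-regulator/<regulator-name>/cpr_enable
+ *	cat /sys/kernel/debug/cpr-regulator/<regulator-name>/cpr_enable
+ *
+ * where <regulator-name> is the regulator constraints name used for the
+ * per-device debugfs directory.
+ */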
+
+static int cpr_get_cpr_ceiling(void *data, u64 *val)
+{
+ struct cpr_regulator *cpr_vreg = data;
+
+ *val = cpr_vreg->ceiling_volt[cpr_vreg->corner];
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_ceiling_fops, cpr_get_cpr_ceiling, NULL,
+ "%llu\n");
+
+static int cpr_get_cpr_floor(void *data, u64 *val)
+{
+ struct cpr_regulator *cpr_vreg = data;
+
+ *val = cpr_vreg->floor_volt[cpr_vreg->corner];
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_floor_fops, cpr_get_cpr_floor, NULL,
+ "%llu\n");
+
+static int cpr_get_cpr_max_ceiling(void *data, u64 *val)
+{
+ struct cpr_regulator *cpr_vreg = data;
+
+ *val = cpr_vreg->cpr_max_ceiling[cpr_vreg->corner];
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_max_ceiling_fops, cpr_get_cpr_max_ceiling, NULL,
+ "%llu\n");
+
+static int cpr_debug_info_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t cpr_debug_info_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ struct cpr_regulator *cpr_vreg = file->private_data;
+ char *debugfs_buf;
+ ssize_t len, ret = 0;
+ u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+ u32 step_dn, step_up, error, error_lt0, busy;
+ int fuse_corner;
+
+ debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!debugfs_buf)
+ return -ENOMEM;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ fuse_corner = cpr_vreg->corner_map[cpr_vreg->corner];
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "corner = %d, current_volt = %d uV\n",
+ cpr_vreg->corner, cpr_vreg->last_volt[cpr_vreg->corner]);
+ ret += len;
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "fuse_corner = %d, current_volt = %d uV\n",
+ fuse_corner, cpr_vreg->last_volt[cpr_vreg->corner]);
+ ret += len;
+
+ ro_sel = cpr_vreg->cpr_fuse_ro_sel[fuse_corner];
+ gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel));
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_gcnt_target (%u) = 0x%02X\n", ro_sel, gcnt);
+ ret += len;
+
+ ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_ctl = 0x%02X\n", ctl);
+ ret += len;
+
+ irq_status = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_irq_status = 0x%02X\n", irq_status);
+ ret += len;
+
+ reg = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_result_0 = 0x%02X\n", reg);
+ ret += len;
+
+ step_dn = reg & 0x01;
+ step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ " [step_dn = %u", step_dn);
+ ret += len;
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", step_up = %u", step_up);
+ ret += len;
+
+ error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", error_steps = %u", error_steps);
+ ret += len;
+
+ error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", error = %u", error);
+ ret += len;
+
+ error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", error_lt_0 = %u", error_lt0);
+ ret += len;
+
+ busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", busy = %u]\n", busy);
+ ret += len;
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
+ kfree(debugfs_buf);
+ return ret;
+}
+
+static const struct file_operations cpr_debug_info_fops = {
+ .open = cpr_debug_info_open,
+ .read = cpr_debug_info_read,
+};
+
+static int cpr_aging_debug_info_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t cpr_aging_debug_info_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ struct cpr_regulator *cpr_vreg = file->private_data;
+ struct cpr_aging_info *aging_info = cpr_vreg->aging_info;
+ char *debugfs_buf;
+ ssize_t len, ret = 0;
+ int i;
+
+ debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!debugfs_buf)
+ return -ENOMEM;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "aging_adj_volt = [");
+ ret += len;
+
+ for (i = CPR_FUSE_CORNER_MIN; i <= cpr_vreg->num_fuse_corners; i++) {
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ " %d", aging_info->voltage_adjust[i]);
+ ret += len;
+ }
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ " ]uV\n");
+ ret += len;
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "aging_measurement_done = %s\n",
+ aging_info->cpr_aging_done ? "true" : "false");
+ ret += len;
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "aging_measurement_error = %s\n",
+ aging_info->cpr_aging_error ? "true" : "false");
+ ret += len;
+
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
+ kfree(debugfs_buf);
+ return ret;
+}
+
+static const struct file_operations cpr_aging_debug_info_fops = {
+ .open = cpr_aging_debug_info_open,
+ .read = cpr_aging_debug_info_read,
+};
+
+static void cpr_debugfs_init(struct cpr_regulator *cpr_vreg)
+{
+ struct dentry *temp;
+
+ if (IS_ERR_OR_NULL(cpr_debugfs_base)) {
+ cpr_err(cpr_vreg, "Could not create debugfs nodes since base directory is missing\n");
+ return;
+ }
+
+ cpr_vreg->debugfs = debugfs_create_dir(cpr_vreg->rdesc.name,
+ cpr_debugfs_base);
+ if (IS_ERR_OR_NULL(cpr_vreg->debugfs)) {
+ cpr_err(cpr_vreg, "debugfs directory creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("debug_info", S_IRUGO, cpr_vreg->debugfs,
+ cpr_vreg, &cpr_debug_info_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "debug_info node creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("cpr_enable", S_IRUGO | S_IWUSR,
+ cpr_vreg->debugfs, cpr_vreg, &cpr_enable_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "cpr_enable node creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("cpr_ceiling", S_IRUGO,
+ cpr_vreg->debugfs, cpr_vreg, &cpr_ceiling_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "cpr_ceiling node creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("cpr_floor", S_IRUGO,
+ cpr_vreg->debugfs, cpr_vreg, &cpr_floor_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "cpr_floor node creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("cpr_max_ceiling", S_IRUGO,
+ cpr_vreg->debugfs, cpr_vreg, &cpr_max_ceiling_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "cpr_max_ceiling node creation failed\n");
+ return;
+ }
+
+ if (cpr_vreg->aging_info) {
+ temp = debugfs_create_file("aging_debug_info", S_IRUGO,
+ cpr_vreg->debugfs, cpr_vreg,
+ &cpr_aging_debug_info_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "aging_debug_info node creation failed\n");
+ return;
+ }
+ }
+}
+
+static void cpr_debugfs_remove(struct cpr_regulator *cpr_vreg)
+{
+ debugfs_remove_recursive(cpr_vreg->debugfs);
+}
+
+static void cpr_debugfs_base_init(void)
+{
+ cpr_debugfs_base = debugfs_create_dir("cpr-regulator", NULL);
+ if (IS_ERR_OR_NULL(cpr_debugfs_base))
+ pr_err("cpr-regulator debugfs base directory creation failed\n");
+}
+
+static void cpr_debugfs_base_remove(void)
+{
+ debugfs_remove_recursive(cpr_debugfs_base);
+}
+
+#else
+
+static void cpr_debugfs_init(struct cpr_regulator *cpr_vreg)
+{}
+
+static void cpr_debugfs_remove(struct cpr_regulator *cpr_vreg)
+{}
+
+static void cpr_debugfs_base_init(void)
+{}
+
+static void cpr_debugfs_base_remove(void)
+{}
+
+#endif
+
+static int cpr_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_config reg_config = {};
+ struct cpr_regulator *cpr_vreg;
+ struct regulator_desc *rdesc;
+ struct device *dev = &pdev->dev;
+ struct regulator_init_data *init_data = pdev->dev.platform_data;
+ int rc;
+
+ if (!pdev->dev.of_node) {
+ dev_err(dev, "Device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+ if (!init_data) {
+ dev_err(dev, "regulator init data is missing\n");
+ return -EINVAL;
+ } else {
+ init_data->constraints.input_uV
+ = init_data->constraints.max_uV;
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS;
+ }
+
+ cpr_vreg = devm_kzalloc(&pdev->dev, sizeof(struct cpr_regulator),
+ GFP_KERNEL);
+ if (!cpr_vreg) {
+ dev_err(dev, "Can't allocate cpr_regulator memory\n");
+ return -ENOMEM;
+ }
+
+ cpr_vreg->dev = &pdev->dev;
+ cpr_vreg->rdesc.name = init_data->constraints.name;
+ if (cpr_vreg->rdesc.name == NULL) {
+ dev_err(dev, "regulator-name missing\n");
+ return -EINVAL;
+ }
+
+ rc = cpr_fuse_corner_array_alloc(&pdev->dev, cpr_vreg);
+ if (rc)
+ return rc;
+
+ rc = cpr_mem_acc_init(pdev, cpr_vreg);
+ if (rc) {
+		cpr_err(cpr_vreg, "mem_acc initialization error rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr_efuse_init(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Wrong eFuse address specified: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr_remap_efuse_data(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Could not remap fuse data: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr_check_redundant(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Could not check redundant fuse: rc=%d\n",
+ rc);
+ goto err_out;
+ }
+
+ rc = cpr_read_fuse_revision(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Could not read fuse revision: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ cpr_parse_speed_bin_fuse(cpr_vreg, dev->of_node);
+ cpr_parse_pvs_version_fuse(cpr_vreg, dev->of_node);
+
+ rc = cpr_read_ro_select(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Could not read RO select: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_find_fuse_map_match(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Could not determine fuse mapping match: rc=%d\n",
+ rc);
+ goto err_out;
+ }
+
+ rc = cpr_voltage_plan_init(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Wrong DT parameter specified: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_pvs_init(pdev, cpr_vreg);
+ if (rc) {
+		cpr_err(cpr_vreg, "PVS initialization failed: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_vsens_init(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Initialize vsens configuration failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr_apc_init(pdev, cpr_vreg);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+			cpr_err(cpr_vreg, "APC initialization failed: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_init_cpr(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Initialize CPR failed: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_rpm_apc_init(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Initialize RPM APC regulator failed rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr_thermal_init(cpr_vreg);
+ if (rc) {
+		cpr_err(cpr_vreg, "Thermal initialization failed rc=%d\n", rc);
+ return rc;
+ }
+
+ if (of_property_read_bool(pdev->dev.of_node,
+ "qcom,disable-closed-loop-in-pc")) {
+ rc = cpr_init_pm_notification(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg,
+ "cpr_init_pm_notification failed rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Load per-online CPU adjustment data */
+ rc = cpr_init_per_cpu_adjustments(cpr_vreg, &pdev->dev);
+ if (rc) {
+ cpr_err(cpr_vreg, "cpr_init_per_cpu_adjustments failed: rc=%d\n",
+ rc);
+ goto err_out;
+ }
+
+ /* Parse dependency parameters */
+ if (cpr_vreg->vdd_mx) {
+ rc = cpr_parse_vdd_mx_parameters(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "parsing vdd_mx parameters failed: rc=%d\n",
+ rc);
+ goto err_out;
+ }
+ }
+
+ cpr_efuse_free(cpr_vreg);
+
+ /*
+ * Ensure that enable state accurately reflects the case in which CPR
+ * is permanently disabled.
+ */
+ cpr_vreg->enable &= !cpr_vreg->cpr_fuse_disable;
+
+ mutex_init(&cpr_vreg->cpr_mutex);
+
+ rdesc = &cpr_vreg->rdesc;
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &cpr_corner_ops;
+
+ reg_config.dev = &pdev->dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = cpr_vreg;
+ reg_config.of_node = pdev->dev.of_node;
+ cpr_vreg->rdev = regulator_register(rdesc, &reg_config);
+ if (IS_ERR(cpr_vreg->rdev)) {
+ rc = PTR_ERR(cpr_vreg->rdev);
+ cpr_err(cpr_vreg, "regulator_register failed: rc=%d\n", rc);
+
+ cpr_apc_exit(cpr_vreg);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, cpr_vreg);
+ cpr_debugfs_init(cpr_vreg);
+
+ if (cpr_vreg->cpr_disable_on_temperature) {
+ rc = cpr_check_tsens(cpr_vreg);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "Unable to config CPR on tsens, rc=%d\n",
+ rc);
+ cpr_apc_exit(cpr_vreg);
+ cpr_debugfs_remove(cpr_vreg);
+ return rc;
+ }
+ }
+
+ mutex_lock(&cpr_regulator_list_mutex);
+ list_add(&cpr_vreg->list, &cpr_regulator_list);
+ mutex_unlock(&cpr_regulator_list_mutex);
+
+ return 0;
+
+err_out:
+ cpr_efuse_free(cpr_vreg);
+ return rc;
+}
+
+static int cpr_regulator_remove(struct platform_device *pdev)
+{
+ struct cpr_regulator *cpr_vreg;
+
+ cpr_vreg = platform_get_drvdata(pdev);
+ if (cpr_vreg) {
+ /* Disable CPR */
+ if (cpr_is_allowed(cpr_vreg)) {
+ cpr_ctl_disable(cpr_vreg);
+ cpr_irq_set(cpr_vreg, 0);
+ }
+
+ mutex_lock(&cpr_regulator_list_mutex);
+ list_del(&cpr_vreg->list);
+ mutex_unlock(&cpr_regulator_list_mutex);
+
+ if (cpr_vreg->cpu_notifier.notifier_call)
+ unregister_hotcpu_notifier(&cpr_vreg->cpu_notifier);
+
+ if (cpr_vreg->cpr_disable_on_temperature)
+ sensor_mgr_remove_threshold(
+ &cpr_vreg->tsens_threshold_config);
+
+ cpr_apc_exit(cpr_vreg);
+ cpr_debugfs_remove(cpr_vreg);
+ regulator_unregister(cpr_vreg->rdev);
+ }
+
+ return 0;
+}
+
+static struct of_device_id cpr_regulator_match_table[] = {
+ { .compatible = CPR_REGULATOR_DRIVER_NAME, },
+ {}
+};
+
+static struct platform_driver cpr_regulator_driver = {
+ .driver = {
+ .name = CPR_REGULATOR_DRIVER_NAME,
+ .of_match_table = cpr_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = cpr_regulator_probe,
+ .remove = cpr_regulator_remove,
+ .suspend = cpr_regulator_suspend,
+ .resume = cpr_regulator_resume,
+};
+
+static int initialize_tsens_monitor(struct cpr_regulator *cpr_vreg)
+{
+ int rc;
+
+ rc = cpr_check_tsens(cpr_vreg);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "Unable to check tsens, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = sensor_mgr_init_threshold(&cpr_vreg->tsens_threshold_config,
+ cpr_vreg->tsens_id,
+ cpr_vreg->cpr_enable_temp_threshold, /* high */
+ cpr_vreg->cpr_disable_temp_threshold, /* low */
+ tsens_threshold_notify);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "Failed to init tsens monitor, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = sensor_mgr_convert_id_and_set_threshold(
+ &cpr_vreg->tsens_threshold_config);
+ if (rc < 0)
+ cpr_err(cpr_vreg, "Failed to set tsens threshold, rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+int __init cpr_regulator_late_init(void)
+{
+ int rc;
+ struct cpr_regulator *cpr_vreg;
+
+ mutex_lock(&cpr_regulator_list_mutex);
+
+ list_for_each_entry(cpr_vreg, &cpr_regulator_list, list) {
+ if (cpr_vreg->cpr_disable_on_temperature) {
+ rc = initialize_tsens_monitor(cpr_vreg);
+ if (rc)
+ cpr_err(cpr_vreg, "Failed to initialize temperature monitor, rc=%d\n",
+ rc);
+ }
+ }
+
+ mutex_unlock(&cpr_regulator_list_mutex);
+ return 0;
+}
+late_initcall(cpr_regulator_late_init);
+
+/**
+ * cpr_regulator_init() - register cpr-regulator driver
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+int __init cpr_regulator_init(void)
+{
+ static bool initialized;
+
+ if (initialized)
+ return 0;
+ else
+ initialized = true;
+
+ cpr_debugfs_base_init();
+ return platform_driver_register(&cpr_regulator_driver);
+}
+EXPORT_SYMBOL(cpr_regulator_init);
+
+static void __exit cpr_regulator_exit(void)
+{
+ platform_driver_unregister(&cpr_regulator_driver);
+ cpr_debugfs_base_remove();
+}
+
+MODULE_DESCRIPTION("CPR regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(cpr_regulator_init);
+module_exit(cpr_regulator_exit);
diff --git a/drivers/regulator/cpr2-gfx-regulator.c b/drivers/regulator/cpr2-gfx-regulator.c
new file mode 100644
index 000000000000..ca37e0ec6201
--- /dev/null
+++ b/drivers/regulator/cpr2-gfx-regulator.c
@@ -0,0 +1,2451 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * n)
+
+#define RBCPR_GCNT_TARGET_GCNT_BITS 10
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK ((1<<RBCPR_GCNT_TARGET_GCNT_BITS)-1)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4C
+
+#define RBIF_TIMER_ADJ_CONS_UP_BITS 4
+#define RBIF_TIMER_ADJ_CONS_UP_MASK ((1<<RBIF_TIMER_ADJ_CONS_UP_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_BITS 4
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK ((1<<RBIF_TIMER_ADJ_CONS_DOWN_BITS)-1)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define REG_RBCPR_STEP_QUOT 0x80
+#define REG_RBIF_SW_VLEVEL 0x94
+
+#define RBIF_LIMIT_CEILING_BITS 6
+#define RBIF_LIMIT_CEILING_MASK ((1<<RBIF_LIMIT_CEILING_BITS)-1)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK ((1<<RBIF_LIMIT_FLOOR_BITS)-1)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define RBCPR_STEP_QUOT_STEPQUOT_BITS 8
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK ((1<<RBCPR_STEP_QUOT_STEPQUOT_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_BITS 4
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK ((1<<RBCPR_STEP_QUOT_IDLE_CLK_BITS)-1)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_BITS 4
+#define RBCPR_CTL_UP_THRESHOLD_MASK ((1<<RBCPR_CTL_UP_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_BITS 4
+#define RBCPR_CTL_DN_THRESHOLD_MASK ((1<<RBCPR_CTL_DN_THRESHOLD_BITS)-1)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9C
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xA0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_BITS 12
+#define RBCPR_RESULT0_ERROR_MASK ((1<<RBCPR_RESULT0_ERROR_BITS)-1)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_BITS 4
+#define RBCPR_RESULT0_ERROR_STEPS_MASK ((1<<RBCPR_RESULT0_ERROR_STEPS_BITS)-1)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
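+/*
+ * RBCPR_RESULT_0 bit layout implied by the shifts and masks above:
+ * [1] step_up, [5:2] error_steps, [17:6] error, [18] error_lt_0, [19] busy.
+ */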
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * n)
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define BYTES_PER_FUSE_ROW 8
+
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+#define FUSE_REVISION_UNKNOWN (-1)
+#define FUSE_MAP_NO_MATCH (-1)
+#define FUSE_PARAM_MATCH_ANY 0xFFFFFFFF
+
+#define CPR_CORNER_MIN 1
+/*
+ * This is an arbitrary upper limit which is used in a sanity check in order to
+ * avoid excessive memory allocation due to bad device tree data.
+ */
+#define CPR_CORNER_LIMIT 100
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct cpr2_gfx_regulator {
+ struct list_head list;
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct device *dev;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ bool vreg_enabled;
+ int corner;
+ int ceiling_max;
+ struct dentry *debugfs;
+
+ /* eFuse parameters */
+ phys_addr_t efuse_addr;
+ void __iomem *efuse_base;
+
+ /* Process voltage parameters */
+ int *open_loop_volt;
+ /* Process voltage variables */
+ u32 process_id;
+ u32 foundry_id;
+
+ /* GPU voltage regulator */
+ struct regulator *vdd_gfx;
+
+ /* Dependency parameters */
+ struct regulator *vdd_mx;
+ int vdd_mx_vmin;
+ int *vdd_mx_corner_map;
+
+ /* mem-acc regulator */
+ struct regulator *mem_acc_vreg;
+
+ /* CPR parameters */
+ bool cpr_fuse_disable;
+ int cpr_fuse_revision;
+ int cpr_fuse_map_count;
+ int cpr_fuse_map_match;
+ int **cpr_target_quot;
+ int gcnt;
+
+ unsigned int cpr_irq;
+ void __iomem *rbcpr_base;
+ struct mutex cpr_mutex;
+
+ int *ceiling_volt;
+ int *floor_volt;
+ int *last_volt;
+ int step_volt;
+
+ int *save_ctl;
+ int *save_irq;
+
+ /* Config parameters */
+ bool enable;
+ u32 ref_clk_khz;
+ u32 timer_delay_us;
+ u32 timer_cons_up;
+ u32 timer_cons_down;
+ u32 irq_line;
+ u32 step_quotient;
+ u32 up_threshold;
+ u32 down_threshold;
+ u32 idle_clocks;
+ u32 gcnt_time_us;
+ u32 vdd_gfx_step_up_limit;
+ u32 vdd_gfx_step_down_limit;
+ u32 flags;
+ u32 ro_count;
+ u32 num_corners;
+
+ bool is_cpr_suspended;
+ bool ctrl_enable;
+};
+
+#define CPR_DEBUG_MASK_IRQ BIT(0)
+#define CPR_DEBUG_MASK_API BIT(1)
+
+static int cpr_debug_enable;
+static struct dentry *cpr2_gfx_debugfs_base;
+
+static DEFINE_MUTEX(cpr2_gfx_regulator_list_mutex);
+static LIST_HEAD(cpr2_gfx_regulator_list);
+
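+/*
+ * cpr_debug_enable is a bitmask: CPR_DEBUG_MASK_IRQ (bit 0) promotes the IRQ
+ * handler traces to pr_info() and CPR_DEBUG_MASK_API (bit 1) enables the API
+ * traces.  It is exposed as the writable "debug_enable" parameter of this
+ * driver (under /sys/module/.../parameters/ once the kernel is booted), so
+ * writing 3 enables both classes of messages.
+ */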
+module_param_named(debug_enable, cpr_debug_enable, int, S_IRUGO | S_IWUSR);
+#define cpr_debug(cpr_vreg, message, ...) \
+ do { \
+ if (cpr_debug_enable & CPR_DEBUG_MASK_API) \
+ pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
+ ##__VA_ARGS__); \
+ } while (0)
+#define cpr_debug_irq(cpr_vreg, message, ...) \
+ do { \
+ if (cpr_debug_enable & CPR_DEBUG_MASK_IRQ) \
+ pr_info("%s: " message, (cpr_vreg)->rdesc.name, \
+ ##__VA_ARGS__); \
+ else \
+ pr_debug("%s: " message, (cpr_vreg)->rdesc.name, \
+ ##__VA_ARGS__); \
+ } while (0)
+#define cpr_info(cpr_vreg, message, ...) \
+ pr_info("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)
+#define cpr_err(cpr_vreg, message, ...) \
+ pr_err("%s: " message, (cpr_vreg)->rdesc.name, ##__VA_ARGS__)
+
+static u64 cpr_read_efuse_row(struct cpr2_gfx_regulator *cpr_vreg, u32 row_num)
+{
+ u64 efuse_bits;
+
+ efuse_bits = readq_relaxed(cpr_vreg->efuse_base
+ + row_num * BYTES_PER_FUSE_ROW);
+ return efuse_bits;
+}
+
+/**
+ * cpr_read_efuse_param() - read a parameter from one or two eFuse rows
+ * @cpr_vreg: Pointer to cpr2_gfx_regulator struct for this regulator.
+ * @row_start: Fuse row number to start reading from.
+ * @bit_start: The LSB of the parameter to read from the fuse.
+ * @bit_len: The length of the parameter in bits.
+ *
+ * This function reads a parameter of specified offset and bit size out of one
+ * or two consecutive eFuse rows. This allows for the reading of parameters
+ * that happen to be split between two eFuse rows.
+ *
+ * Returns the fuse parameter on success or 0 on failure.
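+ *
+ * For example, a parameter at row_start = 1, bit_start = 60, bit_len = 8
+ * spans two rows: the low 4 bits come from bits [63:60] of row 1 and the
+ * remaining 4 bits from bits [3:0] of row 2, shifted up above the first four.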
+ */
+static u64 cpr_read_efuse_param(struct cpr2_gfx_regulator *cpr_vreg,
+ int row_start, int bit_start, int bit_len)
+{
+ u64 fuse[2];
+ u64 param = 0;
+ int bits_first, bits_second;
+
+ if (bit_start < 0) {
+ cpr_err(cpr_vreg, "Invalid LSB = %d specified\n", bit_start);
+ return 0;
+ }
+
+ if (bit_len < 0 || bit_len > 64) {
+ cpr_err(cpr_vreg, "Invalid bit length = %d specified\n",
+ bit_len);
+ return 0;
+ }
+
+ /* Allow bit indexing to start beyond the end of the start row. */
+ if (bit_start >= 64) {
+ row_start += bit_start >> 6; /* equivalent to bit_start / 64 */
+ bit_start &= 0x3F;
+ }
+
+ fuse[0] = cpr_read_efuse_row(cpr_vreg, row_start);
+
+ if (bit_start == 0 && bit_len == 64) {
+ param = fuse[0];
+ } else if (bit_start + bit_len <= 64) {
+ param = (fuse[0] >> bit_start) & ((1ULL << bit_len) - 1);
+ } else {
+ fuse[1] = cpr_read_efuse_row(cpr_vreg, row_start + 1);
+ bits_first = 64 - bit_start;
+ bits_second = bit_len - bits_first;
+ param = (fuse[0] >> bit_start) & ((1ULL << bits_first) - 1);
+ param |= (fuse[1] & ((1ULL << bits_second) - 1)) << bits_first;
+ }
+
+ return param;
+}
+
+static bool cpr_is_allowed(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ if (cpr_vreg->cpr_fuse_disable || !cpr_vreg->enable)
+ return false;
+ else
+ return true;
+}
+
+static void cpr_write(struct cpr2_gfx_regulator *cpr_vreg, u32 offset,
+ u32 value)
+{
+ writel_relaxed(value, cpr_vreg->rbcpr_base + offset);
+}
+
+static u32 cpr_read(struct cpr2_gfx_regulator *cpr_vreg, u32 offset)
+{
+ return readl_relaxed(cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_masked_write(struct cpr2_gfx_regulator *cpr_vreg, u32 offset,
+ u32 mask, u32 value)
+{
+ u32 reg_val;
+
+ reg_val = readl_relaxed(cpr_vreg->rbcpr_base + offset);
+ reg_val &= ~mask;
+ reg_val |= value & mask;
+ writel_relaxed(reg_val, cpr_vreg->rbcpr_base + offset);
+}
+
+static void cpr_irq_clr(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ if (cpr_vreg->ctrl_enable)
+ cpr_write(cpr_vreg, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ cpr_irq_clr(cpr_vreg);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ cpr_irq_clr(cpr_vreg);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr2_gfx_regulator *cpr_vreg, u32 int_bits)
+{
+ if (cpr_vreg->ctrl_enable)
+ cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line),
+ int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr2_gfx_regulator *cpr_vreg, u32 mask,
+ u32 value)
+{
+ cpr_masked_write(cpr_vreg, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr2_gfx_regulator *cpr_vreg, int corner)
+{
+ u32 val;
+
+ if (cpr_vreg->is_cpr_suspended || !cpr_vreg->ctrl_enable)
+ return;
+
+ /* Program Consecutive Up & Down */
+ val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+ << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+ (cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK);
+ cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, val);
+ cpr_masked_write(cpr_vreg, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ cpr_vreg->save_ctl[corner]);
+ cpr_irq_set(cpr_vreg, cpr_vreg->save_irq[corner]);
+
+ if (cpr_vreg->ceiling_volt[corner] > cpr_vreg->floor_volt[corner])
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ if (cpr_vreg->is_cpr_suspended || !cpr_vreg->ctrl_enable)
+ return;
+
+ cpr_irq_set(cpr_vreg, 0);
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(cpr_vreg, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(cpr_vreg);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(cpr_vreg, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(cpr_vreg, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr2_gfx_regulator *cpr_vreg, int corner)
+{
+ cpr_vreg->save_ctl[corner] = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ cpr_vreg->save_irq[corner] =
+ cpr_read(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line));
+}
+
+#define MAX_CHARS_PER_INT 10
+
+static void cpr_corner_restore(struct cpr2_gfx_regulator *cpr_vreg, int corner)
+{
+ u32 gcnt, ctl, irq, step_quot;
+ int i;
+
+ if (!cpr_vreg->ctrl_enable)
+ return;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = ((cpr_vreg->idle_clocks & RBCPR_STEP_QUOT_IDLE_CLK_MASK)
+ << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT) |
+ (cpr_vreg->step_quotient & RBCPR_STEP_QUOT_STEPQUOT_MASK);
+ cpr_write(cpr_vreg, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Program the target quotient value and gate count of all ROs */
+ for (i = 0; i < cpr_vreg->ro_count; i++) {
+ gcnt = cpr_vreg->gcnt
+ | (cpr_vreg->cpr_target_quot[corner][i]);
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), gcnt);
+ }
+
+ ctl = cpr_vreg->save_ctl[corner];
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, ctl);
+ irq = cpr_vreg->save_irq[corner];
+ cpr_irq_set(cpr_vreg, irq);
+ cpr_debug(cpr_vreg, "ctl = 0x%08x, irq = 0x%08x\n", ctl, irq);
+}
+
+static void cpr_corner_switch(struct cpr2_gfx_regulator *cpr_vreg, int corner)
+{
+ if (cpr_vreg->corner == corner)
+ return;
+
+ cpr_corner_restore(cpr_vreg, corner);
+}
+
+static int cpr_gfx_set(struct cpr2_gfx_regulator *cpr_vreg, u32 new_volt)
+{
+ int max_volt, rc;
+
+ max_volt = cpr_vreg->ceiling_max;
+ rc = regulator_set_voltage(cpr_vreg->vdd_gfx, new_volt, max_volt);
+ if (rc)
+ cpr_err(cpr_vreg, "set: vdd_gfx = %d uV: rc=%d\n",
+ new_volt, rc);
+ return rc;
+}
+
+static int cpr_mx_set(struct cpr2_gfx_regulator *cpr_vreg, int corner,
+ int vdd_mx_vmin)
+{
+ int rc, max_uV = INT_MAX;
+
+ rc = regulator_set_voltage(cpr_vreg->vdd_mx, vdd_mx_vmin, max_uV);
+ cpr_debug(cpr_vreg, "[corner:%d] %d uV\n", corner, vdd_mx_vmin);
+
+ if (!rc)
+ cpr_vreg->vdd_mx_vmin = vdd_mx_vmin;
+ else
+ cpr_err(cpr_vreg, "set: vdd_mx [corner:%d] = %d uV failed: rc=%d\n",
+ corner, vdd_mx_vmin, rc);
+ return rc;
+}
+
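+/*
+ * Rail sequencing in cpr2_gfx_scale_voltage(): when scaling up, vdd_mx is
+ * raised before vdd_gfx and the mem-acc corner is switched afterwards; when
+ * scaling down, the mem-acc corner is switched before vdd_gfx and vdd_mx is
+ * lowered last.  This ordering appears intended to keep the dependent rails
+ * at or above the level required by the new vdd_gfx voltage throughout the
+ * transition.
+ */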
+static int cpr2_gfx_scale_voltage(struct cpr2_gfx_regulator *cpr_vreg,
+ int corner, int new_gfx_volt,
+ enum voltage_change_dir dir)
+{
+ int rc = 0, vdd_mx_vmin = 0;
+
+ /* Determine the vdd_mx voltage */
+ if (dir != NO_CHANGE && cpr_vreg->vdd_mx != NULL)
+ vdd_mx_vmin = cpr_vreg->vdd_mx_corner_map[corner];
+
+ if (cpr_vreg->mem_acc_vreg && dir == DOWN) {
+ rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg,
+ corner, corner);
+ if (rc)
+ cpr_err(cpr_vreg, "set: mem_acc corner:%d failed: rc=%d\n",
+ corner, rc);
+ }
+
+ if (!rc && vdd_mx_vmin && dir == UP) {
+ if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+ rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+ }
+
+ if (!rc)
+ rc = cpr_gfx_set(cpr_vreg, new_gfx_volt);
+
+ if (!rc && cpr_vreg->mem_acc_vreg && dir == UP) {
+ rc = regulator_set_voltage(cpr_vreg->mem_acc_vreg, corner,
+ corner);
+ if (rc)
+ cpr_err(cpr_vreg, "set: mem_acc corner:%d failed: rc=%d\n",
+ corner, rc);
+ }
+
+ if (!rc && vdd_mx_vmin && dir == DOWN) {
+ if (vdd_mx_vmin != cpr_vreg->vdd_mx_vmin)
+ rc = cpr_mx_set(cpr_vreg, corner, vdd_mx_vmin);
+ }
+
+ return rc;
+}
+
+static void cpr2_gfx_scale(struct cpr2_gfx_regulator *cpr_vreg,
+ enum voltage_change_dir dir)
+{
+ u32 reg_val, error_steps, reg_mask, gcnt;
+ int last_volt, new_volt, corner, i, pos;
+ size_t buf_len;
+ char *buf;
+
+ corner = cpr_vreg->corner;
+
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+
+ error_steps = (reg_val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_volt = cpr_vreg->last_volt[corner];
+
+ cpr_debug_irq(cpr_vreg, "last_volt[corner:%d] = %d uV\n", corner,
+ last_volt);
+
+ buf_len = cpr_vreg->ro_count * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL) {
+ cpr_err(cpr_vreg, "Could not allocate memory for target register logging\n");
+ return;
+ }
+
+ for (i = 0, pos = 0; i < cpr_vreg->ro_count; i++) {
+ gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(i));
+ pos += scnprintf(buf + pos, buf_len - pos, "%u%s", gcnt,
+ i < cpr_vreg->ro_count - 1 ? " " : "");
+ }
+
+ if (dir == UP) {
+ cpr_debug_irq(cpr_vreg,
+ "Up: cpr status = 0x%08x (error_steps=%d)\n",
+ reg_val, error_steps);
+
+ if (last_volt >= cpr_vreg->ceiling_volt[corner]) {
+ cpr_debug_irq(cpr_vreg,
+ "[corn:%d] @ ceiling: %d >= %d: NACK\n",
+ corner, last_volt,
+ cpr_vreg->ceiling_volt[corner]);
+ cpr_irq_clr_nack(cpr_vreg);
+
+ cpr_debug_irq(cpr_vreg, "gcnt target dump: [%s]\n",
+ buf);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+ RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ reg_val = reg_mask;
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ goto _exit;
+ }
+
+ if (error_steps > cpr_vreg->vdd_gfx_step_up_limit) {
+ cpr_debug_irq(cpr_vreg,
+ "%d is over up-limit(%d): Clamp\n",
+ error_steps,
+ cpr_vreg->vdd_gfx_step_up_limit);
+ error_steps = cpr_vreg->vdd_gfx_step_up_limit;
+ }
+
+ /* Calculate new voltage */
+ new_volt = last_volt + (error_steps * cpr_vreg->step_volt);
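+		/*
+		 * e.g. with last_volt = 900000 uV, error_steps = 2 and
+		 * step_volt = 12500 uV this requests 925000 uV, which is then
+		 * clamped to the corner ceiling below if necessary
+		 * (illustrative numbers).
+		 */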
+ if (new_volt > cpr_vreg->ceiling_volt[corner]) {
+ cpr_debug_irq(cpr_vreg,
+ "new_volt(%d) >= ceiling(%d): Clamp\n",
+ new_volt,
+ cpr_vreg->ceiling_volt[corner]);
+
+ new_volt = cpr_vreg->ceiling_volt[corner];
+ }
+
+ if (cpr2_gfx_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+ cpr_irq_clr_nack(cpr_vreg);
+ goto _exit;
+ }
+ cpr_vreg->last_volt[corner] = new_volt;
+
+ /* Disable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ reg_val = 0;
+
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(cpr_vreg);
+
+ cpr_debug_irq(cpr_vreg, "UP: -> new_volt[corner:%d] = %d uV\n",
+ corner, new_volt);
+ } else if (dir == DOWN) {
+ cpr_debug_irq(cpr_vreg,
+ "Down: cpr status = 0x%08x (error_steps=%d)\n",
+ reg_val, error_steps);
+
+ if (last_volt <= cpr_vreg->floor_volt[corner]) {
+ cpr_debug_irq(cpr_vreg,
+ "[corn:%d] @ floor: %d <= %d: NACK\n",
+ corner, last_volt,
+ cpr_vreg->floor_volt[corner]);
+ cpr_irq_clr_nack(cpr_vreg);
+
+ cpr_debug_irq(cpr_vreg, "gcnt target dump: [%s]\n",
+ buf);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ reg_val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ goto _exit;
+ }
+
+ if (error_steps > cpr_vreg->vdd_gfx_step_down_limit) {
+ cpr_debug_irq(cpr_vreg,
+ "%d is over down-limit(%d): Clamp\n",
+ error_steps,
+ cpr_vreg->vdd_gfx_step_down_limit);
+ error_steps = cpr_vreg->vdd_gfx_step_down_limit;
+ }
+
+		/* Calculate new voltage */
+ new_volt = last_volt - (error_steps * cpr_vreg->step_volt);
+ if (new_volt < cpr_vreg->floor_volt[corner]) {
+ cpr_debug_irq(cpr_vreg,
+ "new_volt(%d) < floor(%d): Clamp\n",
+ new_volt, cpr_vreg->floor_volt[corner]);
+ new_volt = cpr_vreg->floor_volt[corner];
+ }
+
+ if (cpr2_gfx_scale_voltage(cpr_vreg, corner, new_volt, dir)) {
+ cpr_irq_clr_nack(cpr_vreg);
+ goto _exit;
+ }
+ cpr_vreg->last_volt[corner] = new_volt;
+
+ /* Restore default threshold for UP */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK <<
+ RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ reg_val = cpr_vreg->up_threshold <<
+ RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ cpr_ctl_modify(cpr_vreg, reg_mask, reg_val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(cpr_vreg);
+
+ cpr_debug_irq(cpr_vreg,
+ "DOWN: -> new_volt[corner:%d] = %d uV\n",
+ corner, new_volt);
+ }
+
+_exit:
+ kfree(buf);
+}
+
+static irqreturn_t cpr2_gfx_irq_handler(int irq, void *dev)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = dev;
+ u32 reg_val;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+ if (cpr_vreg->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ reg_val = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+
+ cpr_debug_irq(cpr_vreg, "IRQ_STATUS = 0x%02X\n", reg_val);
+
+ if (!cpr_ctl_is_enabled(cpr_vreg)) {
+ cpr_debug_irq(cpr_vreg, "CPR is disabled\n");
+ goto _exit;
+ } else if (cpr_ctl_is_busy(cpr_vreg)) {
+ cpr_debug_irq(cpr_vreg, "CPR measurement is not ready\n");
+ goto _exit;
+ } else if (!cpr_is_allowed(cpr_vreg)) {
+ reg_val = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ cpr_err(cpr_vreg, "Interrupt broken? RBCPR_CTL = 0x%02X\n",
+ reg_val);
+ goto _exit;
+ }
+
+ /* Following sequence of handling is as per each IRQ's priority */
+ if (reg_val & CPR_INT_UP) {
+ cpr2_gfx_scale(cpr_vreg, UP);
+ } else if (reg_val & CPR_INT_DOWN) {
+ cpr2_gfx_scale(cpr_vreg, DOWN);
+ } else if (reg_val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(cpr_vreg);
+ } else if (reg_val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(cpr_vreg);
+ } else if (reg_val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ cpr_debug_irq(cpr_vreg, "IRQ occurred for Mid Flag\n");
+ } else {
+ cpr_debug_irq(cpr_vreg,
+ "IRQ occurred for unknown flag (0x%08x)\n", reg_val);
+ }
+
+ /* Save register values for the corner */
+ cpr_corner_save(cpr_vreg, cpr_vreg->corner);
+
+_exit:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return IRQ_HANDLED;
+}
+
+/**
+ * cpr2_gfx_clock_enable() - prepare and enable all clocks used by this CPR GFX
+ * controller
+ * @cpr_vreg: Pointer to the cpr2 gfx controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr2_gfx_clock_enable(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ int rc;
+
+ if (cpr_vreg->iface_clk) {
+ rc = clk_prepare_enable(cpr_vreg->iface_clk);
+ if (rc) {
+ cpr_err(cpr_vreg, "failed to enable interface clock, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (cpr_vreg->core_clk) {
+ rc = clk_prepare_enable(cpr_vreg->core_clk);
+ if (rc) {
+ cpr_err(cpr_vreg, "failed to enable core clock, rc=%d\n",
+ rc);
+ clk_disable_unprepare(cpr_vreg->iface_clk);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr2_gfx_clock_disable() - disable and unprepare all clocks used by this CPR
+ * GFX controller
+ * @cpr_vreg: Pointer to the CPR2 controller
+ *
+ * Return: none
+ */
+static void cpr2_gfx_clock_disable(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ if (cpr_vreg->core_clk)
+ clk_disable_unprepare(cpr_vreg->core_clk);
+
+ if (cpr_vreg->iface_clk)
+ clk_disable_unprepare(cpr_vreg->iface_clk);
+}
+
+static int cpr2_gfx_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+ return cpr_vreg->vreg_enabled;
+}
+
+/**
+ * cpr2_gfx_closed_loop_enable() - enable logical CPR closed-loop operation
+ * @cpr_vreg: Pointer to the cpr2 gfx regulator
+ *
+ * Return: 0 on success, error on failure
+ */
+static inline int cpr2_gfx_closed_loop_enable(struct cpr2_gfx_regulator
+ *cpr_vreg)
+{
+ int rc = 0;
+
+ if (!cpr_is_allowed(cpr_vreg)) {
+ return -EPERM;
+ } else if (cpr_vreg->ctrl_enable) {
+ /* Already enabled */
+ return 0;
+ } else if (cpr_vreg->is_cpr_suspended) {
+ /* CPR must remain disabled as the system is entering suspend */
+ return 0;
+ }
+
+ rc = cpr2_gfx_clock_enable(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "unable to enable CPR clocks, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr_vreg->ctrl_enable = true;
+ cpr_debug(cpr_vreg, "CPR closed-loop operation enabled\n");
+
+ return 0;
+}
+
+/**
+ * cpr2_gfx_closed_loop_disable() - disable logical CPR closed-loop operation
+ * @cpr_vreg: Pointer to the cpr2 gfx regulator
+ *
+ * Return: 0 on success, error on failure
+ */
+static inline int cpr2_gfx_closed_loop_disable(struct cpr2_gfx_regulator
+ *cpr_vreg)
+{
+ if (!cpr_vreg->ctrl_enable) {
+ /* Already disabled */
+ return 0;
+ }
+
+ cpr2_gfx_clock_disable(cpr_vreg);
+ cpr_vreg->ctrl_enable = false;
+ cpr_debug(cpr_vreg, "CPR closed-loop operation disabled\n");
+
+ return 0;
+}
+
+static int cpr2_gfx_regulator_enable(struct regulator_dev *rdev)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ int rc = 0;
+
+ /* Enable dependency power before vdd_gfx */
+ if (cpr_vreg->vdd_mx) {
+ rc = regulator_enable(cpr_vreg->vdd_mx);
+ if (rc) {
+ cpr_err(cpr_vreg, "regulator_enable: vdd_mx: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = regulator_enable(cpr_vreg->vdd_gfx);
+ if (rc) {
+ cpr_err(cpr_vreg, "regulator_enable: vdd_gfx: rc=%d\n", rc);
+ return rc;
+ }
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+ cpr_vreg->vreg_enabled = true;
+ if (cpr_is_allowed(cpr_vreg)) {
+ rc = cpr2_gfx_closed_loop_enable(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not enable CPR, rc=%d\n", rc);
+ goto _exit;
+ }
+
+ if (cpr_vreg->corner) {
+ cpr_irq_clr(cpr_vreg);
+ cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+ }
+ }
+
+ cpr_debug(cpr_vreg, "cpr_enable = %s cpr_corner = %d\n",
+ cpr_vreg->enable ? "enabled" : "disabled",
+ cpr_vreg->corner);
+_exit:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return 0;
+}
+
+static int cpr2_gfx_regulator_disable(struct regulator_dev *rdev)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = regulator_disable(cpr_vreg->vdd_gfx);
+ if (!rc) {
+ if (cpr_vreg->vdd_mx) {
+ rc = regulator_disable(cpr_vreg->vdd_mx);
+ if (rc) {
+ cpr_err(cpr_vreg, "regulator_disable: vdd_mx: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+ cpr_vreg->vreg_enabled = false;
+ if (cpr_is_allowed(cpr_vreg)) {
+ cpr_ctl_disable(cpr_vreg);
+ cpr2_gfx_closed_loop_disable(cpr_vreg);
+ }
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ } else {
+ cpr_err(cpr_vreg, "regulator_disable: vdd_gfx: rc=%d\n", rc);
+ }
+
+ cpr_debug(cpr_vreg, "cpr_enable = %s\n",
+ cpr_vreg->enable ? "enabled" : "disabled");
+ return rc;
+}
+
+static int cpr2_gfx_regulator_set_voltage(struct regulator_dev *rdev,
+ int corner, int corner_max, unsigned *selector)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ int new_volt;
+ enum voltage_change_dir change_dir = NO_CHANGE;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ if (cpr_vreg->ctrl_enable) {
+ cpr_ctl_disable(cpr_vreg);
+ new_volt = cpr_vreg->last_volt[corner];
+ } else {
+ new_volt = cpr_vreg->open_loop_volt[corner];
+ }
+
+ cpr_debug(cpr_vreg, "[corner:%d] = %d uV\n", corner, new_volt);
+
+ if (corner > cpr_vreg->corner)
+ change_dir = UP;
+ else if (corner < cpr_vreg->corner)
+ change_dir = DOWN;
+
+ rc = cpr2_gfx_scale_voltage(cpr_vreg, corner, new_volt, change_dir);
+ if (rc)
+ goto _exit;
+
+ if (cpr_vreg->ctrl_enable) {
+ cpr_irq_clr(cpr_vreg);
+ cpr_corner_switch(cpr_vreg, corner);
+ cpr_ctl_enable(cpr_vreg, corner);
+ }
+
+ cpr_vreg->corner = corner;
+
+_exit:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return rc;
+}
+
+static int cpr2_gfx_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = rdev_get_drvdata(rdev);
+
+ return cpr_vreg->corner;
+}
+
+static struct regulator_ops cpr_corner_ops = {
+ .enable = cpr2_gfx_regulator_enable,
+ .disable = cpr2_gfx_regulator_disable,
+ .is_enabled = cpr2_gfx_regulator_is_enabled,
+ .set_voltage = cpr2_gfx_regulator_set_voltage,
+ .get_voltage = cpr2_gfx_regulator_get_voltage,
+};
+
+static int cpr2_gfx_regulator_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = platform_get_drvdata(pdev);
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ cpr_debug(cpr_vreg, "suspend\n");
+
+ if (cpr_vreg->vreg_enabled && cpr_is_allowed(cpr_vreg)) {
+ cpr_ctl_disable(cpr_vreg);
+ cpr_irq_clr(cpr_vreg);
+ cpr2_gfx_closed_loop_disable(cpr_vreg);
+ }
+
+ cpr_vreg->is_cpr_suspended = true;
+
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ return 0;
+}
+
+static int cpr2_gfx_regulator_resume(struct platform_device *pdev)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = platform_get_drvdata(pdev);
+ int rc = 0;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ cpr_vreg->is_cpr_suspended = false;
+ cpr_debug(cpr_vreg, "resume\n");
+
+ if (cpr_vreg->vreg_enabled && cpr_is_allowed(cpr_vreg)) {
+ rc = cpr2_gfx_closed_loop_enable(cpr_vreg);
+ if (rc)
+ cpr_err(cpr_vreg, "could not enable CPR, rc=%d\n", rc);
+
+ cpr_irq_clr(cpr_vreg);
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+ }
+
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return 0;
+}
+
+static int cpr2_gfx_allocate_memory(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device *dev = cpr_vreg->dev;
+ int rc, i;
+ size_t len;
+
+ rc = of_property_read_u32(dev->of_node, "qcom,cpr-corners",
+ &cpr_vreg->num_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "qcom,cpr-corners missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ if (cpr_vreg->num_corners < CPR_CORNER_MIN
+ || cpr_vreg->num_corners > CPR_CORNER_LIMIT) {
+ cpr_err(cpr_vreg, "corner count=%d is invalid\n",
+ cpr_vreg->num_corners);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(dev->of_node, "qcom,cpr-ro-count",
+ &cpr_vreg->ro_count);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "qcom,cpr-ro-count missing or read failed: rc=%d\n",
+ rc);
+ return rc;
+ }
+ cpr_info(cpr_vreg, "ro_count = %d\n", cpr_vreg->ro_count);
+
+ /*
+ * The arrays sized based on the corner count ignore element 0
+ * in order to simplify indexing throughout the driver since min_uV = 0
+ * cannot be passed into a set_voltage() callback.
+ */
+ len = cpr_vreg->num_corners + 1;
+
+ cpr_vreg->open_loop_volt = devm_kzalloc(dev,
+ len * sizeof(*cpr_vreg->open_loop_volt), GFP_KERNEL);
+ cpr_vreg->cpr_target_quot = devm_kzalloc(dev,
+ len * sizeof(int *), GFP_KERNEL);
+ cpr_vreg->ceiling_volt = devm_kzalloc(dev,
+ len * (sizeof(*cpr_vreg->ceiling_volt)), GFP_KERNEL);
+ cpr_vreg->floor_volt = devm_kzalloc(dev,
+ len * (sizeof(*cpr_vreg->floor_volt)), GFP_KERNEL);
+
+ if (cpr_vreg->open_loop_volt == NULL
+ || cpr_vreg->cpr_target_quot == NULL
+ || cpr_vreg->ceiling_volt == NULL
+ || cpr_vreg->floor_volt == NULL) {
+ cpr_err(cpr_vreg, "Could not allocate memory for CPR arrays\n");
+ return -ENOMEM;
+ }
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ cpr_vreg->cpr_target_quot[i] = devm_kzalloc(dev,
+ cpr_vreg->ro_count * sizeof(*cpr_vreg->cpr_target_quot),
+ GFP_KERNEL);
+ if (!cpr_vreg->cpr_target_quot[i]) {
+ cpr_err(cpr_vreg, "Could not allocate memory\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int cpr_mem_acc_init(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device *dev = cpr_vreg->dev;
+ int rc;
+
+ if (of_find_property(dev->of_node, "mem-acc-supply", NULL)) {
+ cpr_vreg->mem_acc_vreg = devm_regulator_get(dev, "mem-acc");
+ if (IS_ERR_OR_NULL(cpr_vreg->mem_acc_vreg)) {
+ rc = PTR_RET(cpr_vreg->mem_acc_vreg);
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg,
+ "devm_regulator_get: mem-acc: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int cpr_efuse_init(struct platform_device *pdev,
+ struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct resource *res;
+ int len;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+ if (!res || !res->start) {
+ cpr_err(cpr_vreg, "efuse_addr missing: res=%p\n", res);
+ return -EINVAL;
+ }
+
+ cpr_vreg->efuse_addr = res->start;
+ len = res->end - res->start + 1;
+
+ cpr_info(cpr_vreg, "efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+ cpr_vreg->efuse_base = ioremap(cpr_vreg->efuse_addr, len);
+ if (!cpr_vreg->efuse_base) {
+ cpr_err(cpr_vreg, "Unable to map efuse_addr %pa\n",
+ &cpr_vreg->efuse_addr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpr_parse_fuse_parameters(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device *dev = cpr_vreg->dev;
+ u32 fuse_sel[3];
+ int rc;
+
+ rc = of_property_read_u32_array(dev->of_node, "qcom,cpr-fuse-revision",
+ fuse_sel, 3);
+ if (rc < 0) {
+ if (rc != -EINVAL) {
+ cpr_err(cpr_vreg, "qcom,cpr-fuse-revision read failed: rc=%d\n",
+ rc);
+ return rc;
+ } else {
+			/* Property does not exist; assign a wildcard value */
+ cpr_vreg->cpr_fuse_revision = FUSE_REVISION_UNKNOWN;
+ }
+ } else {
+ cpr_vreg->cpr_fuse_revision = cpr_read_efuse_param(cpr_vreg,
+ fuse_sel[0], fuse_sel[1], fuse_sel[2]);
+ cpr_info(cpr_vreg, "fuse revision = %d\n",
+ cpr_vreg->cpr_fuse_revision);
+ }
+
+ rc = of_property_read_u32_array(dev->of_node, "qcom,process-id-fuse",
+ fuse_sel, 3);
+ if (rc < 0) {
+ if (rc != -EINVAL) {
+ cpr_err(cpr_vreg, "qcom,process-id-fuse read failed: rc=%d\n",
+ rc);
+ return rc;
+ } else {
+			/* Property does not exist; assign a wildcard value */
+ cpr_vreg->process_id = (INT_MAX - 1);
+ }
+ } else {
+ cpr_vreg->process_id = cpr_read_efuse_param(cpr_vreg,
+ fuse_sel[0], fuse_sel[1], fuse_sel[2]);
+ cpr_info(cpr_vreg, "process id = %d\n", cpr_vreg->process_id);
+ }
+
+ rc = of_property_read_u32_array(dev->of_node, "qcom,foundry-id-fuse",
+ fuse_sel, 3);
+ if (rc < 0) {
+ if (rc != -EINVAL) {
+ cpr_err(cpr_vreg, "qcom,foundry-id-fuse read failed: rc=%d\n",
+ rc);
+ return rc;
+ } else {
+			/* Property does not exist; assign a wildcard value */
+ cpr_vreg->foundry_id = (INT_MAX - 1);
+ }
+ } else {
+ cpr_vreg->foundry_id
+ = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+ fuse_sel[1], fuse_sel[2]);
+ cpr_info(cpr_vreg, "foundry_id = %d\n", cpr_vreg->foundry_id);
+ }
+
+ return 0;
+}
+
+static int cpr_find_fuse_map_match(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device_node *of_node = cpr_vreg->dev->of_node;
+ int i, rc, tuple_size;
+ int len = 0;
+ u32 *tmp;
+
+ /* Specify default no match case. */
+ cpr_vreg->cpr_fuse_map_match = FUSE_MAP_NO_MATCH;
+ cpr_vreg->cpr_fuse_map_count = 0;
+
+ if (!of_find_property(of_node, "qcom,cpr-fuse-version-map", &len)) {
+ /* No mapping present. */
+ return 0;
+ }
+
+ tuple_size = 3; /* <foundry_id> <cpr_fuse_revision> <process_id> */
+ cpr_vreg->cpr_fuse_map_count = len / (sizeof(u32) * tuple_size);
+
+ if (len == 0 || len % (sizeof(u32) * tuple_size)) {
+ cpr_err(cpr_vreg, "qcom,cpr-fuse-version-map length=%d is invalid\n",
+ len);
+ return -EINVAL;
+ }
+
+ tmp = kzalloc(len, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-version-map",
+ tmp, cpr_vreg->cpr_fuse_map_count * tuple_size);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,cpr-fuse-version-map, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ /*
+ * qcom,cpr-fuse-version-map tuple format:
+	 * <foundry_id, cpr_fuse_revision, process_id>
+ */
+ for (i = 0; i < cpr_vreg->cpr_fuse_map_count; i++) {
+ if (tmp[i * tuple_size] != cpr_vreg->foundry_id
+ && tmp[i * tuple_size] != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (tmp[i * tuple_size + 1] != cpr_vreg->cpr_fuse_revision
+ && tmp[i * tuple_size + 1] != FUSE_PARAM_MATCH_ANY)
+ continue;
+		if (tmp[i * tuple_size + 2] != cpr_vreg->process_id
+			&& tmp[i * tuple_size + 2] != FUSE_PARAM_MATCH_ANY)
+ continue;
+
+ cpr_vreg->cpr_fuse_map_match = i;
+ break;
+ }
+
+ if (cpr_vreg->cpr_fuse_map_match != FUSE_MAP_NO_MATCH)
+ cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match found: %d\n",
+ cpr_vreg->cpr_fuse_map_match);
+ else
+ cpr_debug(cpr_vreg, "qcom,cpr-fuse-version-map tuple match not found\n");
+
+done:
+ kfree(tmp);
+ return rc;
+}
+
+static int cpr_voltage_plan_init(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device_node *of_node = cpr_vreg->dev->of_node;
+ int highest_corner = cpr_vreg->num_corners;
+ int rc;
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-ceiling",
+ &cpr_vreg->ceiling_volt[CPR_CORNER_MIN], cpr_vreg->num_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "cpr-voltage-ceiling missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-voltage-floor",
+ &cpr_vreg->floor_volt[CPR_CORNER_MIN],
+ cpr_vreg->num_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "cpr-voltage-floor missing: rc=%d\n", rc);
+ return rc;
+ }
+
+ cpr_vreg->ceiling_max
+ = cpr_vreg->ceiling_volt[highest_corner];
+
+ return 0;
+}
+
+static int cpr_adjust_init_voltages(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device_node *of_node = cpr_vreg->dev->of_node;
+ int tuple_count, tuple_match, i;
+ u32 index;
+ u32 volt_adjust = 0;
+ int len = 0;
+ int rc = 0;
+
+ if (!of_find_property(of_node, "qcom,cpr-init-voltage-adjustment",
+ &len)) {
+ /* No initial voltage adjustment needed. */
+ return 0;
+ }
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /*
+ * No matching index to use for initial voltage
+ * adjustment.
+ */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != cpr_vreg->num_corners * tuple_count * sizeof(u32)) {
+ cpr_err(cpr_vreg, "qcom,cpr-init-voltage-adjustment length=%d is invalid\n",
+ len);
+ return -EINVAL;
+ }
+
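+ /*
+ * Illustrative indexing example (hypothetical sizes): with
+ * cpr_fuse_map_count = 2 tuples and num_corners = 3, the property holds
+ * 6 values; tuple_match = 1 selects entries 3..5, one adjustment per
+ * virtual corner, via the index computed below.
+ */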
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ index = tuple_match * cpr_vreg->num_corners
+ + i - CPR_CORNER_MIN;
+ rc = of_property_read_u32_index(of_node,
+ "qcom,cpr-init-voltage-adjustment", index,
+ &volt_adjust);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not read qcom,cpr-init-voltage-adjustment index %u, rc=%d\n",
+ index, rc);
+ return rc;
+ }
+
+ if (volt_adjust) {
+ cpr_vreg->open_loop_volt[i] += volt_adjust;
+ cpr_info(cpr_vreg, "adjusted initial voltage[%d]: %d -> %d uV\n",
+ i, cpr_vreg->open_loop_volt[i] - volt_adjust,
+ cpr_vreg->open_loop_volt[i]);
+ }
+ }
+
+ return rc;
+}
+
+static int cpr_pvs_init(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device_node *of_node = cpr_vreg->dev->of_node;
+ u64 efuse_bits;
+ int i, size, sign, steps, step_size_uv, rc, pos;
+ u32 *fuse_sel, *tmp, *ref_uv;
+ struct property *prop;
+ size_t buflen;
+ char *buf;
+
+ rc = of_property_read_u32(of_node, "qcom,cpr-gfx-volt-step",
+ &cpr_vreg->step_volt);
+ if (rc < 0) {
+ cpr_err(cpr_vreg, "read cpr-gfx-volt-step failed, rc = %d\n",
+ rc);
+ return rc;
+ } else if (cpr_vreg->step_volt == 0) {
+ cpr_err(cpr_vreg, "gfx voltage step size can't be set to 0.\n");
+ return -EINVAL;
+ }
+
+ prop = of_find_property(of_node, "qcom,cpr-fuse-init-voltage", NULL);
+ if (!prop) {
+ cpr_err(cpr_vreg, "qcom,cpr-fuse-init-voltage is missing\n");
+ return -EINVAL;
+ }
+ size = prop->length / sizeof(u32);
+ if (size != cpr_vreg->num_corners * 3) {
+ cpr_err(cpr_vreg,
+ "fuse position for init voltages is invalid\n");
+ return -EINVAL;
+ }
+ fuse_sel = kcalloc(size, sizeof(u32), GFP_KERNEL);
+ if (!fuse_sel)
+ return -ENOMEM;
+ /* Keep the original pointer for freeing; fuse_sel is advanced below. */
+ tmp = fuse_sel;
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-fuse-init-voltage",
+ fuse_sel, size);
+ if (rc < 0) {
+ cpr_err(cpr_vreg,
+ "read cpr-fuse-init-voltage failed, rc = %d\n", rc);
+ kfree(fuse_sel);
+ return rc;
+ }
+ rc = of_property_read_u32(of_node, "qcom,cpr-init-voltage-step",
+ &step_size_uv);
+ if (rc < 0) {
+ cpr_err(cpr_vreg,
+ "read cpr-init-voltage-step failed, rc = %d\n", rc);
+ kfree(fuse_sel);
+ return rc;
+ }
+
+ ref_uv = kcalloc((cpr_vreg->num_corners + 1), sizeof(*ref_uv),
+ GFP_KERNEL);
+ if (!ref_uv) {
+ kfree(fuse_sel);
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cpr-init-voltage-ref",
+ &ref_uv[CPR_CORNER_MIN], cpr_vreg->num_corners);
+ if (rc < 0) {
+ cpr_err(cpr_vreg,
+ "read qcom,cpr-init-voltage-ref failed, rc = %d\n", rc);
+ goto done;
+ }
+
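+ /*
+ * Worked example of the decode below (hypothetical numbers): for a
+ * 6-bit fuse field with value 0b100101, the MSB selects a negative sign
+ * and the remaining bits give steps = 5. With a reference of 905000 uV
+ * and a 10000 uV fuse step this yields 855000 uV, which is then rounded
+ * up to the next multiple of the regulator step size (e.g. 862500 uV
+ * for a 12500 uV step).
+ */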
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ efuse_bits = cpr_read_efuse_param(cpr_vreg, fuse_sel[0],
+ fuse_sel[1], fuse_sel[2]);
+ sign = (efuse_bits & (1 << (fuse_sel[2] - 1))) ? -1 : 1;
+ steps = efuse_bits & ((1 << (fuse_sel[2] - 1)) - 1);
+ cpr_vreg->open_loop_volt[i] =
+ ref_uv[i] + sign * steps * step_size_uv;
+ cpr_vreg->open_loop_volt[i] = DIV_ROUND_UP(
+ cpr_vreg->open_loop_volt[i],
+ cpr_vreg->step_volt) *
+ cpr_vreg->step_volt;
+ cpr_debug(cpr_vreg, "corner %d: sign = %d, steps = %d, volt = %d uV\n",
+ i, sign, steps, cpr_vreg->open_loop_volt[i]);
+ fuse_sel += 3;
+ }
+
+ rc = cpr_adjust_init_voltages(cpr_vreg);
+ if (rc)
+ goto done;
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ if (cpr_vreg->open_loop_volt[i]
+ > cpr_vreg->ceiling_volt[i]) {
+ cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d above ceiling %d\n",
+ i, cpr_vreg->open_loop_volt[i],
+ cpr_vreg->ceiling_volt[i]);
+ cpr_vreg->open_loop_volt[i]
+ = cpr_vreg->ceiling_volt[i];
+ } else if (cpr_vreg->open_loop_volt[i] <
+ cpr_vreg->floor_volt[i]) {
+ cpr_info(cpr_vreg, "Warning: initial voltage[%d] %d below floor %d\n",
+ i, cpr_vreg->open_loop_volt[i],
+ cpr_vreg->floor_volt[i]);
+ cpr_vreg->open_loop_volt[i]
+ = cpr_vreg->floor_volt[i];
+ }
+ }
+
+ /*
+ * Log ceiling, floor, and initial voltages since they are critical for
+ * all CPR debugging.
+ */
+ buflen = cpr_vreg->num_corners * (MAX_CHARS_PER_INT + 2)
+ * sizeof(*buf);
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (buf == NULL) {
+ cpr_err(cpr_vreg, "Could not allocate memory for corner voltage logging\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
+ pos += scnprintf(buf + pos, buflen - pos, "%u%s",
+ cpr_vreg->open_loop_volt[i],
+ i < cpr_vreg->num_corners ? " " : "");
+ cpr_info(cpr_vreg, "pvs voltage: [%s] uV\n", buf);
+
+ for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
+ pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+ cpr_vreg->ceiling_volt[i],
+ i < cpr_vreg->num_corners ? " " : "");
+ cpr_info(cpr_vreg, "ceiling voltage: [%s] uV\n", buf);
+
+ for (i = CPR_CORNER_MIN, pos = 0; i <= cpr_vreg->num_corners; i++)
+ pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+ cpr_vreg->floor_volt[i],
+ i < cpr_vreg->num_corners ? " " : "");
+ cpr_info(cpr_vreg, "floor voltage: [%s] uV\n", buf);
+
+ kfree(buf);
+
+done:
+ kfree(tmp);
+ kfree(ref_uv);
+
+ return rc;
+}
+
+static int cpr_parse_vdd_mx_parameters(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device_node *of_node = cpr_vreg->dev->of_node;
+ int rc, len, size;
+
+ if (!of_find_property(of_node, "qcom,vdd-mx-corner-map", &len)) {
+ cpr_err(cpr_vreg, "qcom,vdd-mx-corner-map missing");
+ return -EINVAL;
+ }
+
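+ /*
+ * Illustrative example (hypothetical values): for a regulator with
+ * three virtual corners, a property such as
+ *	qcom,vdd-mx-corner-map = <4 5 6>;
+ * supplies one value per GFX corner (presumably the vdd-mx level to
+ * request for that corner); the values are stored starting at index 1
+ * of vdd_mx_corner_map below.
+ */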
+ size = len / sizeof(u32);
+ if (size != cpr_vreg->num_corners) {
+ cpr_err(cpr_vreg,
+ "qcom,vdd-mx-corner-map length=%d is invalid: required:%u\n",
+ size, cpr_vreg->num_corners);
+ return -EINVAL;
+ }
+
+ cpr_vreg->vdd_mx_corner_map = devm_kzalloc(cpr_vreg->dev,
+ (size + 1) * sizeof(*cpr_vreg->vdd_mx_corner_map),
+ GFP_KERNEL);
+ if (!cpr_vreg->vdd_mx_corner_map) {
+ cpr_err(cpr_vreg,
+ "Can't allocate memory for cpr_vreg->vdd_mx_corner_map\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(of_node,
+ "qcom,vdd-mx-corner-map",
+ &cpr_vreg->vdd_mx_corner_map[1],
+ cpr_vreg->num_corners);
+ if (rc)
+ cpr_err(cpr_vreg,
+ "read qcom,vdd-mx-corner-map failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+static int cpr_gfx_init(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device_node *of_node = cpr_vreg->dev->of_node;
+ int rc = 0;
+
+ cpr_vreg->vdd_gfx = devm_regulator_get(cpr_vreg->dev, "vdd-gfx");
+ rc = PTR_RET(cpr_vreg->vdd_gfx);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg, "devm_regulator_get: rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Check dependencies */
+ if (of_find_property(of_node, "vdd-mx-supply", NULL)) {
+ cpr_vreg->vdd_mx = devm_regulator_get(cpr_vreg->dev, "vdd-mx");
+ if (IS_ERR_OR_NULL(cpr_vreg->vdd_mx)) {
+ rc = PTR_RET(cpr_vreg->vdd_mx);
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg, "devm_regulator_get: vdd_mx: rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr_parse_vdd_mx_parameters(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "parsing vdd_mx parameters failed: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int cpr_get_clock_handles(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ int rc;
+
+ cpr_vreg->core_clk = devm_clk_get(cpr_vreg->dev, "core_clk");
+ if (IS_ERR(cpr_vreg->core_clk)) {
+ rc = PTR_RET(cpr_vreg->core_clk);
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg, "unable to request core clock, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr_vreg->iface_clk = devm_clk_get(cpr_vreg->dev, "iface_clk");
+ if (IS_ERR(cpr_vreg->iface_clk)) {
+ rc = PTR_RET(cpr_vreg->iface_clk);
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg, "unable to request interface clock, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int cpr_init_target_quotients(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device_node *of_node = cpr_vreg->dev->of_node;
+ int rc, len, size, tuple_count, tuple_match, pos, i, j, k;
+ char *buf, *target_quot_str = "qcom,cpr-target-quotients";
+ size_t buflen;
+ u32 index;
+ int *temp;
+
+ if (!of_find_property(of_node, target_quot_str, &len)) {
+ cpr_err(cpr_vreg, "%s missing\n", target_quot_str);
+ return -EINVAL;
+ }
+
+ if (cpr_vreg->cpr_fuse_map_count) {
+ if (cpr_vreg->cpr_fuse_map_match == FUSE_MAP_NO_MATCH) {
+ /*
+ * No matching index to use for target quotient
+ * lookup.
+ */
+ return 0;
+ }
+ tuple_count = cpr_vreg->cpr_fuse_map_count;
+ tuple_match = cpr_vreg->cpr_fuse_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ size = len / sizeof(u32);
+
+ if (size != tuple_count * cpr_vreg->ro_count * cpr_vreg->num_corners) {
+ cpr_err(cpr_vreg, "%s length=%d is invalid\n", target_quot_str,
+ size);
+ return -EINVAL;
+ }
+
+ temp = kcalloc(size, sizeof(int), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(of_node, target_quot_str, temp, size);
+ if (rc) {
+ cpr_err(cpr_vreg, "failed to read %s, rc=%d\n",
+ target_quot_str, rc);
+ kfree(temp);
+ return rc;
+ }
+
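+ /*
+ * Illustrative indexing example (hypothetical sizes): with 2 fuse-map
+ * tuples, 3 corners and 2 ring oscillators, temp[] holds 12 values laid
+ * out tuple-major, then corner, then RO. For tuple_match = 1,
+ * corner i = CPR_CORNER_MIN + 1 and RO j = 1, the value is read from
+ * temp[(1 * 3 + 1) * 2 + 1] = temp[9].
+ */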
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ index = (tuple_match * cpr_vreg->num_corners
+ + i - CPR_CORNER_MIN) * cpr_vreg->ro_count;
+ for (j = 0; j < cpr_vreg->ro_count; j++) {
+ k = index + j;
+ cpr_vreg->cpr_target_quot[i][j] = temp[k];
+ }
+ }
+ kfree(temp);
+ /*
+ * Log per-virtual corner target quotients since they are useful for
+ * baseline CPR logging.
+ */
+ buflen = cpr_vreg->ro_count * (MAX_CHARS_PER_INT + 2) * sizeof(*buf);
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (buf == NULL) {
+ cpr_err(cpr_vreg, "Could not allocate memory for target quotient logging\n");
+ return 0;
+ }
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ pos = 0;
+ for (j = 0; j < cpr_vreg->ro_count; j++)
+ pos += scnprintf(buf + pos, buflen - pos, "%d%s",
+ cpr_vreg->cpr_target_quot[i][j],
+ j < cpr_vreg->ro_count - 1 ? " " : "");
+ cpr_info(cpr_vreg, "Corner[%d]: Target quotients: %s\n",
+ i, buf);
+ }
+ kfree(buf);
+
+ for (j = 0; j < cpr_vreg->ro_count; j++) {
+ for (i = CPR_CORNER_MIN + 1; i <= cpr_vreg->num_corners; i++) {
+ if (cpr_vreg->cpr_target_quot[i][j]
+ < cpr_vreg->cpr_target_quot[i - 1][j]) {
+ cpr_vreg->cpr_fuse_disable = true;
+ cpr_err(cpr_vreg, "invalid quotient values; permanently disabling CPR\n");
+ }
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Conditionally reduce the per-virtual-corner ceiling voltages if certain
+ * device tree flags are present.
+ */
+static int cpr_reduce_ceiling_voltage(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ int i;
+
+ if (!of_property_read_bool(cpr_vreg->dev->of_node,
+ "qcom,cpr-init-voltage-as-ceiling"))
+ return 0;
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++) {
+ cpr_vreg->ceiling_volt[i] = cpr_vreg->open_loop_volt[i];
+ cpr_debug(cpr_vreg, "lowered ceiling[%d] = %d uV\n",
+ i, cpr_vreg->ceiling_volt[i]);
+ }
+
+ return 0;
+}
+
+static int cpr_init_cpr_voltages(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ int i;
+ int size = cpr_vreg->num_corners + 1;
+
+ cpr_vreg->last_volt = devm_kzalloc(cpr_vreg->dev, sizeof(int) * size,
+ GFP_KERNEL);
+ if (!cpr_vreg->last_volt)
+ return -ENOMEM;
+
+ for (i = CPR_CORNER_MIN; i <= cpr_vreg->num_corners; i++)
+ cpr_vreg->last_volt[i] = cpr_vreg->open_loop_volt[i];
+
+ return 0;
+}
+
+#define CPR_PROP_READ_U32(cpr_vreg, of_node, cpr_property, cpr_config, rc) \
+do { \
+ if (!rc) { \
+ rc = of_property_read_u32(of_node, cpr_property, \
+ cpr_config); \
+ if (rc) { \
+ cpr_err(cpr_vreg, "Missing " #cpr_property \
+ ": rc = %d\n", rc); \
+ } \
+ } \
+} while (0)
+
+static int cpr_init_cpr_parameters(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct device_node *of_node = cpr_vreg->dev->of_node;
+ int rc = 0;
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-ref-clk",
+ &cpr_vreg->ref_clk_khz, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-timer-delay",
+ &cpr_vreg->timer_delay_us, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-timer-cons-up",
+ &cpr_vreg->timer_cons_up, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-timer-cons-down",
+ &cpr_vreg->timer_cons_down, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-irq-line",
+ &cpr_vreg->irq_line, rc);
+ if (rc)
+ return rc;
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-step-quotient",
+ &cpr_vreg->step_quotient, rc);
+ if (rc)
+ return rc;
+ cpr_info(cpr_vreg, "step_quotient = %u\n", cpr_vreg->step_quotient);
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-up-threshold",
+ &cpr_vreg->up_threshold, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-down-threshold",
+ &cpr_vreg->down_threshold, rc);
+ if (rc)
+ return rc;
+ cpr_info(cpr_vreg, "up threshold = %u, down threshold = %u\n",
+ cpr_vreg->up_threshold, cpr_vreg->down_threshold);
+
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-idle-clocks",
+ &cpr_vreg->idle_clocks, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,cpr-gcnt-time",
+ &cpr_vreg->gcnt_time_us, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,vdd-gfx-step-up-limit",
+ &cpr_vreg->vdd_gfx_step_up_limit, rc);
+ if (rc)
+ return rc;
+ CPR_PROP_READ_U32(cpr_vreg, of_node, "qcom,vdd-gfx-step-down-limit",
+ &cpr_vreg->vdd_gfx_step_down_limit, rc);
+ if (rc)
+ return rc;
+
+ /* Init module parameter with the DT value */
+ cpr_vreg->enable = of_property_read_bool(of_node, "qcom,cpr-enable");
+ cpr_info(cpr_vreg, "CPR is %s by default.\n",
+ cpr_vreg->enable ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int cpr_config(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ int i, rc;
+ u32 val, gcnt;
+ int size;
+
+ rc = clk_set_rate(cpr_vreg->core_clk, cpr_vreg->ref_clk_khz * 1000);
+ if (rc) {
+ cpr_err(cpr_vreg, "clk_set_rate(core_clk, %u) failed, rc=%d\n",
+ cpr_vreg->ref_clk_khz, rc);
+ return rc;
+ }
+
+ rc = cpr2_gfx_clock_enable(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "unable to enable CPR clocks, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Disable interrupt and CPR */
+ cpr_write(cpr_vreg, REG_RBIF_IRQ_EN(cpr_vreg->irq_line), 0);
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW Ceiling, Floor and vlevel */
+ val = ((RBIF_LIMIT_CEILING_DEFAULT & RBIF_LIMIT_CEILING_MASK)
+ << RBIF_LIMIT_CEILING_SHIFT)
+ | (RBIF_LIMIT_FLOOR_DEFAULT & RBIF_LIMIT_FLOOR_MASK);
+ cpr_write(cpr_vreg, REG_RBIF_LIMIT, val);
+ cpr_write(cpr_vreg, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < cpr_vreg->ro_count; i++)
+ cpr_write(cpr_vreg, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (cpr_vreg->ref_clk_khz * cpr_vreg->gcnt_time_us) / 1000;
+ gcnt = (gcnt & RBCPR_GCNT_TARGET_GCNT_MASK) <<
+ RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ cpr_vreg->gcnt = gcnt;
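+ /*
+ * Illustrative example (hypothetical DT values): with a 19200 kHz
+ * reference clock and qcom,cpr-gcnt-time = <1> us, gcnt evaluates to
+ * 19200 * 1 / 1000 = 19 reference-clock cycles before masking and
+ * shifting into the GCNT field.
+ */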
+
+ /* Program the delay count for the timer */
+ val = (cpr_vreg->ref_clk_khz * cpr_vreg->timer_delay_us) / 1000;
+ cpr_write(cpr_vreg, REG_RBCPR_TIMER_INTERVAL, val);
+ cpr_info(cpr_vreg, "Timer count: 0x%0x (for %d us)\n", val,
+ cpr_vreg->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = ((cpr_vreg->timer_cons_down & RBIF_TIMER_ADJ_CONS_DOWN_MASK)
+ << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT) |
+ (cpr_vreg->timer_cons_up & RBIF_TIMER_ADJ_CONS_UP_MASK);
+ cpr_write(cpr_vreg, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ cpr_vreg->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK;
+ cpr_vreg->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK;
+ val = (cpr_vreg->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT)
+ | (cpr_vreg->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT);
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(cpr_vreg, REG_RBCPR_CTL, val);
+
+ cpr_irq_set(cpr_vreg, CPR_INT_DEFAULT);
+
+ val = cpr_read(cpr_vreg, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ cpr_vreg->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ size = cpr_vreg->num_corners + 1;
+ cpr_vreg->save_ctl = devm_kzalloc(cpr_vreg->dev, sizeof(int) * size,
+ GFP_KERNEL);
+ cpr_vreg->save_irq = devm_kzalloc(cpr_vreg->dev, sizeof(int) * size,
+ GFP_KERNEL);
+ if (!cpr_vreg->save_ctl || !cpr_vreg->save_irq) {
+ rc = -ENOMEM;
+ goto _exit;
+ }
+
+ for (i = 1; i < size; i++)
+ cpr_corner_save(cpr_vreg, i);
+
+_exit:
+ cpr2_gfx_clock_disable(cpr_vreg);
+ return rc;
+}
+
+static int cpr_init_cpr(struct platform_device *pdev,
+ struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct resource *res;
+ int rc;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rbcpr");
+ if (!res || !res->start) {
+ cpr_err(cpr_vreg, "missing rbcpr address: res=%p\n", res);
+ return -EINVAL;
+ }
+ cpr_vreg->rbcpr_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!cpr_vreg->rbcpr_base) {
+ cpr_err(cpr_vreg, "ioremap rbcpr address=%p failed\n", res);
+ return -ENXIO;
+ }
+
+ rc = cpr_get_clock_handles(cpr_vreg);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg, "clocks read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Read target quotients from global target-quotient table passed
+ * through device node.
+ */
+ rc = cpr_init_target_quotients(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "target quotient table read failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Reduce the ceiling voltage if allowed. */
+ rc = cpr_reduce_ceiling_voltage(cpr_vreg);
+ if (rc)
+ return rc;
+
+ /* Init all voltage set points of GFX regulator for CPR */
+ rc = cpr_init_cpr_voltages(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "init closed loop voltages failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Init CPR configuration parameters */
+ rc = cpr_init_cpr_parameters(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "init cpr configuration parameters failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get and Init interrupt */
+ cpr_vreg->cpr_irq = platform_get_irq(pdev, 0);
+ if (cpr_vreg->cpr_irq <= 0) {
+ cpr_err(cpr_vreg, "missing CPR IRQ\n");
+ return -EINVAL;
+ }
+
+ /* Configure CPR HW but keep it disabled */
+ rc = cpr_config(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "configure CPR HW failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = devm_request_threaded_irq(&pdev->dev, cpr_vreg->cpr_irq, NULL,
+ cpr2_gfx_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr", cpr_vreg);
+ if (rc)
+ cpr_err(cpr_vreg, "CPR: request irq failed for IRQ %d\n",
+ cpr_vreg->cpr_irq);
+
+ return rc;
+}
+
+static void cpr_gfx_exit(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ if (cpr_vreg->vreg_enabled) {
+ regulator_disable(cpr_vreg->vdd_gfx);
+
+ if (cpr_vreg->vdd_mx)
+ regulator_disable(cpr_vreg->vdd_mx);
+ }
+}
+
+static void cpr_efuse_free(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ iounmap(cpr_vreg->efuse_base);
+}
+
+static int cpr_enable_set(void *data, u64 val)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = data;
+ bool old_cpr_enable;
+ int rc = 0;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ old_cpr_enable = cpr_vreg->enable;
+ cpr_vreg->enable = val;
+
+ if (old_cpr_enable == cpr_vreg->enable)
+ goto _exit;
+
+ if (cpr_vreg->enable && cpr_vreg->cpr_fuse_disable) {
+ cpr_info(cpr_vreg,
+ "CPR permanently disabled due to fuse values\n");
+ cpr_vreg->enable = false;
+ goto _exit;
+ }
+
+ cpr_debug(cpr_vreg, "%s CPR [corner=%d]\n",
+ cpr_vreg->enable ? "enabling" : "disabling", cpr_vreg->corner);
+
+ if (cpr_vreg->corner) {
+ if (cpr_vreg->enable) {
+ rc = cpr2_gfx_closed_loop_enable(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "could not enable CPR, rc=%d\n",
+ rc);
+ goto _exit;
+ }
+
+ cpr_ctl_disable(cpr_vreg);
+ cpr_irq_clr(cpr_vreg);
+ cpr_corner_restore(cpr_vreg, cpr_vreg->corner);
+ cpr_ctl_enable(cpr_vreg, cpr_vreg->corner);
+ } else {
+ cpr_ctl_disable(cpr_vreg);
+ cpr_irq_set(cpr_vreg, 0);
+ cpr2_gfx_closed_loop_disable(cpr_vreg);
+ }
+ }
+
+_exit:
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+ return rc;
+}
+
+static int cpr_enable_get(void *data, u64 *val)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = data;
+
+ *val = cpr_vreg->enable;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_enable_fops, cpr_enable_get, cpr_enable_set,
+ "%llu\n");
+
+static int cpr_get_cpr_ceiling(void *data, u64 *val)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = data;
+
+ *val = cpr_vreg->ceiling_volt[cpr_vreg->corner];
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_ceiling_fops, cpr_get_cpr_ceiling, NULL,
+ "%llu\n");
+
+static int cpr_get_cpr_floor(void *data, u64 *val)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = data;
+
+ *val = cpr_vreg->floor_volt[cpr_vreg->corner];
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr_floor_fops, cpr_get_cpr_floor, NULL,
+ "%llu\n");
+
+static int cpr2_gfx_debug_info_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static ssize_t cpr2_gfx_debug_info_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = file->private_data;
+ char *debugfs_buf;
+ ssize_t len, ret = 0;
+ u32 gcnt, ro_sel, ctl, irq_status, reg, error_steps;
+ u32 step_dn, step_up, error, error_lt0, busy;
+
+ debugfs_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!debugfs_buf)
+ return -ENOMEM;
+
+ mutex_lock(&cpr_vreg->cpr_mutex);
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "corner = %d, current_volt = %d uV\n",
+ cpr_vreg->corner, cpr_vreg->last_volt[cpr_vreg->corner]);
+ ret += len;
+
+ for (ro_sel = 0; ro_sel < cpr_vreg->ro_count; ro_sel++) {
+ gcnt = cpr_read(cpr_vreg, REG_RBCPR_GCNT_TARGET(ro_sel));
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_gcnt_target (%u) = 0x%02X\n",
+ ro_sel, gcnt);
+ ret += len;
+ }
+
+ ctl = cpr_read(cpr_vreg, REG_RBCPR_CTL);
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_ctl = 0x%02X\n", ctl);
+ ret += len;
+
+ irq_status = cpr_read(cpr_vreg, REG_RBIF_IRQ_STATUS);
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_irq_status = 0x%02X\n", irq_status);
+ ret += len;
+
+ reg = cpr_read(cpr_vreg, REG_RBCPR_RESULT_0);
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ "rbcpr_result_0 = 0x%02X\n", reg);
+ ret += len;
+
+ step_dn = reg & 0x01;
+ step_up = (reg >> RBCPR_RESULT0_STEP_UP_SHIFT) & 0x01;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ " [step_dn = %u", step_dn);
+ ret += len;
+
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", step_up = %u", step_up);
+ ret += len;
+
+ error_steps = (reg >> RBCPR_RESULT0_ERROR_STEPS_SHIFT)
+ & RBCPR_RESULT0_ERROR_STEPS_MASK;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", error_steps = %u", error_steps);
+ ret += len;
+
+ error = (reg >> RBCPR_RESULT0_ERROR_SHIFT) & RBCPR_RESULT0_ERROR_MASK;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", error = %u", error);
+ ret += len;
+
+ error_lt0 = (reg >> RBCPR_RESULT0_ERROR_LT0_SHIFT) & 0x01;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", error_lt_0 = %u", error_lt0);
+ ret += len;
+
+ busy = (reg >> RBCPR_RESULT0_BUSY_SHIFT) & 0x01;
+ len = snprintf(debugfs_buf + ret, PAGE_SIZE - ret,
+ ", busy = %u]\n", busy);
+ ret += len;
+ mutex_unlock(&cpr_vreg->cpr_mutex);
+
+ ret = simple_read_from_buffer(buff, count, ppos, debugfs_buf, ret);
+ kfree(debugfs_buf);
+ return ret;
+}
+
+static const struct file_operations cpr2_gfx_debug_info_fops = {
+ .open = cpr2_gfx_debug_info_open,
+ .read = cpr2_gfx_debug_info_read,
+};
+
+static void cpr2_gfx_debugfs_init(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ struct dentry *temp;
+
+ if (IS_ERR_OR_NULL(cpr2_gfx_debugfs_base)) {
+ cpr_err(cpr_vreg, "Could not create debugfs nodes since base directory is missing\n");
+ return;
+ }
+
+ cpr_vreg->debugfs = debugfs_create_dir(cpr_vreg->rdesc.name,
+ cpr2_gfx_debugfs_base);
+ if (IS_ERR_OR_NULL(cpr_vreg->debugfs)) {
+ cpr_err(cpr_vreg, "debugfs directory creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("debug_info", S_IRUGO, cpr_vreg->debugfs,
+ cpr_vreg, &cpr2_gfx_debug_info_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "debug_info node creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("cpr_enable", S_IRUGO | S_IWUSR,
+ cpr_vreg->debugfs, cpr_vreg, &cpr_enable_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "cpr_enable node creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("cpr_ceiling", S_IRUGO,
+ cpr_vreg->debugfs, cpr_vreg, &cpr_ceiling_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "cpr_ceiling node creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("cpr_floor", S_IRUGO,
+ cpr_vreg->debugfs, cpr_vreg, &cpr_floor_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr_err(cpr_vreg, "cpr_floor node creation failed\n");
+ return;
+ }
+}
+
+static void cpr2_gfx_debugfs_remove(struct cpr2_gfx_regulator *cpr_vreg)
+{
+ debugfs_remove_recursive(cpr_vreg->debugfs);
+}
+
+static void cpr2_gfx_debugfs_base_init(void)
+{
+ cpr2_gfx_debugfs_base = debugfs_create_dir("cpr2-gfx-regulator",
+ NULL);
+ if (IS_ERR_OR_NULL(cpr2_gfx_debugfs_base))
+ pr_err("cpr2-gfx-regulator debugfs base directory creation failed\n");
+}
+
+static void cpr2_gfx_debugfs_base_remove(void)
+{
+ debugfs_remove_recursive(cpr2_gfx_debugfs_base);
+}
+
+static int cpr2_gfx_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_config reg_config = {};
+ struct cpr2_gfx_regulator *cpr_vreg;
+ struct regulator_desc *rdesc;
+ struct device *dev = &pdev->dev;
+ struct regulator_init_data *init_data = pdev->dev.platform_data;
+ int rc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "Device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ init_data = of_get_regulator_init_data(dev, dev->of_node);
+ if (!init_data) {
+ dev_err(dev, "regulator init data is missing\n");
+ return -EINVAL;
+ }
+
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS;
+
+ cpr_vreg = devm_kzalloc(dev, sizeof(*cpr_vreg), GFP_KERNEL);
+ if (!cpr_vreg)
+ return -ENOMEM;
+
+ cpr_vreg->dev = dev;
+ mutex_init(&cpr_vreg->cpr_mutex);
+
+ cpr_vreg->rdesc.name = init_data->constraints.name;
+ if (cpr_vreg->rdesc.name == NULL) {
+ dev_err(dev, "regulator-name missing\n");
+ return -EINVAL;
+ }
+
+ rc = cpr2_gfx_allocate_memory(cpr_vreg);
+ if (rc)
+ return rc;
+
+ rc = cpr_mem_acc_init(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "mem_acc initialization error: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr_efuse_init(pdev, cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Wrong eFuse address specified: rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr_parse_fuse_parameters(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Failed to parse fuse parameters: rc=%d\n",
+ rc);
+ goto err_out;
+ }
+
+ rc = cpr_find_fuse_map_match(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Could not determine fuse mapping match: rc=%d\n",
+ rc);
+ goto err_out;
+ }
+
+ rc = cpr_voltage_plan_init(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Wrong DT parameter specified: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_pvs_init(cpr_vreg);
+ if (rc) {
+ cpr_err(cpr_vreg, "Initialize PVS wrong: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_gfx_init(cpr_vreg);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg, "Initialize GFX wrong: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ rc = cpr_init_cpr(pdev, cpr_vreg);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr_err(cpr_vreg, "Initialize CPR failed: rc=%d\n", rc);
+ goto err_out;
+ }
+
+ /*
+ * Ensure that enable state accurately reflects the case in which CPR
+ * is permanently disabled.
+ */
+ cpr_vreg->enable &= !cpr_vreg->cpr_fuse_disable;
+
+ platform_set_drvdata(pdev, cpr_vreg);
+
+ rdesc = &cpr_vreg->rdesc;
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &cpr_corner_ops;
+
+ reg_config.dev = &pdev->dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = cpr_vreg;
+ reg_config.of_node = pdev->dev.of_node;
+ cpr_vreg->rdev = regulator_register(rdesc, &reg_config);
+ if (IS_ERR(cpr_vreg->rdev)) {
+ rc = PTR_ERR(cpr_vreg->rdev);
+ cpr_err(cpr_vreg, "regulator_register failed: rc=%d\n", rc);
+
+ cpr_gfx_exit(cpr_vreg);
+ goto err_out;
+ }
+
+ cpr2_gfx_debugfs_init(cpr_vreg);
+
+ mutex_lock(&cpr2_gfx_regulator_list_mutex);
+ list_add(&cpr_vreg->list, &cpr2_gfx_regulator_list);
+ mutex_unlock(&cpr2_gfx_regulator_list_mutex);
+
+err_out:
+ cpr_efuse_free(cpr_vreg);
+ return rc;
+}
+
+static int cpr2_gfx_regulator_remove(struct platform_device *pdev)
+{
+ struct cpr2_gfx_regulator *cpr_vreg = platform_get_drvdata(pdev);
+
+ if (cpr_vreg) {
+ /* Disable CPR */
+ if (cpr_vreg->ctrl_enable) {
+ cpr_ctl_disable(cpr_vreg);
+ cpr_irq_set(cpr_vreg, 0);
+ cpr2_gfx_closed_loop_disable(cpr_vreg);
+ }
+
+ mutex_lock(&cpr2_gfx_regulator_list_mutex);
+ list_del(&cpr_vreg->list);
+ mutex_unlock(&cpr2_gfx_regulator_list_mutex);
+
+ cpr_gfx_exit(cpr_vreg);
+ cpr2_gfx_debugfs_remove(cpr_vreg);
+ regulator_unregister(cpr_vreg->rdev);
+ }
+
+ return 0;
+}
+
+static struct of_device_id cpr2_gfx_regulator_match_table[] = {
+ { .compatible = "qcom,cpr2-gfx-regulator", },
+ {}
+};
+
+static struct platform_driver cpr2_gfx_regulator_driver = {
+ .driver = {
+ .name = "qcom,cpr2-gfx-regulator",
+ .of_match_table = cpr2_gfx_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = cpr2_gfx_regulator_probe,
+ .remove = cpr2_gfx_regulator_remove,
+ .suspend = cpr2_gfx_regulator_suspend,
+ .resume = cpr2_gfx_regulator_resume,
+};
+
+static int cpr2_gfx_regulator_init(void)
+{
+ cpr2_gfx_debugfs_base_init();
+ return platform_driver_register(&cpr2_gfx_regulator_driver);
+}
+arch_initcall(cpr2_gfx_regulator_init);
+
+static void cpr2_gfx_regulator_exit(void)
+{
+ cpr2_gfx_debugfs_base_remove();
+ platform_driver_unregister(&cpr2_gfx_regulator_driver);
+}
+module_exit(cpr2_gfx_regulator_exit);
+
+MODULE_DESCRIPTION("CPR2 GFX regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/cpr3-hmss-regulator.c b/drivers/regulator/cpr3-hmss-regulator.c
new file mode 100644
index 000000000000..8c8761576bbc
--- /dev/null
+++ b/drivers/regulator/cpr3-hmss-regulator.c
@@ -0,0 +1,1730 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/kryo-regulator.h>
+
+#include "cpr3-regulator.h"
+
+#define MSM8996_HMSS_FUSE_CORNERS 5
+
+/**
+ * struct cpr3_msm8996_hmss_fuses - HMSS specific fuse data for MSM8996
+ * @ro_sel: Ring oscillator select fuse parameter value for each
+ * fuse corner
+ * @init_voltage: Initial (i.e. open-loop) voltage fuse parameter value
+ * for each fuse corner (raw, not converted to a voltage)
+ * @target_quot: CPR target quotient fuse parameter value for each fuse
+ * corner
+ * @quot_offset: CPR target quotient offset fuse parameter value for each
+ * fuse corner (raw, not unpacked) used for target quotient
+ * interpolation
+ * @speed_bin: Application processor speed bin fuse parameter value for
+ * the given chip
+ * @cpr_fusing_rev: CPR fusing revision fuse parameter value
+ * @redundant_fusing: Redundant fusing select fuse parameter value
+ * @limitation: CPR limitation select fuse parameter value
+ * @partial_binning: Chip partial binning fuse parameter value which defines
+ * limitations found on a given chip
+ * @vdd_mx_ret_fuse: Defines the logic retention voltage of VDD_MX
+ * @vdd_apcc_ret_fuse: Defines the logic retention voltage of VDD_APCC
+ * @aging_init_quot_diff: Initial quotient difference between CPR aging
+ * min and max sensors measured at time of manufacturing
+ *
+ * This struct holds the values for all of the fuses read from memory. The
+ * values for ro_sel, init_voltage, target_quot, and quot_offset come from
+ * either the primary or redundant fuse locations depending upon the value of
+ * redundant_fusing.
+ */
+struct cpr3_msm8996_hmss_fuses {
+ u64 ro_sel[MSM8996_HMSS_FUSE_CORNERS];
+ u64 init_voltage[MSM8996_HMSS_FUSE_CORNERS];
+ u64 target_quot[MSM8996_HMSS_FUSE_CORNERS];
+ u64 quot_offset[MSM8996_HMSS_FUSE_CORNERS];
+ u64 speed_bin;
+ u64 cpr_fusing_rev;
+ u64 redundant_fusing;
+ u64 limitation;
+ u64 partial_binning;
+ u64 vdd_mx_ret_fuse;
+ u64 vdd_apcc_ret_fuse;
+ u64 aging_init_quot_diff;
+};
+
+/*
+ * Fuse combos 0 - 7 map to CPR fusing revision 0 - 7 with speed bin fuse = 0.
+ * Fuse combos 8 - 15 map to CPR fusing revision 0 - 7 with speed bin fuse = 1.
+ */
+#define CPR3_MSM8996_HMSS_FUSE_COMBO_COUNT 16
+
+/*
+ * Constants which define the name of each fuse corner. Note that no actual
+ * fuses are defined for LowSVS. However, a mapping from corner to LowSVS
+ * is required in order to perform target quotient interpolation properly.
+ */
+enum cpr3_msm8996_hmss_fuse_corner {
+ CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS = 0,
+ CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS = 1,
+ CPR3_MSM8996_HMSS_FUSE_CORNER_SVS = 2,
+ CPR3_MSM8996_HMSS_FUSE_CORNER_NOM = 3,
+ CPR3_MSM8996_HMSS_FUSE_CORNER_TURBO = 4,
+};
+
+static const char * const cpr3_msm8996_hmss_fuse_corner_name[] = {
+ [CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS] = "MinSVS",
+ [CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS] = "LowSVS",
+ [CPR3_MSM8996_HMSS_FUSE_CORNER_SVS] = "SVS",
+ [CPR3_MSM8996_HMSS_FUSE_CORNER_NOM] = "NOM",
+ [CPR3_MSM8996_HMSS_FUSE_CORNER_TURBO] = "TURBO",
+};
+
+/* CPR3 hardware thread IDs */
+#define MSM8996_HMSS_POWER_CLUSTER_THREAD_ID 0
+#define MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID 1
+
+/*
+ * MSM8996 HMSS fuse parameter locations:
+ *
+ * Structs are organized with the following dimensions:
+ * Outer: 0 or 1 for power or performance cluster
+ * Middle: 0 to 3 for fuse corners from lowest to highest corner
+ * Inner: large enough to hold the longest set of parameter segments which
+ * fully defines a fuse parameter, +1 (for NULL termination).
+ * Each segment corresponds to a contiguous group of bits from a
+ * single fuse row. These segments are concatenated together in
+ * order to form the full fuse parameter value. The segments for
+ * a given parameter may correspond to different fuse rows.
+ *
+ * Note that there are only physically 4 sets of fuse parameters which
+ * correspond to the MinSVS, SVS, NOM, and TURBO fuse corners. However, the SVS
+ * quotient offset fuse is used to define the target quotient for the LowSVS
+ * fuse corner. In order to utilize LowSVS, it must be treated as if it were a
+ * real fully defined fuse corner. Thus, LowSVS fuse parameter locations are
+ * specified. These locations duplicate the SVS values in order to simplify
+ * interpolation logic.
+ */
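+/*
+ * Illustrative reading of the tables below (assuming struct cpr3_fuse_param
+ * encodes {row, first bit, last bit}): an entry such as {66, 38, 41} selects
+ * bits 38..41 of fuse row 66 as a single segment, while an entry list such
+ * as {{64, 62, 63}, {65, 0, 3}, {} } concatenates bits 62..63 of row 64 with
+ * bits 0..3 of row 65; the empty element terminates the segment list.
+ */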
+static const struct cpr3_fuse_param
+msm8996_hmss_ro_sel_param[2][MSM8996_HMSS_FUSE_CORNERS][2] = {
+ [MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+ {{66, 38, 41}, {} },
+ {{66, 38, 41}, {} },
+ {{66, 38, 41}, {} },
+ {{66, 34, 37}, {} },
+ {{66, 30, 33}, {} },
+ },
+ [MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{64, 54, 57}, {} },
+ {{64, 54, 57}, {} },
+ {{64, 54, 57}, {} },
+ {{64, 50, 53}, {} },
+ {{64, 46, 49}, {} },
+ },
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_init_voltage_param[2][MSM8996_HMSS_FUSE_CORNERS][3] = {
+ [MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+ {{67, 0, 5}, {} },
+ {{66, 58, 63}, {} },
+ {{66, 58, 63}, {} },
+ {{66, 52, 57}, {} },
+ {{66, 46, 51}, {} },
+ },
+ [MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{65, 16, 21}, {} },
+ {{65, 10, 15}, {} },
+ {{65, 10, 15}, {} },
+ {{65, 4, 9}, {} },
+ {{64, 62, 63}, {65, 0, 3}, {} },
+ },
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_target_quot_param[2][MSM8996_HMSS_FUSE_CORNERS][3] = {
+ [MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+ {{67, 42, 53}, {} },
+ {{67, 30, 41}, {} },
+ {{67, 30, 41}, {} },
+ {{67, 18, 29}, {} },
+ {{67, 6, 17}, {} },
+ },
+ [MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{65, 58, 63}, {66, 0, 5}, {} },
+ {{65, 46, 57}, {} },
+ {{65, 46, 57}, {} },
+ {{65, 34, 45}, {} },
+ {{65, 22, 33}, {} },
+ },
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_quot_offset_param[2][MSM8996_HMSS_FUSE_CORNERS][3] = {
+ [MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+ {{} },
+ {{} },
+ {{68, 6, 13}, {} },
+ {{67, 62, 63}, {68, 0, 5}, {} },
+ {{67, 54, 61}, {} },
+ },
+ [MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{} },
+ {{} },
+ {{66, 22, 29}, {} },
+ {{66, 14, 21}, {} },
+ {{66, 6, 13}, {} },
+ },
+};
+
+/*
+ * This fuse is used to define if the redundant set of fuses should be used for
+ * any particular feature. CPR is one such feature. The redundant CPR fuses
+ * should be used if this fuse parameter has a value of 1.
+ */
+static const struct cpr3_fuse_param msm8996_redundant_fusing_param[] = {
+ {73, 61, 63},
+ {},
+};
+#define MSM8996_CPR_REDUNDANT_FUSING 1
+
+static const struct cpr3_fuse_param
+msm8996_hmss_redun_ro_sel_param[2][MSM8996_HMSS_FUSE_CORNERS][2] = {
+ [MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+ {{76, 36, 39}, {} },
+ {{76, 32, 35}, {} },
+ {{76, 32, 35}, {} },
+ {{76, 28, 31}, {} },
+ {{76, 24, 27}, {} },
+ },
+ [MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{74, 52, 55}, {} },
+ {{74, 48, 51}, {} },
+ {{74, 48, 51}, {} },
+ {{74, 44, 47}, {} },
+ {{74, 40, 43}, {} },
+ },
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_redun_init_voltage_param[2][MSM8996_HMSS_FUSE_CORNERS][3] = {
+ [MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+ {{76, 58, 63}, {} },
+ {{76, 52, 57}, {} },
+ {{76, 52, 57}, {} },
+ {{76, 46, 51}, {} },
+ {{76, 40, 45}, {} },
+ },
+ [MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{75, 10, 15}, {} },
+ {{75, 4, 9}, {} },
+ {{75, 4, 9}, {} },
+ {{74, 62, 63}, {75, 0, 3}, {} },
+ {{74, 56, 61}, {} },
+ },
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_redun_target_quot_param[2][MSM8996_HMSS_FUSE_CORNERS][2] = {
+ [MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+ {{77, 36, 47}, {} },
+ {{77, 24, 35}, {} },
+ {{77, 24, 35}, {} },
+ {{77, 12, 23}, {} },
+ {{77, 0, 11}, {} },
+ },
+ [MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{75, 52, 63}, {} },
+ {{75, 40, 51}, {} },
+ {{75, 40, 51}, {} },
+ {{75, 28, 39}, {} },
+ {{75, 16, 27}, {} },
+ },
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_redun_quot_offset_param[2][MSM8996_HMSS_FUSE_CORNERS][2] = {
+ [MSM8996_HMSS_POWER_CLUSTER_THREAD_ID] = {
+ {{} },
+ {{} },
+ {{68, 11, 18}, {} },
+ {{77, 56, 63}, {} },
+ {{77, 48, 55}, {} },
+ },
+ [MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID] = {
+ {{} },
+ {{} },
+ {{76, 16, 23}, {} },
+ {{76, 8, 15}, {} },
+ {{76, 0, 7}, {} },
+ },
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_fusing_rev_param[] = {
+ {39, 51, 53},
+ {},
+};
+
+static const struct cpr3_fuse_param msm8996_hmss_speed_bin_param[] = {
+ {38, 29, 31},
+ {},
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_limitation_param[] = {
+ {41, 31, 32},
+ {},
+};
+
+static const struct cpr3_fuse_param msm8996_vdd_mx_ret_param[] = {
+ {41, 2, 4},
+ {},
+};
+
+static const struct cpr3_fuse_param msm8996_vdd_apcc_ret_param[] = {
+ {41, 52, 54},
+ {},
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_partial_binning_param[] = {
+ {39, 55, 59},
+ {},
+};
+
+static const struct cpr3_fuse_param
+msm8996_hmss_aging_init_quot_diff_param[] = {
+ {68, 14, 19},
+ {},
+};
+
+/*
+ * Some initial msm8996 parts cannot be used in a meaningful way by software.
+ * Other parts can only be used when operating with CPR disabled (i.e. at the
+ * fused open-loop voltage) when no voltage interpolation is applied. A fuse
+ * parameter is provided so that software can properly handle these limitations.
+ */
+enum msm8996_cpr_limitation {
+ MSM8996_CPR_LIMITATION_NONE = 0,
+ MSM8996_CPR_LIMITATION_UNSUPPORTED = 2,
+ MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION = 3,
+};
+
+/*
+ * Some initial msm8996 parts cannot be operated at low voltages. A fuse
+ * parameter is provided so that software can properly handle these limitations.
+ */
+enum msm8996_cpr_partial_binning {
+ MSM8996_CPR_PARTIAL_BINNING_SVS = 11,
+ MSM8996_CPR_PARTIAL_BINNING_NOM = 12,
+};
+
+/* Additional MSM8996 specific data: */
+
+/* Open loop voltage fuse reference voltages in microvolts for MSM8996 v1/v2 */
+static const int msm8996_v1_v2_hmss_fuse_ref_volt[MSM8996_HMSS_FUSE_CORNERS] = {
+ 605000,
+ 745000, /* Place holder entry for LowSVS */
+ 745000,
+ 905000,
+ 1015000,
+};
+
+/* Open loop voltage fuse reference voltages in microvolts for MSM8996 v3 */
+static const int msm8996_v3_hmss_fuse_ref_volt[MSM8996_HMSS_FUSE_CORNERS] = {
+ 605000,
+ 745000, /* Place holder entry for LowSVS */
+ 745000,
+ 905000,
+ 1140000,
+};
+
+/*
+ * Open loop voltage fuse reference voltages in microvolts for MSM8996 v3 with
+ * speed_bin == 1 and cpr_fusing_rev >= 5.
+ */
+static const int msm8996_v3_speed_bin1_rev5_hmss_fuse_ref_volt[
+ MSM8996_HMSS_FUSE_CORNERS] = {
+ 605000,
+ 745000, /* Place holder entry for LowSVS */
+ 745000,
+ 905000,
+ 1040000,
+};
+
+/* Defines mapping from retention fuse values to voltages in microvolts */
+static const int msm8996_vdd_apcc_fuse_ret_volt[] = {
+ 600000, 550000, 500000, 450000, 400000, 350000, 300000, 600000,
+};
+
+static const int msm8996_vdd_mx_fuse_ret_volt[] = {
+ 700000, 650000, 580000, 550000, 490000, 490000, 490000, 490000,
+};
+
+#define MSM8996_HMSS_FUSE_STEP_VOLT 10000
+#define MSM8996_HMSS_VOLTAGE_FUSE_SIZE 6
+#define MSM8996_HMSS_QUOT_OFFSET_SCALE 5
+#define MSM8996_HMSS_AGING_INIT_QUOT_DIFF_SCALE 2
+#define MSM8996_HMSS_AGING_INIT_QUOT_DIFF_SIZE 6
+
+#define MSM8996_HMSS_CPR_SENSOR_COUNT 25
+#define MSM8996_HMSS_THREAD0_SENSOR_MIN 0
+#define MSM8996_HMSS_THREAD0_SENSOR_MAX 14
+#define MSM8996_HMSS_THREAD1_SENSOR_MIN 15
+#define MSM8996_HMSS_THREAD1_SENSOR_MAX 24
+
+#define MSM8996_HMSS_CPR_CLOCK_RATE 19200000
+
+#define MSM8996_HMSS_AGING_SENSOR_ID 11
+#define MSM8996_HMSS_AGING_BYPASS_MASK0 (GENMASK(7, 0) & ~BIT(3))
+
+/**
+ * cpr3_msm8996_hmss_read_fuse_data() - load HMSS specific fuse parameter values
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function allocates a cpr3_msm8996_hmss_fuses struct, fills it with
+ * values read out of hardware fuses, and finally copies common fuse values
+ * into the CPR3 regulator struct.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_hmss_read_fuse_data(struct cpr3_regulator *vreg)
+{
+ void __iomem *base = vreg->thread->ctrl->fuse_base;
+ struct cpr3_msm8996_hmss_fuses *fuse;
+ bool redundant;
+ int i, id, rc;
+
+ fuse = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*fuse), GFP_KERNEL);
+ if (!fuse)
+ return -ENOMEM;
+
+ rc = cpr3_read_fuse_param(base, msm8996_hmss_speed_bin_param,
+ &fuse->speed_bin);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read speed bin fuse, rc=%d\n", rc);
+ return rc;
+ }
+ cpr3_info(vreg, "speed bin = %llu\n", fuse->speed_bin);
+
+ rc = cpr3_read_fuse_param(base, msm8996_cpr_fusing_rev_param,
+ &fuse->cpr_fusing_rev);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+ cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
+
+ rc = cpr3_read_fuse_param(base, msm8996_redundant_fusing_param,
+ &fuse->redundant_fusing);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read redundant fusing config fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ redundant = (fuse->redundant_fusing == MSM8996_CPR_REDUNDANT_FUSING);
+ cpr3_info(vreg, "using redundant fuses = %c\n",
+ redundant ? 'Y' : 'N');
+
+ rc = cpr3_read_fuse_param(base, msm8996_cpr_limitation_param,
+ &fuse->limitation);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read CPR limitation fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+ cpr3_info(vreg, "CPR limitation = %s\n",
+ fuse->limitation == MSM8996_CPR_LIMITATION_UNSUPPORTED
+ ? "unsupported chip" : fuse->limitation
+ == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION
+ ? "CPR disabled and no interpolation" : "none");
+
+ rc = cpr3_read_fuse_param(base, msm8996_cpr_partial_binning_param,
+ &fuse->partial_binning);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read partial binning fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+ cpr3_info(vreg, "CPR partial binning limitation = %s\n",
+ fuse->partial_binning == MSM8996_CPR_PARTIAL_BINNING_SVS
+ ? "SVS min voltage"
+ : fuse->partial_binning == MSM8996_CPR_PARTIAL_BINNING_NOM
+ ? "NOM min voltage"
+ : "none");
+
+ rc = cpr3_read_fuse_param(base, msm8996_vdd_mx_ret_param,
+ &fuse->vdd_mx_ret_fuse);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read VDD_MX retention fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_read_fuse_param(base, msm8996_vdd_apcc_ret_param,
+ &fuse->vdd_apcc_ret_fuse);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read VDD_APCC retention fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr3_info(vreg, "Retention voltage fuses: VDD_MX = %llu, VDD_APCC = %llu\n",
+ fuse->vdd_mx_ret_fuse, fuse->vdd_apcc_ret_fuse);
+
+ rc = cpr3_read_fuse_param(base, msm8996_hmss_aging_init_quot_diff_param,
+ &fuse->aging_init_quot_diff);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ id = vreg->thread->thread_id;
+
+ for (i = 0; i < MSM8996_HMSS_FUSE_CORNERS; i++) {
+ rc = cpr3_read_fuse_param(base,
+ redundant
+ ? msm8996_hmss_redun_init_voltage_param[id][i]
+ : msm8996_hmss_init_voltage_param[id][i],
+ &fuse->init_voltage[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+
+ rc = cpr3_read_fuse_param(base,
+ redundant
+ ? msm8996_hmss_redun_target_quot_param[id][i]
+ : msm8996_hmss_target_quot_param[id][i],
+ &fuse->target_quot[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+
+ rc = cpr3_read_fuse_param(base,
+ redundant
+ ? msm8996_hmss_redun_ro_sel_param[id][i]
+ : msm8996_hmss_ro_sel_param[id][i],
+ &fuse->ro_sel[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+
+ rc = cpr3_read_fuse_param(base,
+ redundant
+ ? msm8996_hmss_redun_quot_offset_param[id][i]
+ : msm8996_hmss_quot_offset_param[id][i],
+ &fuse->quot_offset[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
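+ /*
+ * Illustrative example: per the combo description above, a part fused
+ * with speed_bin = 1 and cpr_fusing_rev = 3 maps to
+ * fuse_combo = 3 + 8 * 1 = 11.
+ */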
+ vreg->fuse_combo = fuse->cpr_fusing_rev + 8 * fuse->speed_bin;
+ if (vreg->fuse_combo >= CPR3_MSM8996_HMSS_FUSE_COMBO_COUNT) {
+ cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
+ vreg->fuse_combo);
+ return -EINVAL;
+ }
+
+ vreg->speed_bin_fuse = fuse->speed_bin;
+ vreg->cpr_rev_fuse = fuse->cpr_fusing_rev;
+ vreg->fuse_corner_count = MSM8996_HMSS_FUSE_CORNERS;
+ vreg->platform_fuses = fuse;
+
+ return 0;
+}
+
+/**
+ * cpr3_hmss_parse_corner_data() - parse HMSS corner data from device tree
+ * properties of the CPR3 regulator's device node
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_parse_corner_data(struct cpr3_regulator *vreg)
+{
+ int rc;
+
+ rc = cpr3_parse_common_corner_data(vreg);
+ if (rc) {
+ cpr3_err(vreg, "error reading corner data, rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * cpr3_msm8996_hmss_calculate_open_loop_voltages() - calculate the open-loop
+ * voltage for each corner of a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * If open-loop voltage interpolation is allowed in both device tree and in
+ * hardware fuses, then this function calculates the open-loop voltage for a
+ * given corner using linear interpolation. This interpolation is performed
+ * using the processor frequencies of the lower and higher Fmax corners along
+ * with their fused open-loop voltages.
+ *
+ * If open-loop voltage interpolation is not allowed, then this function uses
+ * the Fmax fused open-loop voltage for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_hmss_calculate_open_loop_voltages(
+ struct cpr3_regulator *vreg)
+{
+ struct device_node *node = vreg->of_node;
+ struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+ int rc = 0;
+ bool allow_interpolation;
+ u64 freq_low, volt_low, freq_high, volt_high;
+ int i, j, soc_revision;
+ const int *ref_volt;
+ int *fuse_volt;
+ int *fmax_corner;
+
+ fuse_volt = kcalloc(vreg->fuse_corner_count, sizeof(*fuse_volt),
+ GFP_KERNEL);
+ fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+ GFP_KERNEL);
+ if (!fuse_volt || !fmax_corner) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ soc_revision = vreg->thread->ctrl->soc_revision;
+ if (soc_revision == 1 || soc_revision == 2)
+ ref_volt = msm8996_v1_v2_hmss_fuse_ref_volt;
+ else if (fuse->speed_bin == 1 && fuse->cpr_fusing_rev >= 5)
+ ref_volt = msm8996_v3_speed_bin1_rev5_hmss_fuse_ref_volt;
+ else
+ ref_volt = msm8996_v3_hmss_fuse_ref_volt;
+
+ for (i = 0; i < vreg->fuse_corner_count; i++) {
+ fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse(
+ ref_volt[i],
+ MSM8996_HMSS_FUSE_STEP_VOLT, fuse->init_voltage[i],
+ MSM8996_HMSS_VOLTAGE_FUSE_SIZE);
+
+ /* Log fused open-loop voltage values for debugging purposes. */
+ if (i != CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS)
+ cpr3_info(vreg, "fused %6s: open-loop=%7d uV\n",
+ cpr3_msm8996_hmss_fuse_corner_name[i],
+ fuse_volt[i]);
+ }
+
+ rc = cpr3_adjust_fused_open_loop_voltages(vreg, fuse_volt);
+ if (rc) {
+ cpr3_err(vreg, "fused open-loop voltage adjustment failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ allow_interpolation = of_property_read_bool(node,
+ "qcom,allow-voltage-interpolation");
+
+ /*
+ * No LowSVS open-loop voltage fuse exists. Instead, intermediate
+ * voltages are interpolated between MinSVS and SVS. Set the LowSVS
+ * voltage to be equal to the adjusted SVS voltage in order to avoid
+ * triggering an incorrect condition violation in the following loop.
+ */
+ fuse_volt[CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS]
+ = fuse_volt[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS];
+
+ for (i = 1; i < vreg->fuse_corner_count; i++) {
+ if (fuse_volt[i] < fuse_volt[i - 1]) {
+ cpr3_debug(vreg, "fuse corner %d voltage=%d uV < fuse corner %d voltage=%d uV; overriding: fuse corner %d voltage=%d\n",
+ i, fuse_volt[i], i - 1, fuse_volt[i - 1],
+ i, fuse_volt[i - 1]);
+ fuse_volt[i] = fuse_volt[i - 1];
+ }
+ }
+
+ if (fuse->limitation == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION)
+ allow_interpolation = false;
+
+ if (!allow_interpolation) {
+ /* Use fused open-loop voltage for lower frequencies. */
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].open_loop_volt
+ = fuse_volt[vreg->corner[i].cpr_fuse_corner];
+ goto done;
+ }
+
+ /* Determine highest corner mapped to each fuse corner */
+ j = vreg->fuse_corner_count - 1;
+ for (i = vreg->corner_count - 1; i >= 0; i--) {
+ if (vreg->corner[i].cpr_fuse_corner == j) {
+ fmax_corner[j] = i;
+ j--;
+ }
+ }
+ if (j >= 0) {
+ cpr3_err(vreg, "invalid fuse corner mapping\n");
+ rc = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Interpolation is not possible for corners mapped to the lowest fuse
+ * corner so use the fuse corner value directly.
+ */
+ for (i = 0; i <= fmax_corner[0]; i++)
+ vreg->corner[i].open_loop_volt = fuse_volt[0];
+
+ /*
+ * Corner LowSVS should be skipped for voltage interpolation
+ * since no fuse exists for it. Instead, the lowest interpolation
+ * should be between MinSVS and SVS.
+ */
+ for (i = CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS;
+ i < vreg->fuse_corner_count - 1; i++) {
+ fmax_corner[i] = fmax_corner[i + 1];
+ fuse_volt[i] = fuse_volt[i + 1];
+ }
+
+ /* Interpolate voltages for the higher fuse corners. */
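+ /*
+ * Illustrative calculation (hypothetical numbers): for a lower Fmax
+ * corner at frequency 1036800000 with volt_low = 745000 uV and a
+ * higher Fmax corner at 1689600000 with volt_high = 905000 uV, a
+ * corner at 1363200000 interpolates to
+ * 745000 + (905000 - 745000) * (1363200000 - 1036800000)
+ *	/ (1689600000 - 1036800000) = 825000 uV.
+ */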
+ for (i = 1; i < vreg->fuse_corner_count - 1; i++) {
+ freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+ volt_low = fuse_volt[i - 1];
+ freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+ volt_high = fuse_volt[i];
+
+ for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+ vreg->corner[j].open_loop_volt = cpr3_interpolate(
+ freq_low, volt_low, freq_high, volt_high,
+ vreg->corner[j].proc_freq);
+ }
+
+done:
+ if (rc == 0) {
+ cpr3_debug(vreg, "unadjusted per-corner open-loop voltages:\n");
+ for (i = 0; i < vreg->corner_count; i++)
+ cpr3_debug(vreg, "open-loop[%2d] = %d uV\n", i,
+ vreg->corner[i].open_loop_volt);
+
+ rc = cpr3_adjust_open_loop_voltages(vreg);
+ if (rc)
+ cpr3_err(vreg, "open-loop voltage adjustment failed, rc=%d\n",
+ rc);
+ }
+
+ kfree(fuse_volt);
+ kfree(fmax_corner);
+ return rc;
+}
+
+/**
+ * cpr3_msm8996_hmss_set_no_interpolation_quotients() - use the fused target
+ * quotient values for lower frequencies.
+ * @vreg: Pointer to the CPR3 regulator
+ * @volt_adjust: Pointer to array of per-corner closed-loop adjustment
+ * voltages
+ * @volt_adjust_fuse: Pointer to array of per-fuse-corner closed-loop
+ * adjustment voltages
+ * @ro_scale: Pointer to array of per-fuse-corner RO scaling factor
+ * values with units of QUOT/V
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_hmss_set_no_interpolation_quotients(
+ struct cpr3_regulator *vreg, int *volt_adjust,
+ int *volt_adjust_fuse, int *ro_scale)
+{
+ struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+ u32 quot, ro;
+ int quot_adjust;
+ int i, fuse_corner;
+
+ for (i = 0; i < vreg->corner_count; i++) {
+ fuse_corner = vreg->corner[i].cpr_fuse_corner;
+ quot = fuse->target_quot[fuse_corner];
+ quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner],
+ volt_adjust_fuse[fuse_corner] + volt_adjust[i]);
+ ro = fuse->ro_sel[fuse_corner];
+ vreg->corner[i].target_quot[ro] = quot + quot_adjust;
+ if (quot_adjust)
+ cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %u --> %u (%d uV)\n",
+ i, ro, quot, vreg->corner[i].target_quot[ro],
+ volt_adjust_fuse[fuse_corner] + volt_adjust[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_msm8996_hmss_calculate_target_quotients() - calculate the CPR target
+ * quotient for each corner of a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * If target quotient interpolation is allowed in both device tree and in
+ * hardware fuses, then this function calculates the target quotient for a
+ * given corner using linear interpolation. This interpolation is performed
+ * using the processor frequencies of the lower and higher Fmax corners along
+ * with the fused target quotient and quotient offset of the higher Fmax corner.
+ *
+ * If target quotient interpolation is not allowed, then this function uses
+ * the Fmax fused target quotient for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_hmss_calculate_target_quotients(
+ struct cpr3_regulator *vreg)
+{
+ struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+ int rc;
+ bool allow_interpolation;
+ u64 freq_low, freq_high, prev_quot;
+ u64 *quot_low;
+ u64 *quot_high;
+ u32 quot, ro;
+ int i, j, fuse_corner, quot_adjust;
+ int *fmax_corner;
+ int *volt_adjust, *volt_adjust_fuse, *ro_scale;
+
+ /* Log fused quotient values for debugging purposes. */
+ cpr3_info(vreg, "fused MinSVS: quot[%2llu]=%4llu\n",
+ fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS],
+ fuse->target_quot[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS]);
+ for (i = CPR3_MSM8996_HMSS_FUSE_CORNER_SVS;
+ i <= CPR3_MSM8996_HMSS_FUSE_CORNER_TURBO; i++)
+ cpr3_info(vreg, "fused %6s: quot[%2llu]=%4llu, quot_offset[%2llu]=%4llu\n",
+ cpr3_msm8996_hmss_fuse_corner_name[i],
+ fuse->ro_sel[i], fuse->target_quot[i], fuse->ro_sel[i],
+ fuse->quot_offset[i] * MSM8996_HMSS_QUOT_OFFSET_SCALE);
+
+ allow_interpolation = of_property_read_bool(vreg->of_node,
+ "qcom,allow-quotient-interpolation");
+
+ if (fuse->limitation == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION)
+ allow_interpolation = false;
+
+ volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust),
+ GFP_KERNEL);
+ volt_adjust_fuse = kcalloc(vreg->fuse_corner_count,
+ sizeof(*volt_adjust_fuse), GFP_KERNEL);
+ ro_scale = kcalloc(vreg->fuse_corner_count, sizeof(*ro_scale),
+ GFP_KERNEL);
+ fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+ GFP_KERNEL);
+ quot_low = kcalloc(vreg->fuse_corner_count, sizeof(*quot_low),
+ GFP_KERNEL);
+ quot_high = kcalloc(vreg->fuse_corner_count, sizeof(*quot_high),
+ GFP_KERNEL);
+ if (!volt_adjust || !volt_adjust_fuse || !ro_scale ||
+ !fmax_corner || !quot_low || !quot_high) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ rc = cpr3_parse_closed_loop_voltage_adjustments(vreg, &fuse->ro_sel[0],
+ volt_adjust, volt_adjust_fuse, ro_scale);
+ if (rc) {
+ cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+	if (!allow_interpolation) {
+		/* Use fused target quotients for lower frequencies. */
+		rc = cpr3_msm8996_hmss_set_no_interpolation_quotients(vreg,
+			volt_adjust, volt_adjust_fuse, ro_scale);
+		goto done;
+	}
+
+ /* Determine highest corner mapped to each fuse corner */
+ j = vreg->fuse_corner_count - 1;
+ for (i = vreg->corner_count - 1; i >= 0; i--) {
+ if (vreg->corner[i].cpr_fuse_corner == j) {
+ fmax_corner[j] = i;
+ j--;
+ }
+ }
+ if (j >= 0) {
+ cpr3_err(vreg, "invalid fuse corner mapping\n");
+ rc = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Interpolation is not possible for corners mapped to the lowest fuse
+ * corner so use the fuse corner value directly.
+ */
+ i = CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS;
+ quot_adjust = cpr3_quot_adjustment(ro_scale[i], volt_adjust_fuse[i]);
+ quot = fuse->target_quot[i] + quot_adjust;
+ quot_high[i] = quot;
+ ro = fuse->ro_sel[i];
+ if (quot_adjust)
+ cpr3_debug(vreg, "adjusted fuse corner %d RO%u target quot: %llu --> %u (%d uV)\n",
+ i, ro, fuse->target_quot[i], quot, volt_adjust_fuse[i]);
+ for (i = 0; i <= fmax_corner[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS]; i++)
+ vreg->corner[i].target_quot[ro] = quot;
+
+ /*
+ * The LowSVS target quotient is defined as:
+ * (SVS target quotient) - (the unpacked SVS quotient offset)
+ * MinSVS, LowSVS, and SVS fuse corners all share the same RO so it is
+ * possible to interpolate between their target quotient values.
+ */
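+	/*
+	 * For example, a fused SVS target quotient of 900 with a packed SVS
+	 * quotient offset of 5 yields a LowSVS target quotient of
+	 * 900 - 5 * MSM8996_HMSS_QUOT_OFFSET_SCALE (hypothetical fuse values).
+	 */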
+ i = CPR3_MSM8996_HMSS_FUSE_CORNER_LOWSVS;
+ quot_high[i] = fuse->target_quot[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS]
+ - fuse->quot_offset[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS]
+ * MSM8996_HMSS_QUOT_OFFSET_SCALE;
+ quot_low[i] = fuse->target_quot[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS];
+ if (quot_high[i] < quot_low[i]) {
+ cpr3_info(vreg, "quot_lowsvs=%llu < quot_minsvs=%llu; overriding: quot_lowsvs=%llu\n",
+ quot_high[i], quot_low[i], quot_low[i]);
+ quot_high[i] = quot_low[i];
+ }
+ if (fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS]
+ != fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS]) {
+ cpr3_info(vreg, "MinSVS RO=%llu != SVS RO=%llu; disabling LowSVS interpolation\n",
+ fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_MINSVS],
+ fuse->ro_sel[CPR3_MSM8996_HMSS_FUSE_CORNER_SVS]);
+ quot_low[i] = quot_high[i];
+ }
+
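+	/*
+	 * quot_low[i] and quot_high[i] hold the target quotients at the Fmax
+	 * corners bounding fuse corner i; they are used below as the
+	 * endpoints for per-corner quotient interpolation.
+	 */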
+ for (i = CPR3_MSM8996_HMSS_FUSE_CORNER_SVS;
+ i < vreg->fuse_corner_count; i++) {
+ quot_high[i] = fuse->target_quot[i];
+ if (fuse->ro_sel[i] == fuse->ro_sel[i - 1])
+ quot_low[i] = quot_high[i - 1];
+ else
+ quot_low[i] = quot_high[i]
+ - fuse->quot_offset[i]
+ * MSM8996_HMSS_QUOT_OFFSET_SCALE;
+ if (quot_high[i] < quot_low[i]) {
+ cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu; overriding: quot_high[%d]=%llu\n",
+ i, quot_high[i], i, quot_low[i],
+ i, quot_low[i]);
+ quot_high[i] = quot_low[i];
+ }
+ }
+
+ /* Perform per-fuse-corner target quotient adjustment */
+ for (i = 1; i < vreg->fuse_corner_count; i++) {
+ quot_adjust = cpr3_quot_adjustment(ro_scale[i],
+ volt_adjust_fuse[i]);
+ if (quot_adjust) {
+ prev_quot = quot_high[i];
+ quot_high[i] += quot_adjust;
+ cpr3_debug(vreg, "adjusted fuse corner %d RO%llu target quot: %llu --> %llu (%d uV)\n",
+ i, fuse->ro_sel[i], prev_quot, quot_high[i],
+ volt_adjust_fuse[i]);
+ }
+
+ if (fuse->ro_sel[i] == fuse->ro_sel[i - 1])
+ quot_low[i] = quot_high[i - 1];
+ else
+ quot_low[i] += cpr3_quot_adjustment(ro_scale[i],
+ volt_adjust_fuse[i - 1]);
+
+ if (quot_high[i] < quot_low[i]) {
+ cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu after adjustment; overriding: quot_high[%d]=%llu\n",
+ i, quot_high[i], i, quot_low[i],
+ i, quot_low[i]);
+ quot_high[i] = quot_low[i];
+ }
+ }
+
+ /* Interpolate voltages for the higher fuse corners. */
+ for (i = 1; i < vreg->fuse_corner_count; i++) {
+ freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+ freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+
+ ro = fuse->ro_sel[i];
+ for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+ vreg->corner[j].target_quot[ro] = cpr3_interpolate(
+ freq_low, quot_low[i], freq_high, quot_high[i],
+ vreg->corner[j].proc_freq);
+ }
+
+ /* Perform per-corner target quotient adjustment */
+ for (i = 0; i < vreg->corner_count; i++) {
+ fuse_corner = vreg->corner[i].cpr_fuse_corner;
+ ro = fuse->ro_sel[fuse_corner];
+ quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner],
+ volt_adjust[i]);
+ if (quot_adjust) {
+ prev_quot = vreg->corner[i].target_quot[ro];
+ vreg->corner[i].target_quot[ro] += quot_adjust;
+ cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %llu --> %u (%d uV)\n",
+ i, ro, prev_quot,
+ vreg->corner[i].target_quot[ro],
+ volt_adjust[i]);
+ }
+ }
+
+ /* Ensure that target quotients increase monotonically */
+ for (i = 1; i < vreg->corner_count; i++) {
+ ro = fuse->ro_sel[vreg->corner[i].cpr_fuse_corner];
+ if (fuse->ro_sel[vreg->corner[i - 1].cpr_fuse_corner] == ro
+ && vreg->corner[i].target_quot[ro]
+ < vreg->corner[i - 1].target_quot[ro]) {
+ cpr3_debug(vreg, "adjusted corner %d RO%u target quot=%u < adjusted corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n",
+ i, ro, vreg->corner[i].target_quot[ro],
+ i - 1, ro, vreg->corner[i - 1].target_quot[ro],
+ i, ro, vreg->corner[i - 1].target_quot[ro]);
+ vreg->corner[i].target_quot[ro]
+ = vreg->corner[i - 1].target_quot[ro];
+ }
+ }
+
+done:
+ kfree(volt_adjust);
+ kfree(volt_adjust_fuse);
+ kfree(ro_scale);
+ kfree(fmax_corner);
+ kfree(quot_low);
+ kfree(quot_high);
+ return rc;
+}
+
+/**
+ * cpr3_msm8996_partial_binning_override() - override the voltage and quotient
+ * settings for low corners based upon the value of the partial
+ * binning fuse
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Some parts are not able to operate at low voltages. The partial binning
+ * fuse specifies if a given part has such limitations.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_partial_binning_override(struct cpr3_regulator *vreg)
+{
+ struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+ int i, fuse_corner, fmax_corner;
+
+ if (fuse->partial_binning == MSM8996_CPR_PARTIAL_BINNING_SVS)
+ fuse_corner = CPR3_MSM8996_HMSS_FUSE_CORNER_SVS;
+ else if (fuse->partial_binning == MSM8996_CPR_PARTIAL_BINNING_NOM)
+ fuse_corner = CPR3_MSM8996_HMSS_FUSE_CORNER_NOM;
+ else
+ return 0;
+
+ cpr3_info(vreg, "overriding voltages and quotients for all corners below %s Fmax\n",
+ cpr3_msm8996_hmss_fuse_corner_name[fuse_corner]);
+
+ fmax_corner = -1;
+ for (i = vreg->corner_count - 1; i >= 0; i--) {
+ if (vreg->corner[i].cpr_fuse_corner == fuse_corner) {
+ fmax_corner = i;
+ break;
+ }
+ }
+ if (fmax_corner < 0) {
+ cpr3_err(vreg, "could not find %s Fmax corner\n",
+ cpr3_msm8996_hmss_fuse_corner_name[fuse_corner]);
+ return -EINVAL;
+ }
+
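+	/*
+	 * Copy the full corner configuration (voltages and target quotients)
+	 * of the fuse corner's Fmax corner into every lower corner so that
+	 * the part never operates below its supported voltage.
+	 */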
+ for (i = 0; i < fmax_corner; i++)
+ vreg->corner[i] = vreg->corner[fmax_corner];
+
+ return 0;
+}
+
+/**
+ * cpr3_hmss_print_settings() - print out HMSS CPR configuration settings into
+ * the kernel log for debugging purposes
+ * @vreg: Pointer to the CPR3 regulator
+ */
+static void cpr3_hmss_print_settings(struct cpr3_regulator *vreg)
+{
+ struct cpr3_corner *corner;
+ int i;
+
+ cpr3_debug(vreg, "Corner: Frequency (Hz), Fuse Corner, Floor (uV), Open-Loop (uV), Ceiling (uV)\n");
+ for (i = 0; i < vreg->corner_count; i++) {
+ corner = &vreg->corner[i];
+ cpr3_debug(vreg, "%3d: %10u, %2d, %7d, %7d, %7d\n",
+ i, corner->proc_freq, corner->cpr_fuse_corner,
+ corner->floor_volt, corner->open_loop_volt,
+ corner->ceiling_volt);
+ }
+
+ if (vreg->thread->ctrl->apm)
+ cpr3_debug(vreg, "APM threshold = %d uV, APM adjust = %d uV\n",
+ vreg->thread->ctrl->apm_threshold_volt,
+ vreg->thread->ctrl->apm_adj_volt);
+}
+
+/**
+ * cpr3_hmss_init_thread() - perform steps necessary to initialize the
+ * configuration data for a CPR3 thread
+ * @thread: Pointer to the CPR3 thread
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_init_thread(struct cpr3_thread *thread)
+{
+ int rc;
+
+ rc = cpr3_parse_common_thread_data(thread);
+ if (rc) {
+ cpr3_err(thread->ctrl, "thread %u unable to read CPR thread data from device tree, rc=%d\n",
+ thread->thread_id, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define MAX_VREG_NAME_SIZE 25
+/**
+ * cpr3_hmss_kvreg_init() - initialize HMSS Kryo Regulator data for a CPR3
+ * regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function loads Kryo Regulator data from device tree if it is present
+ * and requests a handle to the appropriate Kryo regulator device. In addition,
+ * it initializes Kryo Regulator data originating from hardware fuses, such as
+ * the LDO retention voltage, and requests the Kryo retention regulator to
+ * be configured to that value.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_kvreg_init(struct cpr3_regulator *vreg)
+{
+ struct cpr3_msm8996_hmss_fuses *fuse = vreg->platform_fuses;
+ struct device_node *node = vreg->of_node;
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ int id = vreg->thread->thread_id;
+ char kvreg_name_buf[MAX_VREG_NAME_SIZE];
+ int rc;
+
+ scnprintf(kvreg_name_buf, MAX_VREG_NAME_SIZE,
+ "vdd-thread%d-ldo-supply", id);
+
+	if (!of_find_property(ctrl->dev->of_node, kvreg_name_buf, NULL))
+ return 0;
+ else if (!of_find_property(node, "qcom,ldo-min-headroom-voltage", NULL))
+ return 0;
+
+ scnprintf(kvreg_name_buf, MAX_VREG_NAME_SIZE, "vdd-thread%d-ldo", id);
+
+ vreg->ldo_regulator = devm_regulator_get(ctrl->dev, kvreg_name_buf);
+ if (IS_ERR(vreg->ldo_regulator)) {
+ rc = PTR_ERR(vreg->ldo_regulator);
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(vreg, "unable to request %s regulator, rc=%d\n",
+ kvreg_name_buf, rc);
+ return rc;
+ }
+
+ vreg->ldo_regulator_bypass = BHS_MODE;
+
+ scnprintf(kvreg_name_buf, MAX_VREG_NAME_SIZE, "vdd-thread%d-ldo-ret",
+ id);
+
+ vreg->ldo_ret_regulator = devm_regulator_get(ctrl->dev, kvreg_name_buf);
+ if (IS_ERR(vreg->ldo_ret_regulator)) {
+ rc = PTR_ERR(vreg->ldo_ret_regulator);
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(vreg, "unable to request %s regulator, rc=%d\n",
+ kvreg_name_buf, rc);
+ return rc;
+ }
+
+ if (!ctrl->system_supply_max_volt) {
+ cpr3_err(ctrl, "system-supply max voltage must be specified\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(node, "qcom,ldo-min-headroom-voltage",
+ &vreg->ldo_min_headroom_volt);
+ if (rc) {
+ cpr3_err(vreg, "error reading qcom,ldo-min-headroom-voltage, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,ldo-max-headroom-voltage",
+ &vreg->ldo_max_headroom_volt);
+ if (rc) {
+ cpr3_err(vreg, "error reading qcom,ldo-max-headroom-voltage, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,ldo-max-voltage",
+ &vreg->ldo_max_volt);
+ if (rc) {
+ cpr3_err(vreg, "error reading qcom,ldo-max-voltage, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Determine the CPU retention voltage based on fused data */
+ vreg->ldo_ret_volt =
+ max(msm8996_vdd_apcc_fuse_ret_volt[fuse->vdd_apcc_ret_fuse],
+ msm8996_vdd_mx_fuse_ret_volt[fuse->vdd_mx_ret_fuse]);
+
+ rc = regulator_set_voltage(vreg->ldo_ret_regulator, vreg->ldo_ret_volt,
+ INT_MAX);
+ if (rc < 0) {
+ cpr3_err(vreg, "regulator_set_voltage(ldo_ret) == %d failed, rc=%d\n",
+ vreg->ldo_ret_volt, rc);
+ return rc;
+ }
+
+ /* optional properties, do not error out if missing */
+ of_property_read_u32(node, "qcom,ldo-adjust-voltage",
+ &vreg->ldo_adjust_volt);
+
+ vreg->ldo_mode_allowed = !of_property_read_bool(node,
+ "qcom,ldo-disable");
+
+ cpr3_info(vreg, "LDO min headroom=%d uV, LDO max headroom=%d uV, LDO adj=%d uV, LDO mode=%s, LDO retention=%d uV\n",
+ vreg->ldo_min_headroom_volt,
+ vreg->ldo_max_headroom_volt,
+ vreg->ldo_adjust_volt,
+ vreg->ldo_mode_allowed ? "allowed" : "disallowed",
+ vreg->ldo_ret_volt);
+
+ return 0;
+}
+
+/**
+ * cpr3_hmss_mem_acc_init() - initialize mem-acc regulator data for
+ * a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function loads mem-acc data from device tree to enable
+ * the control of mem-acc settings based upon the CPR3 regulator
+ * output voltage.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_mem_acc_init(struct cpr3_regulator *vreg)
+{
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ int id = vreg->thread->thread_id;
+ char mem_acc_vreg_name_buf[MAX_VREG_NAME_SIZE];
+ int rc;
+
+ scnprintf(mem_acc_vreg_name_buf, MAX_VREG_NAME_SIZE,
+ "mem-acc-thread%d-supply", id);
+
+ if (!of_find_property(ctrl->dev->of_node, mem_acc_vreg_name_buf,
+ NULL)) {
+ cpr3_debug(vreg, "not using memory accelerator regulator\n");
+ return 0;
+ } else if (!of_property_read_bool(vreg->of_node, "qcom,uses-mem-acc")) {
+ return 0;
+ }
+
+ scnprintf(mem_acc_vreg_name_buf, MAX_VREG_NAME_SIZE,
+ "mem-acc-thread%d", id);
+
+ vreg->mem_acc_regulator = devm_regulator_get(ctrl->dev,
+ mem_acc_vreg_name_buf);
+ if (IS_ERR(vreg->mem_acc_regulator)) {
+ rc = PTR_ERR(vreg->mem_acc_regulator);
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(vreg, "unable to request %s regulator, rc=%d\n",
+ mem_acc_vreg_name_buf, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_hmss_init_regulator() - perform all steps necessary to initialize the
+ * configuration data for a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_init_regulator(struct cpr3_regulator *vreg)
+{
+ struct cpr3_msm8996_hmss_fuses *fuse;
+ int rc;
+
+ rc = cpr3_msm8996_hmss_read_fuse_data(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to read CPR fuse data, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr3_hmss_kvreg_init(vreg);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(vreg, "unable to initialize Kryo Regulator settings, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_hmss_mem_acc_init(vreg);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(vreg, "unable to initialize mem-acc regulator settings, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ fuse = vreg->platform_fuses;
+ if (fuse->limitation == MSM8996_CPR_LIMITATION_UNSUPPORTED) {
+ cpr3_err(vreg, "this chip requires an unsupported voltage\n");
+ return -EPERM;
+ } else if (fuse->limitation
+ == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION) {
+ vreg->thread->ctrl->cpr_allowed_hw = false;
+ }
+
+ rc = of_property_read_u32(vreg->of_node, "qcom,cpr-pd-bypass-mask",
+ &vreg->pd_bypass_mask);
+ if (rc) {
+ cpr3_err(vreg, "error reading qcom,cpr-pd-bypass-mask, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_hmss_parse_corner_data(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to read CPR corner data from device tree, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (of_find_property(vreg->of_node, "qcom,cpr-dynamic-floor-corner",
+ NULL)) {
+ rc = cpr3_parse_array_property(vreg,
+ "qcom,cpr-dynamic-floor-corner",
+ 1, &vreg->dynamic_floor_corner);
+ if (rc) {
+ cpr3_err(vreg, "error reading qcom,cpr-dynamic-floor-corner, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+		if (vreg->dynamic_floor_corner <= 0) {
+			vreg->uses_dynamic_floor = false;
+		} else if (vreg->dynamic_floor_corner < CPR3_CORNER_OFFSET
+			|| vreg->dynamic_floor_corner
+				> vreg->corner_count - 1 + CPR3_CORNER_OFFSET) {
+			cpr3_err(vreg, "dynamic floor corner=%d not in range [%d, %d]\n",
+				vreg->dynamic_floor_corner, CPR3_CORNER_OFFSET,
+				vreg->corner_count - 1 + CPR3_CORNER_OFFSET);
+			return -EINVAL;
+		} else {
+			vreg->dynamic_floor_corner -= CPR3_CORNER_OFFSET;
+			vreg->uses_dynamic_floor = true;
+		}
+ }
+
+ rc = cpr3_msm8996_hmss_calculate_open_loop_voltages(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to calculate open-loop voltages, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_limit_open_loop_voltages(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to limit open-loop voltages, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr3_open_loop_voltage_as_ceiling(vreg);
+
+ rc = cpr3_limit_floor_voltages(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to limit floor voltages, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr3_msm8996_hmss_calculate_target_quotients(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to calculate target quotients, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_msm8996_partial_binning_override(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to override voltages and quotients based on partial binning fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr3_hmss_print_settings(vreg);
+
+ return 0;
+}
+
+/**
+ * cpr3_hmss_init_aging() - perform HMSS CPR3 controller specific
+ * aging initializations
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_init_aging(struct cpr3_controller *ctrl)
+{
+ struct cpr3_msm8996_hmss_fuses *fuse = NULL;
+ struct cpr3_regulator *vreg;
+ u32 aging_ro_scale;
+ int i, j, rc;
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ if (ctrl->thread[i].vreg[j].aging_allowed) {
+ ctrl->aging_required = true;
+ vreg = &ctrl->thread[i].vreg[j];
+ fuse = vreg->platform_fuses;
+ break;
+ }
+ }
+ }
+
+ if (!ctrl->aging_required || !fuse)
+ return 0;
+
+ rc = cpr3_parse_array_property(vreg, "qcom,cpr-aging-ro-scaling-factor",
+ 1, &aging_ro_scale);
+ if (rc)
+ return rc;
+
+ if (aging_ro_scale == 0) {
+ cpr3_err(ctrl, "aging RO scaling factor is invalid: %u\n",
+ aging_ro_scale);
+ return -EINVAL;
+ }
+
+ ctrl->aging_vdd_mode = REGULATOR_MODE_NORMAL;
+ ctrl->aging_complete_vdd_mode = REGULATOR_MODE_IDLE;
+
+ ctrl->aging_sensor_count = 1;
+ ctrl->aging_sensor = kzalloc(sizeof(*ctrl->aging_sensor), GFP_KERNEL);
+ if (!ctrl->aging_sensor)
+ return -ENOMEM;
+
+ ctrl->aging_sensor->sensor_id = MSM8996_HMSS_AGING_SENSOR_ID;
+ ctrl->aging_sensor->bypass_mask[0] = MSM8996_HMSS_AGING_BYPASS_MASK0;
+ ctrl->aging_sensor->ro_scale = aging_ro_scale;
+
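+	/*
+	 * The fused aging value is presumably a signed quantity; decoding it
+	 * with a reference of 0 and a step size of
+	 * MSM8996_HMSS_AGING_INIT_QUOT_DIFF_SCALE yields the initial quotient
+	 * difference between the aging min and max sensors.
+	 */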
+ ctrl->aging_sensor->init_quot_diff
+ = cpr3_convert_open_loop_voltage_fuse(0,
+ MSM8996_HMSS_AGING_INIT_QUOT_DIFF_SCALE,
+ fuse->aging_init_quot_diff,
+ MSM8996_HMSS_AGING_INIT_QUOT_DIFF_SIZE);
+
+ cpr3_debug(ctrl, "sensor %u aging init quotient diff = %d, aging RO scale = %u QUOT/V\n",
+ ctrl->aging_sensor->sensor_id,
+ ctrl->aging_sensor->init_quot_diff,
+ ctrl->aging_sensor->ro_scale);
+
+ return 0;
+}
+
+/**
+ * cpr3_hmss_init_controller() - perform HMSS CPR3 controller specific
+ * initializations
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_hmss_init_controller(struct cpr3_controller *ctrl)
+{
+ int i, rc;
+
+ rc = cpr3_parse_common_ctrl_data(ctrl);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "unable to parse common controller data, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ ctrl->vdd_limit_regulator = devm_regulator_get(ctrl->dev, "vdd-limit");
+ if (IS_ERR(ctrl->vdd_limit_regulator)) {
+ rc = PTR_ERR(ctrl->vdd_limit_regulator);
+ if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "unable to request vdd-limit regulator, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(ctrl->dev->of_node,
+ "qcom,cpr-up-down-delay-time",
+ &ctrl->up_down_delay_time);
+ if (rc) {
+ cpr3_err(ctrl, "error reading property qcom,cpr-up-down-delay-time, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* No error check since this is an optional property. */
+ of_property_read_u32(ctrl->dev->of_node,
+ "qcom,system-supply-max-voltage",
+ &ctrl->system_supply_max_volt);
+
+ /* No error check since this is an optional property. */
+ of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-clock-throttling",
+ &ctrl->proc_clock_throttle);
+
+ rc = cpr3_apm_init(ctrl);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "unable to initialize APM settings, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ ctrl->sensor_count = MSM8996_HMSS_CPR_SENSOR_COUNT;
+
+ ctrl->sensor_owner = devm_kcalloc(ctrl->dev, ctrl->sensor_count,
+ sizeof(*ctrl->sensor_owner), GFP_KERNEL);
+ if (!ctrl->sensor_owner)
+ return -ENOMEM;
+
+ /* Specify sensor ownership */
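+	/*
+	 * Sensors in the THREAD0 range report to CPR thread 0 and those in
+	 * the THREAD1 range to CPR thread 1; each thread services one of the
+	 * two HMSS CPU clusters.
+	 */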
+ for (i = MSM8996_HMSS_THREAD0_SENSOR_MIN;
+ i <= MSM8996_HMSS_THREAD0_SENSOR_MAX; i++)
+ ctrl->sensor_owner[i] = 0;
+ for (i = MSM8996_HMSS_THREAD1_SENSOR_MIN;
+ i <= MSM8996_HMSS_THREAD1_SENSOR_MAX; i++)
+ ctrl->sensor_owner[i] = 1;
+
+ ctrl->cpr_clock_rate = MSM8996_HMSS_CPR_CLOCK_RATE;
+ ctrl->ctrl_type = CPR_CTRL_TYPE_CPR3;
+ ctrl->supports_hw_closed_loop = true;
+ ctrl->use_hw_closed_loop = of_property_read_bool(ctrl->dev->of_node,
+ "qcom,cpr-hw-closed-loop");
+
+ if (ctrl->mem_acc_regulator) {
+ rc = of_property_read_u32(ctrl->dev->of_node,
+ "qcom,mem-acc-supply-threshold-voltage",
+ &ctrl->mem_acc_threshold_volt);
+ if (rc) {
+ cpr3_err(ctrl, "error reading property qcom,mem-acc-supply-threshold-voltage, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ ctrl->mem_acc_threshold_volt =
+ CPR3_ROUND(ctrl->mem_acc_threshold_volt,
+ ctrl->step_volt);
+
+ rc = of_property_read_u32_array(ctrl->dev->of_node,
+ "qcom,mem-acc-supply-corner-map",
+ &ctrl->mem_acc_corner_map[CPR3_MEM_ACC_LOW_CORNER],
+ CPR3_MEM_ACC_CORNERS);
+ if (rc) {
+ cpr3_err(ctrl, "error reading qcom,mem-acc-supply-corner-map, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int cpr3_hmss_regulator_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+ return cpr3_regulator_suspend(ctrl);
+}
+
+static int cpr3_hmss_regulator_resume(struct platform_device *pdev)
+{
+ struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+ return cpr3_regulator_resume(ctrl);
+}
+
+/* Data corresponds to the SoC revision */
+static struct of_device_id cpr_regulator_match_table[] = {
+ {
+ .compatible = "qcom,cpr3-msm8996-v1-hmss-regulator",
+ .data = (void *)1
+ },
+ {
+ .compatible = "qcom,cpr3-msm8996-v2-hmss-regulator",
+ .data = (void *)2
+ },
+ {
+ .compatible = "qcom,cpr3-msm8996-v3-hmss-regulator",
+ .data = (void *)3
+ },
+ {
+ .compatible = "qcom,cpr3-msm8996-hmss-regulator",
+ .data = (void *)3
+ },
+ {}
+};
+
+static int cpr3_hmss_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
+ struct cpr3_controller *ctrl;
+ struct cpr3_regulator *vreg;
+ int i, j, rc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "Device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = dev;
+ /* Set to false later if anything precludes CPR operation. */
+ ctrl->cpr_allowed_hw = true;
+
+ rc = of_property_read_string(dev->of_node, "qcom,cpr-ctrl-name",
+ &ctrl->name);
+ if (rc) {
+ cpr3_err(ctrl, "unable to read qcom,cpr-ctrl-name, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ match = of_match_node(cpr_regulator_match_table, dev->of_node);
+ if (match)
+ ctrl->soc_revision = (uintptr_t)match->data;
+ else
+ cpr3_err(ctrl, "could not find compatible string match\n");
+
+ rc = cpr3_map_fuse_base(ctrl, pdev);
+ if (rc) {
+ cpr3_err(ctrl, "could not map fuse base address\n");
+ return rc;
+ }
+
+ rc = cpr3_allocate_threads(ctrl, MSM8996_HMSS_POWER_CLUSTER_THREAD_ID,
+ MSM8996_HMSS_PERFORMANCE_CLUSTER_THREAD_ID);
+ if (rc) {
+ cpr3_err(ctrl, "failed to allocate CPR thread array, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (ctrl->thread_count < 1) {
+ cpr3_err(ctrl, "thread nodes are missing\n");
+ return -EINVAL;
+ }
+
+ rc = cpr3_hmss_init_controller(ctrl);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "failed to initialize CPR controller parameters, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ rc = cpr3_hmss_init_thread(&ctrl->thread[i]);
+ if (rc) {
+ cpr3_err(ctrl, "thread %u initialization failed, rc=%d\n",
+ ctrl->thread[i].thread_id, rc);
+ return rc;
+ }
+
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+
+ rc = cpr3_hmss_init_regulator(vreg);
+ if (rc) {
+ cpr3_err(vreg, "regulator initialization failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ rc = cpr3_hmss_init_aging(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "failed to initialize aging configurations, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, ctrl);
+
+ return cpr3_regulator_register(pdev, ctrl);
+}
+
+static int cpr3_hmss_regulator_remove(struct platform_device *pdev)
+{
+ struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+ return cpr3_regulator_unregister(ctrl);
+}
+
+static struct platform_driver cpr3_hmss_regulator_driver = {
+ .driver = {
+ .name = "qcom,cpr3-hmss-regulator",
+ .of_match_table = cpr_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = cpr3_hmss_regulator_probe,
+ .remove = cpr3_hmss_regulator_remove,
+ .suspend = cpr3_hmss_regulator_suspend,
+ .resume = cpr3_hmss_regulator_resume,
+};
+
+static int cpr_regulator_init(void)
+{
+ return platform_driver_register(&cpr3_hmss_regulator_driver);
+}
+
+static void cpr_regulator_exit(void)
+{
+ platform_driver_unregister(&cpr3_hmss_regulator_driver);
+}
+
+MODULE_DESCRIPTION("CPR3 HMSS regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(cpr_regulator_init);
+module_exit(cpr_regulator_exit);
diff --git a/drivers/regulator/cpr3-mmss-regulator.c b/drivers/regulator/cpr3-mmss-regulator.c
new file mode 100644
index 000000000000..9d17e127fe60
--- /dev/null
+++ b/drivers/regulator/cpr3-mmss-regulator.c
@@ -0,0 +1,1039 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include "cpr3-regulator.h"
+
+#define MSM8996_MMSS_FUSE_CORNERS 4
+
+/**
+ * struct cpr3_msm8996_mmss_fuses - MMSS specific fuse data for MSM8996
+ * @init_voltage: Initial (i.e. open-loop) voltage fuse parameter value
+ * for each fuse corner (raw, not converted to a voltage)
+ * @offset_voltage: The closed-loop voltage margin adjustment fuse parameter
+ * value for each fuse corner (raw, not converted to a
+ * voltage)
+ * @cpr_fusing_rev: CPR fusing revision fuse parameter value
+ * @limitation: CPR limitation select fuse parameter value
+ * @aging_init_quot_diff: Initial quotient difference between CPR aging
+ * min and max sensors measured at time of manufacturing
+ *
+ * This struct holds the values for all of the fuses read from memory.
+ */
+struct cpr3_msm8996_mmss_fuses {
+ u64 init_voltage[MSM8996_MMSS_FUSE_CORNERS];
+ u64 offset_voltage[MSM8996_MMSS_FUSE_CORNERS];
+ u64 cpr_fusing_rev;
+ u64 limitation;
+ u64 aging_init_quot_diff;
+};
+
+/**
+ * enum cpr3_msm8996_mmss_fuse_combo - fuse combinations supported by the MMSS
+ * CPR3 controller on MSM8996
+ * %CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV0: Part with CPR fusing rev == 0
+ * %CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV1: Part with CPR fusing rev == 1
+ * %CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV2: Part with CPR fusing rev == 2
+ * %CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV3: Part with CPR fusing rev == 3
+ * %CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV4: Part with CPR fusing rev == 4
+ * %CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV5: Part with CPR fusing rev == 5
+ * %CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV6: Part with CPR fusing rev == 6
+ * %CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV7: Part with CPR fusing rev == 7
+ * %CPR3_MSM8996_MMSS_FUSE_COMBO_COUNT: Defines the number of
+ * combinations supported
+ *
+ * This list will be expanded as new requirements are added.
+ */
+enum cpr3_msm8996_mmss_fuse_combo {
+ CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV0 = 0,
+ CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV1 = 1,
+ CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV2 = 2,
+ CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV3 = 3,
+ CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV4 = 4,
+ CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV5 = 5,
+ CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV6 = 6,
+ CPR3_MSM8996_MMSS_FUSE_COMBO_CPR_REV7 = 7,
+ CPR3_MSM8996_MMSS_FUSE_COMBO_COUNT
+};
+
+/*
+ * MSM8996 MMSS fuse parameter locations:
+ *
+ * Structs are organized with the following dimensions:
+ * Outer: 0 to 3 for fuse corners from lowest to highest corner
+ * Inner: large enough to hold the longest set of parameter segments which
+ * fully defines a fuse parameter, +1 (for NULL termination).
+ * Each segment corresponds to a contiguous group of bits from a
+ *		 single fuse row.  These segments are concatenated together in
+ * order to form the full fuse parameter value. The segments for
+ * a given parameter may correspond to different fuse rows.
+ */
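+/*
+ * Each cpr3_fuse_param segment below appears to be {row, bit_start, bit_end};
+ * for instance {63, 55, 59} selects bits 55..59 of fuse row 63, a 5-bit field
+ * matching MSM8996_MMSS_VOLTAGE_FUSE_SIZE.
+ */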
+static const struct cpr3_fuse_param
+msm8996_mmss_init_voltage_param[MSM8996_MMSS_FUSE_CORNERS][2] = {
+ {{63, 55, 59}, {} },
+ {{63, 50, 54}, {} },
+ {{63, 45, 49}, {} },
+ {{63, 40, 44}, {} },
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_fusing_rev_param[] = {
+ {39, 48, 50},
+ {},
+};
+
+static const struct cpr3_fuse_param msm8996_cpr_limitation_param[] = {
+ {41, 31, 32},
+ {},
+};
+
+static const struct cpr3_fuse_param
+msm8996_mmss_aging_init_quot_diff_param[] = {
+ {68, 26, 31},
+ {},
+};
+
+/* Offset voltages are defined for SVS and Turbo fuse corners only */
+static const struct cpr3_fuse_param
+msm8996_mmss_offset_voltage_param[MSM8996_MMSS_FUSE_CORNERS][2] = {
+ {{} },
+ {{66, 42, 44}, {} },
+ {{} },
+ {{64, 58, 61}, {} },
+};
+
+/*
+ * Some initial msm8996 parts cannot be used in a meaningful way by software.
+ * Other parts can only be used when operating with CPR disabled (i.e. at the
+ * fused open-loop voltage) and with no voltage interpolation applied.  A fuse
+ * parameter is provided so that software can properly handle these limitations.
+ */
+enum msm8996_cpr_limitation {
+ MSM8996_CPR_LIMITATION_NONE = 0,
+ MSM8996_CPR_LIMITATION_UNSUPPORTED = 2,
+ MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION = 3,
+};
+
+/* Additional MSM8996 specific data: */
+
+/* Open loop voltage fuse reference voltages in microvolts */
+static const int msm8996_mmss_fuse_ref_volt[MSM8996_MMSS_FUSE_CORNERS] = {
+ 670000,
+ 745000,
+ 905000,
+ 1015000,
+};
+
+#define MSM8996_MMSS_FUSE_STEP_VOLT 10000
+#define MSM8996_MMSS_OFFSET_FUSE_STEP_VOLT 10000
+#define MSM8996_MMSS_VOLTAGE_FUSE_SIZE 5
+#define MSM8996_MMSS_AGING_INIT_QUOT_DIFF_SCALE 2
+#define MSM8996_MMSS_AGING_INIT_QUOT_DIFF_SIZE 6
+
+#define MSM8996_MMSS_CPR_SENSOR_COUNT 35
+
+#define MSM8996_MMSS_CPR_CLOCK_RATE 19200000
+
+#define MSM8996_MMSS_AGING_SENSOR_ID 29
+#define MSM8996_MMSS_AGING_BYPASS_MASK0 (GENMASK(23, 0))
+
+/**
+ * cpr3_msm8996_mmss_read_fuse_data() - load MMSS specific fuse parameter values
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function allocates a cpr3_msm8996_mmss_fuses struct, fills it with
+ * values read out of hardware fuses, and finally copies common fuse values
+ * into the regulator struct.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_mmss_read_fuse_data(struct cpr3_regulator *vreg)
+{
+ void __iomem *base = vreg->thread->ctrl->fuse_base;
+ struct cpr3_msm8996_mmss_fuses *fuse;
+ int i, rc;
+
+ fuse = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*fuse), GFP_KERNEL);
+ if (!fuse)
+ return -ENOMEM;
+
+ rc = cpr3_read_fuse_param(base, msm8996_cpr_fusing_rev_param,
+ &fuse->cpr_fusing_rev);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+ cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
+
+ rc = cpr3_read_fuse_param(base, msm8996_cpr_limitation_param,
+ &fuse->limitation);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read CPR limitation fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+ cpr3_info(vreg, "CPR limitation = %s\n",
+ fuse->limitation == MSM8996_CPR_LIMITATION_UNSUPPORTED
+ ? "unsupported chip" : fuse->limitation
+ == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION
+ ? "CPR disabled and no interpolation" : "none");
+
+ rc = cpr3_read_fuse_param(base, msm8996_mmss_aging_init_quot_diff_param,
+ &fuse->aging_init_quot_diff);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read aging initial quotient difference fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < MSM8996_MMSS_FUSE_CORNERS; i++) {
+ rc = cpr3_read_fuse_param(base,
+ msm8996_mmss_init_voltage_param[i],
+ &fuse->init_voltage[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+
+ rc = cpr3_read_fuse_param(base,
+ msm8996_mmss_offset_voltage_param[i],
+ &fuse->offset_voltage[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d offset voltage fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ vreg->fuse_combo = fuse->cpr_fusing_rev;
+ if (vreg->fuse_combo >= CPR3_MSM8996_MMSS_FUSE_COMBO_COUNT) {
+ cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
+ vreg->fuse_combo);
+ return -EINVAL;
+ }
+
+ vreg->cpr_rev_fuse = fuse->cpr_fusing_rev;
+ vreg->fuse_corner_count = MSM8996_MMSS_FUSE_CORNERS;
+ vreg->platform_fuses = fuse;
+
+ return 0;
+}
+
+/**
+ * cpr3_mmss_parse_corner_data() - parse MMSS corner data from device tree
+ * properties of the regulator's device node
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_mmss_parse_corner_data(struct cpr3_regulator *vreg)
+{
+ int i, rc;
+ u32 *temp;
+
+ rc = cpr3_parse_common_corner_data(vreg);
+ if (rc) {
+ cpr3_err(vreg, "error reading corner data, rc=%d\n", rc);
+ return rc;
+ }
+
+ temp = kcalloc(vreg->corner_count * CPR3_RO_COUNT, sizeof(*temp),
+ GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ rc = cpr3_parse_corner_array_property(vreg, "qcom,cpr-target-quotients",
+ CPR3_RO_COUNT, temp);
+ if (rc) {
+ cpr3_err(vreg, "could not load target quotients, rc=%d\n", rc);
+ goto done;
+ }
+
+ for (i = 0; i < vreg->corner_count; i++)
+ memcpy(vreg->corner[i].target_quot, &temp[i * CPR3_RO_COUNT],
+ sizeof(*temp) * CPR3_RO_COUNT);
+
+done:
+ kfree(temp);
+ return rc;
+}
+
+/**
+ * cpr3_msm8996_mmss_apply_closed_loop_offset_voltages() - modify the
+ * closed-loop voltage adjustments by the amounts that are needed
+ * for this fuse combo
+ * @vreg: Pointer to the CPR3 regulator
+ * @volt_adjust: Array of closed-loop voltage adjustment values of length
+ * vreg->corner_count which is further adjusted based upon
+ * offset voltage fuse values.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_mmss_apply_closed_loop_offset_voltages(
+ struct cpr3_regulator *vreg, int *volt_adjust)
+{
+ struct cpr3_msm8996_mmss_fuses *fuse = vreg->platform_fuses;
+ u32 *corner_map;
+ int *volt_offset;
+ int rc = 0, i, fuse_len;
+
+ if (!of_find_property(vreg->of_node,
+ "qcom,cpr-fused-closed-loop-voltage-adjustment-map", NULL)) {
+ /* No closed-loop offset required. */
+ return 0;
+ }
+
+ corner_map = kcalloc(vreg->corner_count, sizeof(*corner_map),
+ GFP_KERNEL);
+ volt_offset = kcalloc(vreg->fuse_corner_count, sizeof(*volt_offset),
+ GFP_KERNEL);
+ if (!corner_map || !volt_offset) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ rc = cpr3_parse_corner_array_property(vreg,
+ "qcom,cpr-fused-closed-loop-voltage-adjustment-map",
+ 1, corner_map);
+ if (rc)
+ goto done;
+
+ for (i = 0; i < vreg->fuse_corner_count; i++) {
+ fuse_len = msm8996_mmss_offset_voltage_param[i][0].bit_end + 1
+ - msm8996_mmss_offset_voltage_param[i][0].bit_start;
+ volt_offset[i] = cpr3_convert_open_loop_voltage_fuse(
+ 0, MSM8996_MMSS_OFFSET_FUSE_STEP_VOLT,
+ fuse->offset_voltage[i], fuse_len);
+ if (volt_offset[i])
+ cpr3_info(vreg, "fuse_corner[%d] offset=%7d uV\n",
+ i, volt_offset[i]);
+ }
+
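+	/*
+	 * corner_map[] entries from device tree are 1-based fuse corner
+	 * indices; a value of 0 means that the corner receives no fused
+	 * offset.
+	 */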
+ for (i = 0; i < vreg->corner_count; i++) {
+ if (corner_map[i] == 0) {
+ continue;
+ } else if (corner_map[i] > vreg->fuse_corner_count) {
+ cpr3_err(vreg, "corner %d mapped to invalid fuse corner: %u\n",
+ i, corner_map[i]);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ volt_adjust[i] += volt_offset[corner_map[i] - 1];
+ }
+
+done:
+ kfree(corner_map);
+ kfree(volt_offset);
+
+ return rc;
+}
+
+/**
+ * cpr3_mmss_enforce_inc_quotient_monotonicity() - Ensure that target quotients
+ * increase monotonically from lower to higher corners
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: none
+ */
+static void cpr3_mmss_enforce_inc_quotient_monotonicity(
+ struct cpr3_regulator *vreg)
+{
+ int i, j;
+
+ for (i = 1; i < vreg->corner_count; i++) {
+ for (j = 0; j < CPR3_RO_COUNT; j++) {
+ if (vreg->corner[i].target_quot[j]
+ && vreg->corner[i].target_quot[j]
+ < vreg->corner[i - 1].target_quot[j]) {
+ cpr3_debug(vreg, "corner %d RO%u target quot=%u < corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n",
+ i, j,
+ vreg->corner[i].target_quot[j],
+ i - 1, j,
+ vreg->corner[i - 1].target_quot[j],
+ i, j,
+ vreg->corner[i - 1].target_quot[j]);
+ vreg->corner[i].target_quot[j]
+ = vreg->corner[i - 1].target_quot[j];
+ }
+ }
+ }
+}
+
+/**
+ * cpr3_mmss_enforce_dec_quotient_monotonicity() - Ensure that target quotients
+ * decrease monotonically from higher to lower corners
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: none
+ */
+static void cpr3_mmss_enforce_dec_quotient_monotonicity(
+ struct cpr3_regulator *vreg)
+{
+ int i, j;
+
+ for (i = vreg->corner_count - 2; i >= 0; i--) {
+ for (j = 0; j < CPR3_RO_COUNT; j++) {
+ if (vreg->corner[i].target_quot[j]
+ && vreg->corner[i].target_quot[j]
+ > vreg->corner[i + 1].target_quot[j]) {
+ cpr3_debug(vreg, "corner %d RO%u target quot=%u > corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n",
+ i, j,
+ vreg->corner[i].target_quot[j],
+ i + 1, j,
+ vreg->corner[i + 1].target_quot[j],
+ i, j,
+ vreg->corner[i + 1].target_quot[j]);
+ vreg->corner[i].target_quot[j]
+ = vreg->corner[i + 1].target_quot[j];
+ }
+ }
+ }
+}
+
+/**
+ * _cpr3_mmss_adjust_target_quotients() - adjust the target quotients for each
+ * corner of the regulator according to input adjustment and
+ * scaling arrays
+ * @vreg: Pointer to the CPR3 regulator
+ * @volt_adjust: Pointer to an array of closed-loop voltage adjustments
+ * with units of microvolts. The array must have
+ * vreg->corner_count number of elements.
+ * @ro_scale: Pointer to a flattened 2D array of RO scaling factors.
+ * The array must have an inner dimension of CPR3_RO_COUNT
+ * and an outer dimension of vreg->corner_count
+ * @label: Null terminated string providing a label for the type
+ * of adjustment.
+ *
+ * Return: true if any corners received a positive voltage adjustment (> 0),
+ * else false
+ */
+static bool _cpr3_mmss_adjust_target_quotients(struct cpr3_regulator *vreg,
+ const int *volt_adjust, const int *ro_scale, const char *label)
+{
+ int i, j, quot_adjust;
+ bool is_increasing = false;
+ u32 prev_quot;
+
+ for (i = 0; i < vreg->corner_count; i++) {
+ for (j = 0; j < CPR3_RO_COUNT; j++) {
+ if (vreg->corner[i].target_quot[j]) {
+ quot_adjust = cpr3_quot_adjustment(
+ ro_scale[i * CPR3_RO_COUNT + j],
+ volt_adjust[i]);
+ if (quot_adjust) {
+ prev_quot = vreg->corner[i].
+ target_quot[j];
+ vreg->corner[i].target_quot[j]
+ += quot_adjust;
+ cpr3_debug(vreg, "adjusted corner %d RO%d target quot %s: %u --> %u (%d uV)\n",
+ i, j, label, prev_quot,
+ vreg->corner[i].target_quot[j],
+ volt_adjust[i]);
+ }
+ }
+ }
+ if (volt_adjust[i] > 0)
+ is_increasing = true;
+ }
+
+ return is_increasing;
+}
+
+/**
+ * cpr3_mmss_adjust_target_quotients() - adjust the target quotients for each
+ * corner according to device tree values and fuse values
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_mmss_adjust_target_quotients(struct cpr3_regulator *vreg)
+{
+ int i, rc;
+ int *volt_adjust, *ro_scale;
+ bool explicit_adjustment, fused_adjustment, is_increasing;
+
+ explicit_adjustment = of_find_property(vreg->of_node,
+ "qcom,cpr-closed-loop-voltage-adjustment", NULL);
+ fused_adjustment = of_find_property(vreg->of_node,
+ "qcom,cpr-fused-closed-loop-voltage-adjustment-map", NULL);
+
+ if (!explicit_adjustment && !fused_adjustment && !vreg->aging_allowed) {
+ /* No adjustment required. */
+ return 0;
+ } else if (!of_find_property(vreg->of_node,
+ "qcom,cpr-ro-scaling-factor", NULL)) {
+ cpr3_err(vreg, "qcom,cpr-ro-scaling-factor is required for closed-loop voltage adjustment, but is missing\n");
+ return -EINVAL;
+ }
+
+ volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust),
+ GFP_KERNEL);
+ ro_scale = kcalloc(vreg->corner_count * CPR3_RO_COUNT,
+ sizeof(*ro_scale), GFP_KERNEL);
+ if (!volt_adjust || !ro_scale) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ rc = cpr3_parse_corner_array_property(vreg,
+ "qcom,cpr-ro-scaling-factor", CPR3_RO_COUNT, ro_scale);
+ if (rc) {
+ cpr3_err(vreg, "could not load RO scaling factors, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ for (i = 0; i < vreg->corner_count; i++)
+ memcpy(vreg->corner[i].ro_scale, &ro_scale[i * CPR3_RO_COUNT],
+ sizeof(*ro_scale) * CPR3_RO_COUNT);
+
+ if (explicit_adjustment) {
+ rc = cpr3_parse_corner_array_property(vreg,
+ "qcom,cpr-closed-loop-voltage-adjustment",
+ 1, volt_adjust);
+ if (rc) {
+ cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ _cpr3_mmss_adjust_target_quotients(vreg, volt_adjust, ro_scale,
+ "from DT");
+ cpr3_mmss_enforce_inc_quotient_monotonicity(vreg);
+ }
+
+ if (fused_adjustment) {
+ memset(volt_adjust, 0,
+ sizeof(*volt_adjust) * vreg->corner_count);
+
+ rc = cpr3_msm8996_mmss_apply_closed_loop_offset_voltages(vreg,
+ volt_adjust);
+ if (rc) {
+ cpr3_err(vreg, "could not apply fused closed-loop voltage reductions, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ is_increasing = _cpr3_mmss_adjust_target_quotients(vreg,
+ volt_adjust, ro_scale, "from fuse");
+ if (is_increasing)
+ cpr3_mmss_enforce_inc_quotient_monotonicity(vreg);
+ else
+ cpr3_mmss_enforce_dec_quotient_monotonicity(vreg);
+ }
+
+done:
+ kfree(volt_adjust);
+ kfree(ro_scale);
+ return rc;
+}
+
+/**
+ * cpr3_msm8996_mmss_calculate_open_loop_voltages() - calculate the open-loop
+ * voltage for each corner of a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * If open-loop voltage interpolation is allowed in both device tree and in
+ * hardware fuses, then this function calculates the open-loop voltage for a
+ * given corner using linear interpolation. This interpolation is performed
+ * using the processor frequencies of the lower and higher Fmax corners along
+ * with their fused open-loop voltages.
+ *
+ * If open-loop voltage interpolation is not allowed, then this function uses
+ * the Fmax fused open-loop voltage for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_msm8996_mmss_calculate_open_loop_voltages(
+ struct cpr3_regulator *vreg)
+{
+ struct device_node *node = vreg->of_node;
+ struct cpr3_msm8996_mmss_fuses *fuse = vreg->platform_fuses;
+ int rc = 0;
+ bool allow_interpolation;
+ u64 freq_low, volt_low, freq_high, volt_high;
+ int i, j;
+ int *fuse_volt;
+ int *fmax_corner;
+
+ fuse_volt = kcalloc(vreg->fuse_corner_count, sizeof(*fuse_volt),
+ GFP_KERNEL);
+ fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+ GFP_KERNEL);
+ if (!fuse_volt || !fmax_corner) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
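+	/*
+	 * cpr3_convert_open_loop_voltage_fuse() presumably decodes the raw
+	 * 5-bit fuse value as a signed step offset from the fuse corner's
+	 * reference voltage; e.g. a value of +4 steps for fuse corner 0 would
+	 * give 670000 + 4 * 10000 = 710000 uV (hypothetical fuse value).
+	 */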
+ for (i = 0; i < vreg->fuse_corner_count; i++) {
+ fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse(
+ msm8996_mmss_fuse_ref_volt[i],
+ MSM8996_MMSS_FUSE_STEP_VOLT, fuse->init_voltage[i],
+ MSM8996_MMSS_VOLTAGE_FUSE_SIZE);
+ cpr3_info(vreg, "fuse_corner[%d] open-loop=%7d uV\n",
+ i, fuse_volt[i]);
+ }
+
+ rc = cpr3_adjust_fused_open_loop_voltages(vreg, fuse_volt);
+ if (rc) {
+ cpr3_err(vreg, "fused open-loop voltage adjustment failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ allow_interpolation = of_property_read_bool(node,
+ "qcom,allow-voltage-interpolation");
+
+ for (i = 1; i < vreg->fuse_corner_count; i++) {
+ if (fuse_volt[i] < fuse_volt[i - 1]) {
+ cpr3_debug(vreg, "fuse corner %d voltage=%d uV < fuse corner %d voltage=%d uV; overriding: fuse corner %d voltage=%d\n",
+ i, fuse_volt[i], i - 1, fuse_volt[i - 1],
+ i, fuse_volt[i - 1]);
+ fuse_volt[i] = fuse_volt[i - 1];
+ }
+ }
+
+ if (fuse->limitation == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION)
+ allow_interpolation = false;
+
+ if (!allow_interpolation) {
+ /* Use fused open-loop voltage for lower frequencies. */
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].open_loop_volt
+ = fuse_volt[vreg->corner[i].cpr_fuse_corner];
+ goto done;
+ }
+
+ /* Determine highest corner mapped to each fuse corner */
+ j = vreg->fuse_corner_count - 1;
+ for (i = vreg->corner_count - 1; i >= 0; i--) {
+ if (vreg->corner[i].cpr_fuse_corner == j) {
+ fmax_corner[j] = i;
+ j--;
+ }
+ }
+ if (j >= 0) {
+ cpr3_err(vreg, "invalid fuse corner mapping\n");
+ rc = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Interpolation is not possible for corners mapped to the lowest fuse
+ * corner so use the fuse corner value directly.
+ */
+ for (i = 0; i <= fmax_corner[0]; i++)
+ vreg->corner[i].open_loop_volt = fuse_volt[0];
+
+ /* Interpolate voltages for the higher fuse corners. */
+ for (i = 1; i < vreg->fuse_corner_count; i++) {
+ freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+ volt_low = fuse_volt[i - 1];
+ freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+ volt_high = fuse_volt[i];
+
+ for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+ vreg->corner[j].open_loop_volt = cpr3_interpolate(
+ freq_low, volt_low, freq_high, volt_high,
+ vreg->corner[j].proc_freq);
+ }
+
+done:
+ if (rc == 0) {
+ cpr3_debug(vreg, "unadjusted per-corner open-loop voltages:\n");
+ for (i = 0; i < vreg->corner_count; i++)
+ cpr3_debug(vreg, "open-loop[%2d] = %d uV\n", i,
+ vreg->corner[i].open_loop_volt);
+
+ rc = cpr3_adjust_open_loop_voltages(vreg);
+ if (rc)
+ cpr3_err(vreg, "open-loop voltage adjustment failed, rc=%d\n",
+ rc);
+ }
+
+ kfree(fuse_volt);
+ kfree(fmax_corner);
+ return rc;
+}
+
+/**
+ * cpr3_mmss_print_settings() - print out MMSS CPR configuration settings into
+ * the kernel log for debugging purposes
+ * @vreg: Pointer to the CPR3 regulator
+ */
+static void cpr3_mmss_print_settings(struct cpr3_regulator *vreg)
+{
+ struct cpr3_corner *corner;
+ int i;
+
+ cpr3_debug(vreg, "Corner: Frequency (Hz), Fuse Corner, Floor (uV), Open-Loop (uV), Ceiling (uV)\n");
+ for (i = 0; i < vreg->corner_count; i++) {
+ corner = &vreg->corner[i];
+ cpr3_debug(vreg, "%3d: %10u, %2d, %7d, %7d, %7d\n",
+ i, corner->proc_freq, corner->cpr_fuse_corner,
+ corner->floor_volt, corner->open_loop_volt,
+ corner->ceiling_volt);
+ }
+}
+
+/**
+ * cpr3_mmss_init_aging() - perform MMSS CPR3 controller specific
+ * aging initializations
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_mmss_init_aging(struct cpr3_controller *ctrl)
+{
+ struct cpr3_msm8996_mmss_fuses *fuse;
+ struct cpr3_regulator *vreg;
+ u32 aging_ro_scale;
+ int rc;
+
+ vreg = &ctrl->thread[0].vreg[0];
+
+ ctrl->aging_required = vreg->aging_allowed;
+ fuse = vreg->platform_fuses;
+
+ if (!ctrl->aging_required || !fuse)
+ return 0;
+
+ rc = cpr3_parse_array_property(vreg, "qcom,cpr-aging-ro-scaling-factor",
+ 1, &aging_ro_scale);
+ if (rc)
+ return rc;
+
+ if (aging_ro_scale == 0) {
+ cpr3_err(ctrl, "aging RO scaling factor is invalid: %u\n",
+ aging_ro_scale);
+ return -EINVAL;
+ }
+
+ ctrl->aging_vdd_mode = REGULATOR_MODE_NORMAL;
+ ctrl->aging_complete_vdd_mode = REGULATOR_MODE_IDLE;
+
+ ctrl->aging_sensor_count = 1;
+ ctrl->aging_sensor = kzalloc(sizeof(*ctrl->aging_sensor), GFP_KERNEL);
+ if (!ctrl->aging_sensor)
+ return -ENOMEM;
+
+ ctrl->aging_sensor->sensor_id = MSM8996_MMSS_AGING_SENSOR_ID;
+ ctrl->aging_sensor->bypass_mask[0] = MSM8996_MMSS_AGING_BYPASS_MASK0;
+ ctrl->aging_sensor->ro_scale = aging_ro_scale;
+
+ ctrl->aging_sensor->init_quot_diff
+ = cpr3_convert_open_loop_voltage_fuse(0,
+ MSM8996_MMSS_AGING_INIT_QUOT_DIFF_SCALE,
+ fuse->aging_init_quot_diff,
+ MSM8996_MMSS_AGING_INIT_QUOT_DIFF_SIZE);
+
+ cpr3_debug(ctrl, "sensor %u aging init quotient diff = %d, aging RO scale = %u QUOT/V\n",
+ ctrl->aging_sensor->sensor_id,
+ ctrl->aging_sensor->init_quot_diff,
+ ctrl->aging_sensor->ro_scale);
+
+ return 0;
+}
+
+/**
+ * cpr3_mmss_init_thread() - perform all steps necessary to initialize the
+ * configuration data for a CPR3 thread
+ * @thread: Pointer to the CPR3 thread
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_mmss_init_thread(struct cpr3_thread *thread)
+{
+ struct cpr3_regulator *vreg = &thread->vreg[0];
+ struct cpr3_msm8996_mmss_fuses *fuse;
+ int rc;
+
+ rc = cpr3_parse_common_thread_data(thread);
+ if (rc) {
+ cpr3_err(vreg, "unable to read CPR thread data from device tree, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_msm8996_mmss_read_fuse_data(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to read CPR fuse data, rc=%d\n", rc);
+ return rc;
+ }
+
+ fuse = vreg->platform_fuses;
+ if (fuse->limitation == MSM8996_CPR_LIMITATION_UNSUPPORTED) {
+ cpr3_err(vreg, "this chip requires an unsupported voltage\n");
+ return -EPERM;
+ } else if (fuse->limitation
+ == MSM8996_CPR_LIMITATION_NO_CPR_OR_INTERPOLATION) {
+ thread->ctrl->cpr_allowed_hw = false;
+ }
+
+ rc = cpr3_mmss_parse_corner_data(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to read CPR corner data from device tree, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_mmss_adjust_target_quotients(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to adjust target quotients, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_msm8996_mmss_calculate_open_loop_voltages(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to calculate open-loop voltages, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_limit_open_loop_voltages(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to limit open-loop voltages, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr3_open_loop_voltage_as_ceiling(vreg);
+
+ rc = cpr3_limit_floor_voltages(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to limit floor voltages, rc=%d\n", rc);
+ return rc;
+ }
+
+ cpr3_mmss_print_settings(vreg);
+
+ return 0;
+}
+
+/**
+ * cpr3_mmss_init_controller() - perform MMSS CPR3 controller specific
+ * initializations
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_mmss_init_controller(struct cpr3_controller *ctrl)
+{
+ int rc;
+
+ rc = cpr3_parse_common_ctrl_data(ctrl);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "unable to parse common controller data, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ ctrl->sensor_count = MSM8996_MMSS_CPR_SENSOR_COUNT;
+
+ /*
+ * MMSS only has one thread (0) so the zeroed array does not need
+ * further modification.
+ */
+ ctrl->sensor_owner = devm_kcalloc(ctrl->dev, ctrl->sensor_count,
+ sizeof(*ctrl->sensor_owner), GFP_KERNEL);
+ if (!ctrl->sensor_owner)
+ return -ENOMEM;
+
+ ctrl->cpr_clock_rate = MSM8996_MMSS_CPR_CLOCK_RATE;
+ ctrl->ctrl_type = CPR_CTRL_TYPE_CPR3;
+
+ ctrl->iface_clk = devm_clk_get(ctrl->dev, "iface_clk");
+ if (IS_ERR(ctrl->iface_clk)) {
+ rc = PTR_ERR(ctrl->iface_clk);
+ if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "unable to request interface clock, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ ctrl->bus_clk = devm_clk_get(ctrl->dev, "bus_clk");
+ if (IS_ERR(ctrl->bus_clk)) {
+ rc = PTR_ERR(ctrl->bus_clk);
+ if (rc != -EPROBE_DEFER)
+			cpr3_err(ctrl, "unable to request bus clock, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int cpr3_mmss_regulator_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+ return cpr3_regulator_suspend(ctrl);
+}
+
+static int cpr3_mmss_regulator_resume(struct platform_device *pdev)
+{
+ struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+ return cpr3_regulator_resume(ctrl);
+}
+
+static struct of_device_id cpr_regulator_match_table[] = {
+ { .compatible = "qcom,cpr3-msm8996-mmss-regulator", },
+ {}
+};
+
+static int cpr3_mmss_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cpr3_controller *ctrl;
+ int rc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "Device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = dev;
+ /* Set to false later if anything precludes CPR operation. */
+ ctrl->cpr_allowed_hw = true;
+
+ rc = of_property_read_string(dev->of_node, "qcom,cpr-ctrl-name",
+ &ctrl->name);
+ if (rc) {
+ cpr3_err(ctrl, "unable to read qcom,cpr-ctrl-name, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_map_fuse_base(ctrl, pdev);
+ if (rc) {
+ cpr3_err(ctrl, "could not map fuse base address\n");
+ return rc;
+ }
+
+ rc = cpr3_allocate_threads(ctrl, 0, 0);
+ if (rc) {
+ cpr3_err(ctrl, "failed to allocate CPR thread array, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (ctrl->thread_count != 1) {
+ cpr3_err(ctrl, "expected 1 thread but found %d\n",
+ ctrl->thread_count);
+ return -EINVAL;
+ } else if (ctrl->thread[0].vreg_count != 1) {
+ cpr3_err(ctrl, "expected 1 regulator but found %d\n",
+ ctrl->thread[0].vreg_count);
+ return -EINVAL;
+ }
+
+ rc = cpr3_mmss_init_controller(ctrl);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "failed to initialize CPR controller parameters, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_mmss_init_thread(&ctrl->thread[0]);
+ if (rc) {
+ cpr3_err(&ctrl->thread[0].vreg[0], "thread initialization failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_mem_acc_init(&ctrl->thread[0].vreg[0]);
+ if (rc) {
+ cpr3_err(ctrl, "failed to initialize mem-acc configuration, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_mmss_init_aging(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "failed to initialize aging configurations, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, ctrl);
+
+ return cpr3_regulator_register(pdev, ctrl);
+}
+
+static int cpr3_mmss_regulator_remove(struct platform_device *pdev)
+{
+ struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+ return cpr3_regulator_unregister(ctrl);
+}
+
+static struct platform_driver cpr3_mmss_regulator_driver = {
+ .driver = {
+ .name = "qcom,cpr3-mmss-regulator",
+ .of_match_table = cpr_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = cpr3_mmss_regulator_probe,
+ .remove = cpr3_mmss_regulator_remove,
+ .suspend = cpr3_mmss_regulator_suspend,
+ .resume = cpr3_mmss_regulator_resume,
+};
+
+static int cpr_regulator_init(void)
+{
+ return platform_driver_register(&cpr3_mmss_regulator_driver);
+}
+
+static void cpr_regulator_exit(void)
+{
+ platform_driver_unregister(&cpr3_mmss_regulator_driver);
+}
+
+MODULE_DESCRIPTION("CPR3 MMSS regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(cpr_regulator_init);
+module_exit(cpr_regulator_exit);
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
new file mode 100644
index 000000000000..b8097456109b
--- /dev/null
+++ b/drivers/regulator/cpr3-regulator.c
@@ -0,0 +1,4641 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/kryo-regulator.h>
+
+#include <soc/qcom/spm.h>
+
+#include "cpr3-regulator.h"
+
+#define CPR3_REGULATOR_CORNER_INVALID (-1)
+#define CPR3_RO_MASK GENMASK(CPR3_RO_COUNT - 1, 0)
+
+/* CPR3 registers */
+#define CPR3_REG_CPR_CTL 0x4
+#define CPR3_CPR_CTL_LOOP_EN_MASK BIT(0)
+#define CPR3_CPR_CTL_LOOP_ENABLE BIT(0)
+#define CPR3_CPR_CTL_LOOP_DISABLE 0
+#define CPR3_CPR_CTL_IDLE_CLOCKS_MASK GENMASK(5, 1)
+#define CPR3_CPR_CTL_IDLE_CLOCKS_SHIFT 1
+#define CPR3_CPR_CTL_COUNT_MODE_MASK GENMASK(7, 6)
+#define CPR3_CPR_CTL_COUNT_MODE_SHIFT 6
+#define CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_MIN 0
+#define CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_MAX 1
+#define CPR3_CPR_CTL_COUNT_MODE_STAGGERED 2
+#define CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_AGE 3
+#define CPR3_CPR_CTL_COUNT_REPEAT_MASK GENMASK(31, 9)
+#define CPR3_CPR_CTL_COUNT_REPEAT_SHIFT 9
+
+#define CPR3_REG_CPR_STATUS 0x8
+#define CPR3_CPR_STATUS_BUSY_MASK BIT(0)
+#define CPR3_CPR_STATUS_AGING_MEASUREMENT_MASK BIT(1)
+
+/*
+ * This register is not present on controllers that support HW closed-loop,
+ * with the exception of the CPR4 APSS controller.
+ */
+#define CPR3_REG_CPR_TIMER_AUTO_CONT 0xC
+
+#define CPR3_REG_CPR_STEP_QUOT 0x14
+#define CPR3_CPR_STEP_QUOT_MIN_MASK GENMASK(5, 0)
+#define CPR3_CPR_STEP_QUOT_MIN_SHIFT 0
+#define CPR3_CPR_STEP_QUOT_MAX_MASK GENMASK(11, 6)
+#define CPR3_CPR_STEP_QUOT_MAX_SHIFT 6
+
+#define CPR3_REG_GCNT(ro) (0xA0 + 0x4 * (ro))
+
+#define CPR3_REG_SENSOR_BYPASS_WRITE(sensor) (0xE0 + 0x4 * ((sensor) / 32))
+#define CPR3_REG_SENSOR_BYPASS_WRITE_BANK(bank) (0xE0 + 0x4 * (bank))
+
+#define CPR3_REG_SENSOR_MASK_WRITE(sensor) (0x120 + 0x4 * ((sensor) / 32))
+#define CPR3_REG_SENSOR_MASK_WRITE_BANK(bank) (0x120 + 0x4 * (bank))
+#define CPR3_REG_SENSOR_MASK_READ(sensor) (0x140 + 0x4 * ((sensor) / 32))
+
+#define CPR3_REG_SENSOR_OWNER(sensor) (0x200 + 0x4 * (sensor))
+
+#define CPR3_REG_CONT_CMD 0x800
+#define CPR3_CONT_CMD_ACK 0x1
+#define CPR3_CONT_CMD_NACK 0x0
+
+#define CPR3_REG_THRESH(thread) (0x808 + 0x440 * (thread))
+#define CPR3_THRESH_CONS_DOWN_MASK GENMASK(3, 0)
+#define CPR3_THRESH_CONS_DOWN_SHIFT 0
+#define CPR3_THRESH_CONS_UP_MASK GENMASK(7, 4)
+#define CPR3_THRESH_CONS_UP_SHIFT 4
+#define CPR3_THRESH_DOWN_THRESH_MASK GENMASK(12, 8)
+#define CPR3_THRESH_DOWN_THRESH_SHIFT 8
+#define CPR3_THRESH_UP_THRESH_MASK GENMASK(17, 13)
+#define CPR3_THRESH_UP_THRESH_SHIFT 13
+
+#define CPR3_REG_RO_MASK(thread) (0x80C + 0x440 * (thread))
+
+#define CPR3_REG_RESULT0(thread) (0x810 + 0x440 * (thread))
+#define CPR3_RESULT0_BUSY_MASK BIT(0)
+#define CPR3_RESULT0_STEP_DN_MASK BIT(1)
+#define CPR3_RESULT0_STEP_UP_MASK BIT(2)
+#define CPR3_RESULT0_ERROR_STEPS_MASK GENMASK(7, 3)
+#define CPR3_RESULT0_ERROR_STEPS_SHIFT 3
+#define CPR3_RESULT0_ERROR_MASK GENMASK(19, 8)
+#define CPR3_RESULT0_ERROR_SHIFT 8
+#define CPR3_RESULT0_NEGATIVE_MASK BIT(20)
+
+#define CPR3_REG_RESULT1(thread) (0x814 + 0x440 * (thread))
+#define CPR3_RESULT1_QUOT_MIN_MASK GENMASK(11, 0)
+#define CPR3_RESULT1_QUOT_MIN_SHIFT 0
+#define CPR3_RESULT1_QUOT_MAX_MASK GENMASK(23, 12)
+#define CPR3_RESULT1_QUOT_MAX_SHIFT 12
+#define CPR3_RESULT1_RO_MIN_MASK GENMASK(27, 24)
+#define CPR3_RESULT1_RO_MIN_SHIFT 24
+#define CPR3_RESULT1_RO_MAX_MASK GENMASK(31, 28)
+#define CPR3_RESULT1_RO_MAX_SHIFT 28
+
+#define CPR3_REG_RESULT2(thread) (0x818 + 0x440 * (thread))
+#define CPR3_RESULT2_STEP_QUOT_MIN_MASK GENMASK(5, 0)
+#define CPR3_RESULT2_STEP_QUOT_MIN_SHIFT 0
+#define CPR3_RESULT2_STEP_QUOT_MAX_MASK GENMASK(11, 6)
+#define CPR3_RESULT2_STEP_QUOT_MAX_SHIFT 6
+#define CPR3_RESULT2_SENSOR_MIN_MASK GENMASK(23, 16)
+#define CPR3_RESULT2_SENSOR_MIN_SHIFT 16
+#define CPR3_RESULT2_SENSOR_MAX_MASK GENMASK(31, 24)
+#define CPR3_RESULT2_SENSOR_MAX_SHIFT 24
+
+#define CPR3_REG_IRQ_EN 0x81C
+#define CPR3_REG_IRQ_CLEAR 0x820
+#define CPR3_REG_IRQ_STATUS 0x824
+#define CPR3_IRQ_UP BIT(3)
+#define CPR3_IRQ_MID BIT(2)
+#define CPR3_IRQ_DOWN BIT(1)
+
+#define CPR3_REG_TARGET_QUOT(thread, ro) \
+ (0x840 + 0x440 * (thread) + 0x4 * (ro))
+
+/* Registers found only on controllers that support HW closed-loop. */
+#define CPR3_REG_PD_THROTTLE 0xE8
+#define CPR3_PD_THROTTLE_DISABLE 0x0
+
+#define CPR3_REG_HW_CLOSED_LOOP 0x3000
+#define CPR3_HW_CLOSED_LOOP_ENABLE 0x0
+#define CPR3_HW_CLOSED_LOOP_DISABLE 0x1
+
+#define CPR3_REG_CPR_TIMER_MID_CONT 0x3004
+#define CPR3_REG_CPR_TIMER_UP_DN_CONT 0x3008
+
+#define CPR3_REG_LAST_MEASUREMENT 0x7F8
+#define CPR3_LAST_MEASUREMENT_THREAD_DN_SHIFT 0
+#define CPR3_LAST_MEASUREMENT_THREAD_UP_SHIFT 4
+#define CPR3_LAST_MEASUREMENT_THREAD_DN(thread) \
+ (BIT(thread) << CPR3_LAST_MEASUREMENT_THREAD_DN_SHIFT)
+#define CPR3_LAST_MEASUREMENT_THREAD_UP(thread) \
+ (BIT(thread) << CPR3_LAST_MEASUREMENT_THREAD_UP_SHIFT)
+#define CPR3_LAST_MEASUREMENT_AGGR_DN BIT(8)
+#define CPR3_LAST_MEASUREMENT_AGGR_MID BIT(9)
+#define CPR3_LAST_MEASUREMENT_AGGR_UP BIT(10)
+#define CPR3_LAST_MEASUREMENT_VALID BIT(11)
+#define CPR3_LAST_MEASUREMENT_SAW_ERROR BIT(12)
+#define CPR3_LAST_MEASUREMENT_PD_BYPASS_MASK GENMASK(23, 16)
+#define CPR3_LAST_MEASUREMENT_PD_BYPASS_SHIFT 16
+
+/* CPR4 controller specific registers and bit definitions */
+#define CPR4_REG_SAW_ERROR_STEP_LIMIT 0x7A4
+#define CPR4_SAW_ERROR_STEP_LIMIT_UP_MASK GENMASK(4, 0)
+#define CPR4_SAW_ERROR_STEP_LIMIT_UP_SHIFT 0
+#define CPR4_SAW_ERROR_STEP_LIMIT_DN_MASK GENMASK(9, 5)
+#define CPR4_SAW_ERROR_STEP_LIMIT_DN_SHIFT 5
+
+#define CPR4_REG_MARGIN_ADJ_CTL 0x7F8
+#define CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK BIT(4)
+#define CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE BIT(4)
+#define CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE 0
+#define CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_MASK GENMASK(16, 12)
+#define CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_SHIFT 12
+
+/*
+ * The amount of time to wait for the CPR controller to become idle when
+ * performing an aging measurement.
+ */
+#define CPR3_AGING_MEASUREMENT_TIMEOUT_NS 5000000
+
+/*
+ * The number of individual aging measurements to perform which are then
+ * averaged together in order to determine the final aging adjustment value.
+ */
+#define CPR3_AGING_MEASUREMENT_ITERATIONS 16
+
+/*
+ * Aging measurements for the aged and unaged ring oscillators take place a few
+ * microseconds apart. If the vdd-supply voltage fluctuates between the two
+ * measurements, then the difference between them will be incorrect. The
+ * difference could end up too high or too low. This constant defines the
+ * number of lowest and highest measurements to ignore when averaging.
+ */
+#define CPR3_AGING_MEASUREMENT_FILTER 3
+
+/*
+ * The number of times to attempt the full aging measurement sequence before
+ * declaring a measurement failure.
+ */
+#define CPR3_AGING_RETRY_COUNT 5
+
+/*
+ * The maximum time to wait in microseconds for a CPR register write to
+ * complete.
+ */
+#define CPR3_REGISTER_WRITE_DELAY_US 200
+
+static DEFINE_MUTEX(cpr3_controller_list_mutex);
+static LIST_HEAD(cpr3_controller_list);
+static struct dentry *cpr3_debugfs_base;
+
+/**
+ * cpr3_read() - read four bytes from the memory address specified
+ * @ctrl: Pointer to the CPR3 controller
+ * @offset: Offset in bytes from the CPR3 controller's base address
+ *
+ * Return: memory address value
+ */
+static inline u32 cpr3_read(struct cpr3_controller *ctrl, u32 offset)
+{
+ if (!ctrl->cpr_enabled) {
+ cpr3_err(ctrl, "CPR register reads are not possible when CPR clocks are disabled\n");
+ return 0;
+ }
+
+ return readl_relaxed(ctrl->cpr_ctrl_base + offset);
+}
+
+/**
+ * cpr3_write() - write four bytes to the memory address specified
+ * @ctrl: Pointer to the CPR3 controller
+ * @offset: Offset in bytes from the CPR3 controller's base address
+ * @value: Value to write to the memory address
+ *
+ * Return: none
+ */
+static inline void cpr3_write(struct cpr3_controller *ctrl, u32 offset,
+ u32 value)
+{
+ if (!ctrl->cpr_enabled) {
+ cpr3_err(ctrl, "CPR register writes are not possible when CPR clocks are disabled\n");
+ return;
+ }
+
+ writel_relaxed(value, ctrl->cpr_ctrl_base + offset);
+}
+
+/**
+ * cpr3_masked_write() - perform a read-modify-write sequence so that only
+ * masked bits are modified
+ * @ctrl: Pointer to the CPR3 controller
+ * @offset: Offset in bytes from the CPR3 controller's base address
+ * @mask: Mask identifying the bits that should be modified
+ * @value: Value to write to the memory address
+ *
+ * Return: none
+ */
+static inline void cpr3_masked_write(struct cpr3_controller *ctrl, u32 offset,
+ u32 mask, u32 value)
+{
+ u32 reg_val, orig_val;
+
+ if (!ctrl->cpr_enabled) {
+ cpr3_err(ctrl, "CPR register writes are not possible when CPR clocks are disabled\n");
+ return;
+ }
+
+ reg_val = orig_val = readl_relaxed(ctrl->cpr_ctrl_base + offset);
+ reg_val &= ~mask;
+ reg_val |= value & mask;
+
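+ /* Skip the write if the masked update leaves the register value unchanged. */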
+ if (reg_val != orig_val)
+ writel_relaxed(reg_val, ctrl->cpr_ctrl_base + offset);
+}
+
+/**
+ * cpr3_ctrl_loop_enable() - enable the CPR sensing loop for a given controller
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: none
+ */
+static inline void cpr3_ctrl_loop_enable(struct cpr3_controller *ctrl)
+{
+ if (ctrl->cpr_enabled)
+ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL,
+ CPR3_CPR_CTL_LOOP_EN_MASK, CPR3_CPR_CTL_LOOP_ENABLE);
+}
+
+/**
+ * cpr3_ctrl_loop_disable() - disable the CPR sensing loop for a given
+ * controller
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: none
+ */
+static inline void cpr3_ctrl_loop_disable(struct cpr3_controller *ctrl)
+{
+ if (ctrl->cpr_enabled)
+ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL,
+ CPR3_CPR_CTL_LOOP_EN_MASK, CPR3_CPR_CTL_LOOP_DISABLE);
+}
+
+/**
+ * cpr3_clock_enable() - prepare and enable all clocks used by this CPR3
+ * controller
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_clock_enable(struct cpr3_controller *ctrl)
+{
+ int rc;
+
+ rc = clk_prepare_enable(ctrl->bus_clk);
+ if (rc) {
+ cpr3_err(ctrl, "failed to enable bus clock, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = clk_prepare_enable(ctrl->iface_clk);
+ if (rc) {
+ cpr3_err(ctrl, "failed to enable interface clock, rc=%d\n", rc);
+ clk_disable_unprepare(ctrl->bus_clk);
+ return rc;
+ }
+
+ rc = clk_prepare_enable(ctrl->core_clk);
+ if (rc) {
+ cpr3_err(ctrl, "failed to enable core clock, rc=%d\n", rc);
+ clk_disable_unprepare(ctrl->iface_clk);
+ clk_disable_unprepare(ctrl->bus_clk);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_clock_disable() - disable and unprepare all clocks used by this CPR3
+ * controller
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: none
+ */
+static void cpr3_clock_disable(struct cpr3_controller *ctrl)
+{
+ clk_disable_unprepare(ctrl->core_clk);
+ clk_disable_unprepare(ctrl->iface_clk);
+ clk_disable_unprepare(ctrl->bus_clk);
+}
+
+/**
+ * cpr3_closed_loop_enable() - enable logical CPR closed-loop operation
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_closed_loop_enable(struct cpr3_controller *ctrl)
+{
+ int rc;
+
+ if (!ctrl->cpr_allowed_hw || !ctrl->cpr_allowed_sw) {
+ cpr3_err(ctrl, "cannot enable closed-loop CPR operation because it is disallowed\n");
+ return -EPERM;
+ } else if (ctrl->cpr_enabled) {
+ /* Already enabled */
+ return 0;
+ } else if (ctrl->cpr_suspended) {
+ /*
+ * CPR must remain disabled as the system is entering suspend.
+ */
+ return 0;
+ }
+
+ rc = cpr3_clock_enable(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "unable to enable CPR clocks, rc=%d\n", rc);
+ return rc;
+ }
+
+ ctrl->cpr_enabled = true;
+ cpr3_debug(ctrl, "CPR closed-loop operation enabled\n");
+
+ return 0;
+}
+
+/**
+ * cpr3_closed_loop_disable() - disable logical CPR closed-loop operation
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static inline int cpr3_closed_loop_disable(struct cpr3_controller *ctrl)
+{
+ if (!ctrl->cpr_enabled) {
+ /* Already disabled */
+ return 0;
+ }
+
+ cpr3_clock_disable(ctrl);
+ ctrl->cpr_enabled = false;
+ cpr3_debug(ctrl, "CPR closed-loop operation disabled\n");
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_get_gcnt() - returns the GCNT register value corresponding
+ * to the clock rate and sensor time of the CPR3 controller
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: GCNT value
+ */
+static u32 cpr3_regulator_get_gcnt(struct cpr3_controller *ctrl)
+{
+ u64 temp;
+ unsigned int remainder;
+ u32 gcnt;
+
+ temp = (u64)ctrl->cpr_clock_rate * (u64)ctrl->sensor_time;
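+ /* Round up so that the programmed interval fully covers sensor_time. */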
+ remainder = do_div(temp, 1000000000);
+ if (remainder)
+ temp++;
+ /*
+ * GCNT == 0 corresponds to a single ref clock measurement interval, so
+ * offset the GCNT value by 1.
+ */
+ gcnt = temp - 1;
+
+ return gcnt;
+}
+
+/**
+ * cpr3_regulator_init_thread() - performs hardware initialization of CPR
+ * thread registers
+ * @thread: Pointer to the CPR3 thread
+ *
+ * CPR interface/bus clocks must be enabled before calling this function.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_thread(struct cpr3_thread *thread)
+{
+ u32 reg;
+
+ reg = (thread->consecutive_up << CPR3_THRESH_CONS_UP_SHIFT)
+ & CPR3_THRESH_CONS_UP_MASK;
+ reg |= (thread->consecutive_down << CPR3_THRESH_CONS_DOWN_SHIFT)
+ & CPR3_THRESH_CONS_DOWN_MASK;
+ reg |= (thread->up_threshold << CPR3_THRESH_UP_THRESH_SHIFT)
+ & CPR3_THRESH_UP_THRESH_MASK;
+ reg |= (thread->down_threshold << CPR3_THRESH_DOWN_THRESH_SHIFT)
+ & CPR3_THRESH_DOWN_THRESH_MASK;
+
+ cpr3_write(thread->ctrl, CPR3_REG_THRESH(thread->thread_id), reg);
+
+ /*
+ * Mask all RO's initially so that an unused thread does not contribute
+ * to the closed-loop voltage.
+ */
+ cpr3_write(thread->ctrl, CPR3_REG_RO_MASK(thread->thread_id),
+ CPR3_RO_MASK);
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_init_cpr4() - performs hardware initialization at the
+ * controller and thread level required for CPR4 operation.
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * CPR interface/bus clocks must be enabled before calling this function.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_cpr4(struct cpr3_controller *ctrl)
+{
+ u32 pmic_step_size = 1;
+
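+ /* step_volt is in uV; the SAW expects the PMIC step size in mV. */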
+ if (ctrl->saw_use_unit_mV)
+ pmic_step_size = ctrl->step_volt / 1000;
+ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+ CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_MASK,
+ (pmic_step_size
+ << CPR4_MARGIN_ADJ_CTL_PMIC_STEP_SIZE_SHIFT));
+
+ cpr3_masked_write(ctrl, CPR4_REG_SAW_ERROR_STEP_LIMIT,
+ CPR4_SAW_ERROR_STEP_LIMIT_DN_MASK,
+ (ctrl->down_error_step_limit
+ << CPR4_SAW_ERROR_STEP_LIMIT_DN_SHIFT));
+
+ cpr3_masked_write(ctrl, CPR4_REG_SAW_ERROR_STEP_LIMIT,
+ CPR4_SAW_ERROR_STEP_LIMIT_UP_MASK,
+ (ctrl->up_error_step_limit
+ << CPR4_SAW_ERROR_STEP_LIMIT_UP_SHIFT));
+ return 0;
+}
+
+/**
+ * cpr3_regulator_init_ctrl() - performs hardware initialization of CPR
+ * controller registers
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_ctrl(struct cpr3_controller *ctrl)
+{
+ int i, j, k, m, rc;
+ u32 ro_used = 0;
+ u32 gcnt, cont_dly, up_down_dly, val;
+ u64 temp;
+ char *mode;
+
+ if (ctrl->core_clk) {
+ rc = clk_set_rate(ctrl->core_clk, ctrl->cpr_clock_rate);
+ if (rc) {
+ cpr3_err(ctrl, "clk_set_rate(core_clk, %u) failed, rc=%d\n",
+ ctrl->cpr_clock_rate, rc);
+ return rc;
+ }
+ }
+
+ rc = cpr3_clock_enable(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc);
+ return rc;
+ }
+ ctrl->cpr_enabled = true;
+
+ /* Find all RO's used by any corner of any regulator. */
+ for (i = 0; i < ctrl->thread_count; i++)
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++)
+ for (k = 0; k < ctrl->thread[i].vreg[j].corner_count;
+ k++)
+ for (m = 0; m < CPR3_RO_COUNT; m++)
+ if (ctrl->thread[i].vreg[j].corner[k].
+ target_quot[m])
+ ro_used |= BIT(m);
+
+ /* Configure the GCNT of the RO's that will be used */
+ gcnt = cpr3_regulator_get_gcnt(ctrl);
+ for (i = 0; i < CPR3_RO_COUNT; i++)
+ if (ro_used & BIT(i))
+ cpr3_write(ctrl, CPR3_REG_GCNT(i), gcnt);
+
+ /* Configure the loop delay time */
+ temp = (u64)ctrl->cpr_clock_rate * (u64)ctrl->loop_time;
+ do_div(temp, 1000000000);
+ cont_dly = temp;
+ if (ctrl->supports_hw_closed_loop
+ && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3)
+ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_MID_CONT, cont_dly);
+ else
+ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_AUTO_CONT, cont_dly);
+
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+ temp = (u64)ctrl->cpr_clock_rate *
+ (u64)ctrl->up_down_delay_time;
+ do_div(temp, 1000000000);
+ up_down_dly = temp;
+ if (ctrl->supports_hw_closed_loop)
+ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_UP_DN_CONT,
+ up_down_dly);
+ cpr3_debug(ctrl, "up_down_dly=%u, up_down_delay_time=%u ns\n",
+ up_down_dly, ctrl->up_down_delay_time);
+ }
+
+ cpr3_debug(ctrl, "cpr_clock_rate=%u HZ, sensor_time=%u ns, loop_time=%u ns, gcnt=%u, cont_dly=%u\n",
+ ctrl->cpr_clock_rate, ctrl->sensor_time, ctrl->loop_time,
+ gcnt, cont_dly);
+
+ /* Configure CPR sensor operation */
+ val = (ctrl->idle_clocks << CPR3_CPR_CTL_IDLE_CLOCKS_SHIFT)
+ & CPR3_CPR_CTL_IDLE_CLOCKS_MASK;
+ val |= (ctrl->count_mode << CPR3_CPR_CTL_COUNT_MODE_SHIFT)
+ & CPR3_CPR_CTL_COUNT_MODE_MASK;
+ val |= (ctrl->count_repeat << CPR3_CPR_CTL_COUNT_REPEAT_SHIFT)
+ & CPR3_CPR_CTL_COUNT_REPEAT_MASK;
+ cpr3_write(ctrl, CPR3_REG_CPR_CTL, val);
+
+ cpr3_debug(ctrl, "idle_clocks=%u, count_mode=%u, count_repeat=%u; CPR_CTL=0x%08X\n",
+ ctrl->idle_clocks, ctrl->count_mode, ctrl->count_repeat, val);
+
+ /* Configure CPR default step quotients */
+ val = (ctrl->step_quot_init_min << CPR3_CPR_STEP_QUOT_MIN_SHIFT)
+ & CPR3_CPR_STEP_QUOT_MIN_MASK;
+ val |= (ctrl->step_quot_init_max << CPR3_CPR_STEP_QUOT_MAX_SHIFT)
+ & CPR3_CPR_STEP_QUOT_MAX_MASK;
+ cpr3_write(ctrl, CPR3_REG_CPR_STEP_QUOT, val);
+
+ cpr3_debug(ctrl, "step_quot_min=%u, step_quot_max=%u; STEP_QUOT=0x%08X\n",
+ ctrl->step_quot_init_min, ctrl->step_quot_init_max, val);
+
+ /* Configure the CPR sensor ownership */
+ for (i = 0; i < ctrl->sensor_count; i++)
+ cpr3_write(ctrl, CPR3_REG_SENSOR_OWNER(i),
+ ctrl->sensor_owner[i]);
+
+ /* Configure per-thread registers */
+ for (i = 0; i < ctrl->thread_count; i++) {
+ rc = cpr3_regulator_init_thread(&ctrl->thread[i]);
+ if (rc) {
+ cpr3_err(ctrl, "CPR thread register initialization failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (ctrl->supports_hw_closed_loop) {
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+ ctrl->use_hw_closed_loop
+ ? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE
+ : CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+ } else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+ cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP,
+ ctrl->use_hw_closed_loop
+ ? CPR3_HW_CLOSED_LOOP_ENABLE
+ : CPR3_HW_CLOSED_LOOP_DISABLE);
+
+ cpr3_debug(ctrl, "PD_THROTTLE=0x%08X\n",
+ ctrl->proc_clock_throttle);
+ }
+ }
+
+ if (ctrl->use_hw_closed_loop) {
+ rc = regulator_enable(ctrl->vdd_limit_regulator);
+ if (rc) {
+ cpr3_err(ctrl, "CPR limit regulator enable failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = msm_spm_avs_enable_irq(0, MSM_SPM_AVS_IRQ_MAX);
+ if (rc) {
+ cpr3_err(ctrl, "could not enable max IRQ, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+ rc = cpr3_regulator_init_cpr4(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "CPR4-specific controller initialization failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ /* Ensure that all register writes complete before disabling clocks. */
+ wmb();
+
+ cpr3_clock_disable(ctrl);
+ ctrl->cpr_enabled = false;
+
+ if (!ctrl->cpr_allowed_sw || !ctrl->cpr_allowed_hw)
+ mode = "open-loop";
+ else if (ctrl->supports_hw_closed_loop)
+ mode = ctrl->use_hw_closed_loop
+ ? "HW closed-loop" : "SW closed-loop";
+ else
+ mode = "closed-loop";
+
+ cpr3_info(ctrl, "Default CPR mode = %s", mode);
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_set_target_quot() - configure the target quotient for each
+ * RO of the CPR3 thread and set the RO mask
+ * @thread: Pointer to the CPR3 thread
+ *
+ * Return: none
+ */
+static void cpr3_regulator_set_target_quot(struct cpr3_thread *thread)
+{
+ u32 new_quot, last_quot;
+ int i;
+
+ if (thread->aggr_corner.ro_mask == CPR3_RO_MASK
+ && thread->last_closed_loop_aggr_corner.ro_mask == CPR3_RO_MASK) {
+ /* Avoid writing target quotients since all RO's are masked. */
+ return;
+ } else if (thread->aggr_corner.ro_mask == CPR3_RO_MASK) {
+ cpr3_write(thread->ctrl, CPR3_REG_RO_MASK(thread->thread_id),
+ CPR3_RO_MASK);
+ thread->last_closed_loop_aggr_corner.ro_mask = CPR3_RO_MASK;
+ /*
+ * Only the RO_MASK register needs to be written since all
+ * RO's are masked.
+ */
+ return;
+ } else if (thread->aggr_corner.ro_mask
+ != thread->last_closed_loop_aggr_corner.ro_mask) {
+ cpr3_write(thread->ctrl, CPR3_REG_RO_MASK(thread->thread_id),
+ thread->aggr_corner.ro_mask);
+ }
+
+ for (i = 0; i < CPR3_RO_COUNT; i++) {
+ new_quot = thread->aggr_corner.target_quot[i];
+ last_quot = thread->last_closed_loop_aggr_corner.target_quot[i];
+ if (new_quot != last_quot)
+ cpr3_write(thread->ctrl,
+ CPR3_REG_TARGET_QUOT(thread->thread_id, i),
+ new_quot);
+ }
+
+ thread->last_closed_loop_aggr_corner = thread->aggr_corner;
+}
+
+/**
+ * cpr3_update_vreg_closed_loop_volt() - update the last known settled
+ * closed loop voltage for a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ * @vdd_volt: Last known settled voltage in microvolts for the
+ * VDD supply
+ * @reg_last_measurement: Value read from the LAST_MEASUREMENT register
+ *
+ * Return: none
+ */
+static void cpr3_update_vreg_closed_loop_volt(struct cpr3_regulator *vreg,
+ int vdd_volt, u32 reg_last_measurement)
+{
+ bool step_dn, step_up, aggr_step_up, aggr_step_dn, aggr_step_mid;
+ bool valid, pd_valid, saw_error;
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ struct cpr3_corner *corner;
+ u32 id;
+
+ if (vreg->last_closed_loop_corner == CPR3_REGULATOR_CORNER_INVALID)
+ return;
+ else
+ corner = &vreg->corner[vreg->last_closed_loop_corner];
+
+ if (vreg->thread->last_closed_loop_aggr_corner.ro_mask
+ == CPR3_RO_MASK || !vreg->aggregated) {
+ return;
+ } else if (!ctrl->cpr_enabled || !ctrl->last_corner_was_closed_loop) {
+ return;
+ } else if (ctrl->thread_count == 1
+ && vdd_volt >= corner->floor_volt
+ && vdd_volt <= corner->ceiling_volt) {
+ corner->last_volt = vdd_volt;
+ cpr3_debug(vreg, "last_volt updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d\n",
+ vreg->last_closed_loop_corner, corner->last_volt,
+ vreg->last_closed_loop_corner,
+ corner->ceiling_volt,
+ vreg->last_closed_loop_corner,
+ corner->floor_volt);
+ return;
+ } else if (!ctrl->supports_hw_closed_loop) {
+ return;
+ } else if (ctrl->ctrl_type != CPR_CTRL_TYPE_CPR3) {
+ corner->last_volt = vdd_volt;
+ cpr3_debug(vreg, "last_volt updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d\n",
+ vreg->last_closed_loop_corner, corner->last_volt,
+ vreg->last_closed_loop_corner,
+ corner->ceiling_volt,
+ vreg->last_closed_loop_corner,
+ corner->floor_volt);
+ return;
+ }
+
+ /* CPR clocks are on and HW closed loop is supported */
+ valid = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_VALID);
+ if (!valid) {
+ cpr3_debug(vreg, "CPR_LAST_VALID_MEASUREMENT=0x%X valid bit not set\n",
+ reg_last_measurement);
+ return;
+ }
+
+ id = vreg->thread->thread_id;
+
+ step_dn
+ = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_THREAD_DN(id));
+ step_up
+ = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_THREAD_UP(id));
+ aggr_step_dn = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_AGGR_DN);
+ aggr_step_mid
+ = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_AGGR_MID);
+ aggr_step_up = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_AGGR_UP);
+ saw_error = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_SAW_ERROR);
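+ /*
+ * The measurement is only usable if at least one of this regulator's
+ * power domains was not bypassed.
+ */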
+ pd_valid
+ = !((((reg_last_measurement & CPR3_LAST_MEASUREMENT_PD_BYPASS_MASK)
+ >> CPR3_LAST_MEASUREMENT_PD_BYPASS_SHIFT)
+ & vreg->pd_bypass_mask) == vreg->pd_bypass_mask);
+
+ if (!pd_valid) {
+ cpr3_debug(vreg, "CPR_LAST_VALID_MEASUREMENT=0x%X, all power domains bypassed\n",
+ reg_last_measurement);
+ return;
+ } else if (step_dn && step_up) {
+ cpr3_err(vreg, "both up and down status bits set, CPR_LAST_VALID_MEASUREMENT=0x%X\n",
+ reg_last_measurement);
+ return;
+ } else if (aggr_step_dn && step_dn && vdd_volt < corner->last_volt
+ && vdd_volt >= corner->floor_volt) {
+ corner->last_volt = vdd_volt;
+ } else if (aggr_step_up && step_up && vdd_volt > corner->last_volt
+ && vdd_volt <= corner->ceiling_volt) {
+ corner->last_volt = vdd_volt;
+ } else if (aggr_step_mid
+ && vdd_volt >= corner->floor_volt
+ && vdd_volt <= corner->ceiling_volt) {
+ corner->last_volt = vdd_volt;
+ } else if (saw_error && (vdd_volt == corner->ceiling_volt
+ || vdd_volt == corner->floor_volt)) {
+ corner->last_volt = vdd_volt;
+ } else {
+ cpr3_debug(vreg, "last_volt not updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d, vdd_volt=%d, CPR_LAST_VALID_MEASUREMENT=0x%X\n",
+ vreg->last_closed_loop_corner, corner->last_volt,
+ vreg->last_closed_loop_corner,
+ corner->ceiling_volt,
+ vreg->last_closed_loop_corner, corner->floor_volt,
+ vdd_volt, reg_last_measurement);
+ return;
+ }
+
+ cpr3_debug(vreg, "last_volt updated: last_volt[%d]=%d, ceiling_volt[%d]=%d, floor_volt[%d]=%d, CPR_LAST_VALID_MEASUREMENT=0x%X\n",
+ vreg->last_closed_loop_corner, corner->last_volt,
+ vreg->last_closed_loop_corner, corner->ceiling_volt,
+ vreg->last_closed_loop_corner, corner->floor_volt,
+ reg_last_measurement);
+}
+
+/**
+ * cpr3_regulator_config_ldo_retention() - configure per-regulator LDO retention
+ * mode
+ * @vreg: Pointer to the CPR3 regulator to configure
+ * @ref_volt: Reference voltage used to determine if LDO retention
+ * mode can be allowed. It corresponds either to the
+ * aggregated floor voltage or the next VDD supply setpoint
+ *
+ * This function determines if a CPR3 regulator's configuration satisfies safe
+ * operating voltages for LDO retention and uses the regulator_allow_bypass()
+ * interface on the LDO retention regulator to enable or disable such feature
+ * accordingly.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_ldo_retention(struct cpr3_regulator *vreg,
+ int ref_volt)
+{
+ struct regulator *ldo_ret_reg = vreg->ldo_ret_regulator;
+ int retention_volt, rc;
+ enum kryo_supply_mode mode;
+
+ retention_volt = regulator_get_voltage(ldo_ret_reg);
+ if (retention_volt < 0) {
+ cpr3_err(vreg, "regulator_get_voltage(ldo_ret) failed, rc=%d\n",
+ retention_volt);
+ return retention_volt;
+
+ }
+
+ mode = ref_volt >= retention_volt + vreg->ldo_min_headroom_volt
+ ? LDO_MODE : BHS_MODE;
+
+ rc = regulator_allow_bypass(ldo_ret_reg, mode);
+ if (rc)
+ cpr3_err(vreg, "regulator_allow_bypass(ldo_ret) == %s failed, rc=%d\n",
+ mode ? "true" : "false", rc);
+
+ return rc;
+}
+
+/**
+ * cpr3_regulator_config_ldo_mem_acc() - configure the mem-acc regulator
+ * corner based upon a future LDO regulator voltage setpoint
+ * @vreg: Pointer to the CPR3 regulator
+ * @new_volt: New voltage in microvolts that the LDO regulator needs
+ * to end up at
+ *
+ * This function determines if a new LDO regulator set point will result
+ * in crossing the voltage threshold that requires reconfiguration of
+ * the mem-acc regulator associated with a CPR3 regulator and if so, performs
+ * the correct sequence to select the correct mem-acc corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_ldo_mem_acc(struct cpr3_regulator *vreg,
+ int new_volt)
+{
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ struct regulator *ldo_reg = vreg->ldo_regulator;
+ struct regulator *mem_acc_reg = vreg->mem_acc_regulator;
+ int mem_acc_volt = ctrl->mem_acc_threshold_volt;
+ int last_volt, safe_volt, mem_acc_corn, rc;
+ enum msm_apm_supply apm_mode;
+
+ if (!mem_acc_reg || !mem_acc_volt || !ldo_reg)
+ return 0;
+
+ apm_mode = msm_apm_get_supply(ctrl->apm);
+ if (apm_mode < 0) {
+ cpr3_err(ctrl, "APM get supply failed, rc=%d\n",
+ apm_mode);
+ return apm_mode;
+ }
+
+ last_volt = regulator_get_voltage(ldo_reg);
+ if (last_volt < 0) {
+ cpr3_err(vreg, "regulator_get_voltage(ldo) failed, rc=%d\n",
+ last_volt);
+ return last_volt;
+ }
+
+ if (((last_volt < mem_acc_volt && mem_acc_volt <= new_volt)
+ || (last_volt >= mem_acc_volt && mem_acc_volt > new_volt))) {
+
+ if (apm_mode == ctrl->apm_high_supply)
+ safe_volt = min(vreg->ldo_max_volt, mem_acc_volt);
+ else
+ safe_volt = min(max(ctrl->system_supply_max_volt -
+ vreg->ldo_max_headroom_volt,
+ mem_acc_volt), vreg->ldo_max_volt);
+
+ rc = regulator_set_voltage(ldo_reg, safe_volt,
+ max(new_volt, last_volt));
+ if (rc) {
+ cpr3_err(ctrl, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+ mem_acc_volt, rc);
+ return rc;
+ }
+
+ mem_acc_corn = new_volt < mem_acc_volt ?
+ ctrl->mem_acc_corner_map[CPR3_MEM_ACC_LOW_CORNER] :
+ ctrl->mem_acc_corner_map[CPR3_MEM_ACC_HIGH_CORNER];
+
+ rc = regulator_set_voltage(mem_acc_reg, mem_acc_corn,
+ mem_acc_corn);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n",
+ 0, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_set_bhs_mode() - configure the LDO regulator associated with
+ * a CPR3 regulator to BHS mode
+ * @vreg: Pointer to the CPR3 regulator
+ * @vdd_volt: Last known settled voltage in microvolts for the VDD
+ * supply
+ * @vdd_ceiling_volt: Last known aggregated ceiling voltage in microvolts for
+ * the VDD supply
+ *
+ * This function performs the necessary steps to switch an LDO regulator
+ * to BHS mode (LDO bypassed mode).
+ */
+static int cpr3_regulator_set_bhs_mode(struct cpr3_regulator *vreg,
+ int vdd_volt, int vdd_ceiling_volt)
+{
+ struct regulator *ldo_reg = vreg->ldo_regulator;
+ int bhs_volt, rc;
+
+ bhs_volt = vdd_volt - vreg->ldo_min_headroom_volt;
+ if (bhs_volt > vreg->ldo_max_volt) {
+ cpr3_debug(vreg, "limited to LDO output of %d uV when switching to BHS mode\n",
+ vreg->ldo_max_volt);
+ bhs_volt = vreg->ldo_max_volt;
+ }
+
+ rc = cpr3_regulator_config_ldo_mem_acc(vreg, bhs_volt);
+ if (rc) {
+ cpr3_err(vreg, "failed to configure mem-acc settings\n");
+ return rc;
+ }
+
+ rc = regulator_set_voltage(ldo_reg, bhs_volt, min(vdd_ceiling_volt,
+ vreg->ldo_max_volt));
+ if (rc) {
+ cpr3_err(vreg, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+ bhs_volt, rc);
+ return rc;
+ }
+
+ rc = regulator_allow_bypass(ldo_reg, BHS_MODE);
+ if (rc) {
+ cpr3_err(vreg, "regulator_allow_bypass(ldo) == %s failed, rc=%d\n",
+ BHS_MODE ? "true" : "false", rc);
+ return rc;
+ }
+ vreg->ldo_regulator_bypass = BHS_MODE;
+
+ return rc;
+}
+
+/**
+ * cpr3_regulator_ldo_apm_prepare() - configure LDO regulators associated
+ * with each CPR3 regulator of a CPR3 controller in preparation
+ * for an APM switch.
+ * @ctrl: Pointer to the CPR3 controller
+ * @new_volt: New voltage in microvolts that the VDD supply
+ * needs to end up at
+ * @last_volt: Last known voltage in microvolts for the VDD supply
+ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max
+ * corner aggregated from all CPR3 threads managed by the
+ * CPR3 controller
+ *
+ * This function ensures LDO regulator hardware requirements are met before
+ * an APM switch is requested. The function must be called as the last step
+ * before switching the APM mode.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_ldo_apm_prepare(struct cpr3_controller *ctrl,
+ int new_volt, int last_volt,
+ struct cpr3_corner *aggr_corner)
+{
+ struct cpr3_regulator *vreg;
+ struct cpr3_corner *current_corner;
+ enum msm_apm_supply apm_mode;
+ int i, j, safe_volt, max_volt, ldo_volt, ref_volt, rc;
+
+ apm_mode = msm_apm_get_supply(ctrl->apm);
+ if (apm_mode < 0) {
+ cpr3_err(ctrl, "APM get supply failed, rc=%d\n", apm_mode);
+ return apm_mode;
+ }
+
+ if (apm_mode == ctrl->apm_low_supply ||
+ new_volt >= ctrl->apm_threshold_volt)
+ return 0;
+
+ /*
+ * Guarantee LDO maximum headroom is not violated when the APM is
+ * switched to the system-supply source.
+ */
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+
+ if (!vreg->vreg_enabled || vreg->current_corner
+ == CPR3_REGULATOR_CORNER_INVALID)
+ continue;
+
+ if (!vreg->ldo_regulator || !vreg->ldo_mode_allowed ||
+ vreg->ldo_regulator_bypass == BHS_MODE)
+ continue;
+
+ /*
+ * If the new VDD configuration does not satisfy
+ * requirements for LDO usage, switch the regulator
+ * to BHS mode. By doing so, the LDO maximum headroom
+ * does not need to be enforced.
+ */
+ current_corner = &vreg->corner[vreg->current_corner];
+ ldo_volt = current_corner->open_loop_volt
+ - vreg->ldo_adjust_volt;
+ ref_volt = ctrl->use_hw_closed_loop ?
+ aggr_corner->floor_volt :
+ new_volt;
+
+ if (ref_volt < ldo_volt + vreg->ldo_min_headroom_volt
+ || ldo_volt < ctrl->system_supply_max_volt -
+ vreg->ldo_max_headroom_volt ||
+ ldo_volt > vreg->ldo_max_volt) {
+ rc = cpr3_regulator_set_bhs_mode(vreg,
+ last_volt, aggr_corner->ceiling_volt);
+ if (rc)
+ return rc;
+ /*
+ * Do not enforce LDO maximum headroom since the
+ * regulator is now configured to BHS mode.
+ */
+ continue;
+ }
+
+ safe_volt = min(max(ldo_volt,
+ ctrl->system_supply_max_volt
+ - vreg->ldo_max_headroom_volt),
+ vreg->ldo_max_volt);
+ max_volt = min(ctrl->system_supply_max_volt,
+ vreg->ldo_max_volt);
+
+ rc = regulator_set_voltage(vreg->ldo_regulator,
+ safe_volt, max_volt);
+ if (rc) {
+ cpr3_err(vreg, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+ safe_volt, rc);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_config_vreg_ldo() - configure the voltage and bypass state for
+ * the LDO regulator associated with a single CPR3 regulator.
+ *
+ * @vreg: Pointer to the CPR3 regulator
+ * @vdd_floor_volt: Last known aggregated floor voltage in microvolts for
+ * the VDD supply
+ * @vdd_ceiling_volt: Last known aggregated ceiling voltage in microvolts for
+ * the VDD supply
+ * @new_volt: New voltage in microvolts that VDD supply needs to
+ * end up at
+ * @last_volt: Last known voltage in microvolts for the VDD supply
+ *
+ * This function performs all relevant LDO or BHS configurations if an LDO
+ * regulator is specified.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_vreg_ldo(struct cpr3_regulator *vreg,
+ int vdd_floor_volt, int vdd_ceiling_volt,
+ int new_volt, int last_volt)
+{
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ struct regulator *ldo_reg = vreg->ldo_regulator;
+ struct cpr3_corner *current_corner;
+ enum msm_apm_supply apm_mode;
+ int rc, ldo_volt, final_ldo_volt, bhs_volt, max_volt, safe_volt;
+ int ref_volt;
+
+ ref_volt = ctrl->use_hw_closed_loop ? vdd_floor_volt :
+ new_volt;
+
+ rc = cpr3_regulator_config_ldo_retention(vreg, ref_volt);
+ if (rc)
+ return rc;
+
+ if (!vreg->vreg_enabled || vreg->current_corner
+ == CPR3_REGULATOR_CORNER_INVALID)
+ return 0;
+
+ current_corner = &vreg->corner[vreg->current_corner];
+ ldo_volt = current_corner->open_loop_volt
+ - vreg->ldo_adjust_volt;
+ bhs_volt = last_volt - vreg->ldo_min_headroom_volt;
+ max_volt = min(vdd_ceiling_volt, vreg->ldo_max_volt);
+
+ if (ref_volt >= ldo_volt + vreg->ldo_min_headroom_volt &&
+ ldo_volt >= ctrl->system_supply_max_volt -
+ vreg->ldo_max_headroom_volt &&
+ bhs_volt >= ctrl->system_supply_max_volt -
+ vreg->ldo_max_headroom_volt &&
+ ldo_volt <= vreg->ldo_max_volt) {
+ /* LDO minimum and maximum headrooms satisfied */
+ apm_mode = msm_apm_get_supply(ctrl->apm);
+ if (apm_mode < 0) {
+ cpr3_err(ctrl, "APM get supply failed, rc=%d\n",
+ apm_mode);
+ return apm_mode;
+ }
+
+ if (vreg->ldo_regulator_bypass == BHS_MODE) {
+ /*
+ * BHS to LDO transition. Configure LDO output
+ * to min(max LDO output, VDD - LDO headroom)
+ * voltage if APM is on high supply source or
+ * min(max(system-supply ceiling - LDO max headroom,
+ * VDD - LDO headroom), max LDO output) if
+ * APM is on low supply source, then switch
+ * regulator mode.
+ */
+ if (apm_mode == ctrl->apm_high_supply)
+ safe_volt = min(vreg->ldo_max_volt, bhs_volt);
+ else
+ safe_volt =
+ min(max(ctrl->system_supply_max_volt -
+ vreg->ldo_max_headroom_volt,
+ bhs_volt),
+ vreg->ldo_max_volt);
+
+ rc = cpr3_regulator_config_ldo_mem_acc(vreg,
+ safe_volt);
+ if (rc) {
+ cpr3_err(vreg, "failed to configure mem-acc settings\n");
+ return rc;
+ }
+
+ rc = regulator_set_voltage(ldo_reg, safe_volt,
+ max_volt);
+ if (rc) {
+ cpr3_err(vreg, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+ safe_volt, rc);
+ return rc;
+ }
+
+ rc = regulator_allow_bypass(ldo_reg, LDO_MODE);
+ if (rc) {
+ cpr3_err(vreg, "regulator_allow_bypass(ldo) == %s failed, rc=%d\n",
+ LDO_MODE ? "true" : "false", rc);
+ return rc;
+ }
+ vreg->ldo_regulator_bypass = LDO_MODE;
+ }
+
+ /* Configure final LDO output voltage */
+ if (apm_mode == ctrl->apm_high_supply)
+ final_ldo_volt = max(ldo_volt,
+ vdd_ceiling_volt -
+ vreg->ldo_max_headroom_volt);
+ else
+ final_ldo_volt = ldo_volt;
+
+ rc = cpr3_regulator_config_ldo_mem_acc(vreg,
+ final_ldo_volt);
+ if (rc) {
+ cpr3_err(vreg, "failed to configure mem-acc settings\n");
+ return rc;
+ }
+
+ rc = regulator_set_voltage(ldo_reg, final_ldo_volt, max_volt);
+ if (rc) {
+ cpr3_err(vreg, "regulator_set_voltage(ldo) == %d failed, rc=%d\n",
+ final_ldo_volt, rc);
+ return rc;
+ }
+ } else {
+ if (vreg->ldo_regulator_bypass == LDO_MODE) {
+ /* LDO to BHS transition */
+ rc = cpr3_regulator_set_bhs_mode(vreg, last_volt,
+ vdd_ceiling_volt);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_config_ldo() - configure the voltage and bypass state for the
+ * LDO regulator associated with each CPR3 regulator of a CPR3
+ * controller
+ * @ctrl: Pointer to the CPR3 controller
+ * @vdd_floor_volt: Last known aggregated floor voltage in microvolts for
+ * the VDD supply
+ * @vdd_ceiling_volt: Last known aggregated ceiling voltage in microvolts for
+ * the VDD supply
+ * @new_volt: New voltage in microvolts that VDD supply needs to
+ * end up at
+ * @last_volt: Last known voltage in microvolts for the VDD supply
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_ldo(struct cpr3_controller *ctrl,
+ int vdd_floor_volt, int vdd_ceiling_volt,
+ int new_volt, int last_volt)
+{
+ struct cpr3_regulator *vreg;
+ int i, j, rc;
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+
+ if (!vreg->ldo_regulator || !vreg->ldo_mode_allowed)
+ continue;
+
+ rc = cpr3_regulator_config_vreg_ldo(vreg,
+ vdd_floor_volt, vdd_ceiling_volt,
+ new_volt, last_volt);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_mem_acc_bhs_used() - determines if mem-acc regulators powered
+ * through a BHS are associated with the CPR3 controller or any of
+ * the CPR3 regulators it controls.
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * This function determines if the CPR3 controller or any of its CPR3 regulators
+ * need to manage mem-acc regulators that are currently powered through a BHS
+ * and whose corner selection is based upon a particular voltage threshold.
+ *
+ * Return: true or false
+ */
+static bool cpr3_regulator_mem_acc_bhs_used(struct cpr3_controller *ctrl)
+{
+ struct cpr3_regulator *vreg;
+ int i, j;
+
+ if (!ctrl->mem_acc_threshold_volt)
+ return false;
+
+ if (ctrl->mem_acc_regulator)
+ return true;
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+
+ if (vreg->mem_acc_regulator &&
+ (!vreg->ldo_regulator ||
+ vreg->ldo_regulator_bypass
+ == BHS_MODE))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * cpr3_regulator_config_bhs_mem_acc() - configure the mem-acc regulator
+ * settings for hardware blocks currently powered through the BHS.
+ * @ctrl: Pointer to the CPR3 controller
+ * @new_volt: New voltage in microvolts that VDD supply needs to
+ * end up at
+ * @last_volt: Pointer to the last known voltage in microvolts for the
+ * VDD supply
+ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max
+ * corner aggregated from all CPR3 threads managed by the
+ * CPR3 controller
+ *
+ * This function programs the mem-acc regulator corners for CPR3 regulators
+ * whose LDO regulators are in bypassed state. The function also handles
+ * CPR3 controllers which utilize mem-acc regulators that operate independently
+ * from the LDO hardware and that must be programmed when the VDD supply
+ * crosses a particular voltage threshold.
+ *
+ * Return: 0 on success, errno on failure. If the VDD supply voltage is
+ * modified, last_volt is updated to reflect the new voltage setpoint.
+ */
+static int cpr3_regulator_config_bhs_mem_acc(struct cpr3_controller *ctrl,
+ int new_volt, int *last_volt,
+ struct cpr3_corner *aggr_corner)
+{
+ struct cpr3_regulator *vreg;
+ int i, j, rc, mem_acc_corn, safe_volt;
+ int mem_acc_volt = ctrl->mem_acc_threshold_volt;
+ int ref_volt;
+
+ if (!cpr3_regulator_mem_acc_bhs_used(ctrl))
+ return 0;
+
+ ref_volt = ctrl->use_hw_closed_loop ? aggr_corner->floor_volt :
+ new_volt;
+
+ if (((*last_volt < mem_acc_volt && mem_acc_volt <= ref_volt) ||
+ (*last_volt >= mem_acc_volt && mem_acc_volt > ref_volt))) {
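+ /*
+ * Keep VDD at or above the mem-acc threshold while the mem-acc
+ * corners are reprogrammed.
+ */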
+ if (ref_volt < *last_volt)
+ safe_volt = max(mem_acc_volt, aggr_corner->last_volt);
+ else
+ safe_volt = max(mem_acc_volt, *last_volt);
+
+ rc = regulator_set_voltage(ctrl->vdd_regulator, safe_volt,
+ new_volt < *last_volt ?
+ ctrl->aggr_corner.ceiling_volt :
+ new_volt);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_set_voltage(vdd) == %d failed, rc=%d\n",
+ safe_volt, rc);
+ return rc;
+ }
+
+ *last_volt = safe_volt;
+
+ mem_acc_corn = ref_volt < mem_acc_volt ?
+ ctrl->mem_acc_corner_map[CPR3_MEM_ACC_LOW_CORNER] :
+ ctrl->mem_acc_corner_map[CPR3_MEM_ACC_HIGH_CORNER];
+
+ if (ctrl->mem_acc_regulator) {
+ rc = regulator_set_voltage(ctrl->mem_acc_regulator,
+ mem_acc_corn, mem_acc_corn);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n",
+ mem_acc_corn, rc);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+
+ if (!vreg->mem_acc_regulator ||
+ (vreg->ldo_regulator &&
+ vreg->ldo_regulator_bypass
+ == LDO_MODE))
+ continue;
+
+ rc = regulator_set_voltage(
+ vreg->mem_acc_regulator, mem_acc_corn,
+ mem_acc_corn);
+ if (rc) {
+ cpr3_err(vreg, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n",
+ mem_acc_corn, rc);
+ return rc;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_switch_apm_mode() - switch the mode of the APM controller
+ * associated with a given CPR3 controller
+ * @ctrl: Pointer to the CPR3 controller
+ * @new_volt: New voltage in microvolts that VDD supply needs to
+ * end up at
+ * @last_volt: Pointer to the last known voltage in microvolts for the
+ * VDD supply
+ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max
+ * corner aggregated from all CPR3 threads managed by the
+ * CPR3 controller
+ *
+ * This function requests a switch of the APM mode while guaranteeing
+ * any LDO regulator hardware requirements are satisfied. The function must
+ * be called once it is known a new VDD supply setpoint crosses the APM
+ * voltage threshold.
+ *
+ * Return: 0 on success, errno on failure. If the VDD supply voltage is
+ * modified, last_volt is updated to reflect the new voltage setpoint.
+ */
+static int cpr3_regulator_switch_apm_mode(struct cpr3_controller *ctrl,
+ int new_volt, int *last_volt,
+ struct cpr3_corner *aggr_corner)
+{
+ struct regulator *vdd = ctrl->vdd_regulator;
+ int apm_volt = ctrl->apm_threshold_volt;
+ int orig_last_volt = *last_volt;
+ int rc;
+
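+ /*
+ * Park VDD at the APM threshold voltage while the APM supply source
+ * is switched.
+ */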
+ rc = regulator_set_voltage(vdd, apm_volt, apm_volt);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_set_voltage(vdd) == %d failed, rc=%d\n",
+ apm_volt, rc);
+ return rc;
+ }
+
+ *last_volt = apm_volt;
+
+ rc = cpr3_regulator_ldo_apm_prepare(ctrl, new_volt, *last_volt,
+ aggr_corner);
+ if (rc) {
+ cpr3_err(ctrl, "unable to prepare LDO state for APM switch, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = msm_apm_set_supply(ctrl->apm, new_volt >= apm_volt
+ ? ctrl->apm_high_supply : ctrl->apm_low_supply);
+ if (rc) {
+ cpr3_err(ctrl, "APM switch failed, rc=%d\n", rc);
+ /* Roll back the voltage. */
+ regulator_set_voltage(vdd, orig_last_volt, INT_MAX);
+ *last_volt = orig_last_volt;
+ return rc;
+ }
+ return 0;
+}
+
+/**
+ * cpr3_regulator_config_voltage_crossings() - configure APM and mem-acc
+ * settings depending upon a new VDD supply setpoint
+ *
+ * @ctrl: Pointer to the CPR3 controller
+ * @new_volt: New voltage in microvolts that VDD supply needs to
+ * end up at
+ * @last_volt: Pointer to the last known voltage in microvolts for the
+ * VDD supply
+ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max
+ * corner aggregated from all CPR3 threads managed by the
+ * CPR3 controller
+ *
+ * This function handles the APM and mem-acc regulator reconfiguration if
+ * the new VDD supply voltage will result in crossing their respective voltage
+ * thresholds.
+ *
+ * Return: 0 on success, errno on failure. If the VDD supply voltage is
+ * modified, last_volt is updated to reflect the new voltage setpoint.
+ */
+static int cpr3_regulator_config_voltage_crossings(struct cpr3_controller *ctrl,
+ int new_volt, int *last_volt,
+ struct cpr3_corner *aggr_corner)
+{
+ bool apm_crossing = false, mem_acc_crossing = false;
+ bool mem_acc_bhs_used;
+ int apm_volt = ctrl->apm_threshold_volt;
+ int mem_acc_volt = ctrl->mem_acc_threshold_volt;
+ int ref_volt, rc;
+
+ if (ctrl->apm && apm_volt > 0
+ && ((*last_volt < apm_volt && apm_volt <= new_volt)
+ || (*last_volt >= apm_volt && apm_volt > new_volt)))
+ apm_crossing = true;
+
+ mem_acc_bhs_used = cpr3_regulator_mem_acc_bhs_used(ctrl);
+
+ ref_volt = ctrl->use_hw_closed_loop ? aggr_corner->floor_volt :
+ new_volt;
+
+ if (mem_acc_bhs_used &&
+ (((*last_volt < mem_acc_volt && mem_acc_volt <= ref_volt) ||
+ (*last_volt >= mem_acc_volt && mem_acc_volt > ref_volt))))
+ mem_acc_crossing = true;
+
+ if (apm_crossing && mem_acc_crossing) {
+ if ((new_volt < *last_volt && apm_volt >= mem_acc_volt) ||
+ (new_volt >= *last_volt && apm_volt < mem_acc_volt)) {
+ rc = cpr3_regulator_switch_apm_mode(ctrl, new_volt,
+ last_volt,
+ aggr_corner);
+ if (rc) {
+ cpr3_err(ctrl, "unable to switch APM mode\n");
+ return rc;
+ }
+
+ rc = cpr3_regulator_config_bhs_mem_acc(ctrl, new_volt,
+ last_volt, aggr_corner);
+ if (rc) {
+ cpr3_err(ctrl, "unable to configure BHS mem-acc settings\n");
+ return rc;
+ }
+ } else {
+ rc = cpr3_regulator_config_bhs_mem_acc(ctrl, new_volt,
+ last_volt, aggr_corner);
+ if (rc) {
+ cpr3_err(ctrl, "unable to configure BHS mem-acc settings\n");
+ return rc;
+ }
+
+ rc = cpr3_regulator_switch_apm_mode(ctrl, new_volt,
+ last_volt,
+ aggr_corner);
+ if (rc) {
+ cpr3_err(ctrl, "unable to switch APM mode\n");
+ return rc;
+ }
+ }
+ } else if (apm_crossing) {
+ rc = cpr3_regulator_switch_apm_mode(ctrl, new_volt, last_volt,
+ aggr_corner);
+ if (rc) {
+ cpr3_err(ctrl, "unable to switch APM mode\n");
+ return rc;
+ }
+ } else if (mem_acc_crossing) {
+ rc = cpr3_regulator_config_bhs_mem_acc(ctrl, new_volt,
+ last_volt, aggr_corner);
+ if (rc) {
+ cpr3_err(ctrl, "unable to configure BHS mem-acc settings\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_config_mem_acc() - configure the corner of the mem-acc
+ * regulator associated with the CPR3 controller
+ * @ctrl: Pointer to the CPR3 controller
+ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max
+ * corner aggregated from all CPR3 threads managed by the
+ * CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_config_mem_acc(struct cpr3_controller *ctrl,
+ struct cpr3_corner *aggr_corner)
+{
+ int rc;
+
+ if (ctrl->mem_acc_regulator && aggr_corner->mem_acc_volt) {
+ rc = regulator_set_voltage(ctrl->mem_acc_regulator,
+ aggr_corner->mem_acc_volt,
+ aggr_corner->mem_acc_volt);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_set_voltage(mem_acc) == %d failed, rc=%d\n",
+ aggr_corner->mem_acc_volt, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_scale_vdd_voltage() - scale the CPR controlled VDD supply
+ * voltage to the new level while satisfying any other hardware
+ * requirements
+ * @ctrl: Pointer to the CPR3 controller
+ * @new_volt: New voltage in microvolts that VDD supply needs to end
+ * up at
+ * @last_volt: Last known voltage in microvolts for the VDD supply
+ * @aggr_corner: Pointer to the CPR3 corner which corresponds to the max
+ * corner aggregated from all CPR3 threads managed by the
+ * CPR3 controller
+ *
+ * This function scales the CPR controlled VDD supply voltage from its
+ * current level to the new voltage that is specified. If the supply is
+ * configured to use the APM and the APM threshold is crossed as a result of
+ * the voltage scaling, then this function also stops at the APM threshold,
+ * switches the APM source, and finally sets the final new voltage.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_scale_vdd_voltage(struct cpr3_controller *ctrl,
+ int new_volt, int last_volt,
+ struct cpr3_corner *aggr_corner)
+{
+ struct regulator *vdd = ctrl->vdd_regulator;
+ int rc;
+
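+ /*
+ * When lowering VDD, reconfigure the dependent LDO and mem-acc supplies
+ * before the voltage drops; when raising VDD, raise the system supply
+ * first and reconfigure the dependents only after the new VDD level is
+ * reached.
+ */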
+ if (new_volt < last_volt) {
+ /* Decreasing VDD voltage */
+ rc = cpr3_regulator_config_ldo(ctrl, aggr_corner->floor_volt,
+ ctrl->aggr_corner.ceiling_volt,
+ new_volt, last_volt);
+ if (rc) {
+ cpr3_err(ctrl, "unable to configure LDO state, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_regulator_config_mem_acc(ctrl, aggr_corner);
+ if (rc)
+ return rc;
+
+ } else {
+ /* Increasing VDD voltage */
+ if (ctrl->system_regulator) {
+ rc = regulator_set_voltage(ctrl->system_regulator,
+ aggr_corner->system_volt, INT_MAX);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_set_voltage(system) == %d failed, rc=%d\n",
+ aggr_corner->system_volt, rc);
+ return rc;
+ }
+ }
+ }
+
+ rc = cpr3_regulator_config_voltage_crossings(ctrl, new_volt, &last_volt,
+ aggr_corner);
+ if (rc) {
+ cpr3_err(ctrl, "unable to handle voltage threshold crossing configurations, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /*
+ * Subtract a small amount from the min_uV parameter so that the
+ * set voltage request is not dropped by the framework due to being
+ * duplicate. This is needed in order to switch from hardware
+ * closed-loop to open-loop successfully.
+ */
+ rc = regulator_set_voltage(vdd, new_volt - (ctrl->cpr_enabled ? 0 : 1),
+ aggr_corner->ceiling_volt);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_set_voltage(vdd) == %d failed, rc=%d\n",
+ new_volt, rc);
+ return rc;
+ }
+
+ if (new_volt >= last_volt) {
+ /* Increasing VDD voltage */
+ rc = cpr3_regulator_config_ldo(ctrl, aggr_corner->floor_volt,
+ aggr_corner->ceiling_volt,
+ new_volt, new_volt);
+ if (rc) {
+ cpr3_err(ctrl, "unable to configure LDO state, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_regulator_config_mem_acc(ctrl, aggr_corner);
+ if (rc)
+ return rc;
+ } else {
+ /* Decreasing VDD voltage */
+ if (ctrl->system_regulator) {
+ rc = regulator_set_voltage(ctrl->system_regulator,
+ aggr_corner->system_volt, INT_MAX);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_set_voltage(system) == %d failed, rc=%d\n",
+ aggr_corner->system_volt, rc);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_get_dynamic_floor_volt() - returns the current dynamic floor
+ * voltage based upon static configurations and the state of all
+ * power domains during the last CPR measurement
+ * @ctrl: Pointer to the CPR3 controller
+ * @reg_last_measurement: Value read from the LAST_MEASUREMENT register
+ *
+ * When using HW closed-loop, the dynamic floor voltage is always returned
+ * regardless of the current state of the power domains.
+ *
+ * Return: dynamic floor voltage in microvolts or 0 if dynamic floor is not
+ * currently required
+ */
+static int cpr3_regulator_get_dynamic_floor_volt(struct cpr3_controller *ctrl,
+ u32 reg_last_measurement)
+{
+ int dynamic_floor_volt = 0;
+ struct cpr3_regulator *vreg;
+ bool valid, pd_valid;
+ u32 bypass_bits;
+ int i, j;
+
+ if (!ctrl->supports_hw_closed_loop)
+ return 0;
+
+ if (likely(!ctrl->use_hw_closed_loop)) {
+ valid = !!(reg_last_measurement & CPR3_LAST_MEASUREMENT_VALID);
+ bypass_bits
+ = (reg_last_measurement & CPR3_LAST_MEASUREMENT_PD_BYPASS_MASK)
+ >> CPR3_LAST_MEASUREMENT_PD_BYPASS_SHIFT;
+ } else {
+ /*
+ * Ensure that the dynamic floor voltage is always used for
+ * HW closed-loop since the conditions below cannot be evaluated
+ * after each CPR measurement.
+ */
+ valid = false;
+ bypass_bits = 0;
+ }
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+
+ if (!vreg->uses_dynamic_floor)
+ continue;
+
+ pd_valid = !((bypass_bits & vreg->pd_bypass_mask)
+ == vreg->pd_bypass_mask);
+
+ if (!valid || !pd_valid)
+ dynamic_floor_volt = max(dynamic_floor_volt,
+ vreg->corner[
+ vreg->dynamic_floor_corner].last_volt);
+ }
+ }
+
+ return dynamic_floor_volt;
+}
+
+/**
+ * cpr3_regulator_aggregate_corners() - aggregate two corners together
+ * @aggr_corner: Pointer to accumulated aggregated corner which
+ * is both an input and an output
+ * @corner: Pointer to the corner to be aggregated with
+ * aggr_corner
+ * @aggr_quot: Flag indicating that target quotients should be
+ * aggregated as well.
+ *
+ * Return: none
+ */
+static void cpr3_regulator_aggregate_corners(struct cpr3_corner *aggr_corner,
+ const struct cpr3_corner *corner, bool aggr_quot)
+{
+ int i;
+
+ aggr_corner->ceiling_volt
+ = max(aggr_corner->ceiling_volt, corner->ceiling_volt);
+ aggr_corner->floor_volt
+ = max(aggr_corner->floor_volt, corner->floor_volt);
+ aggr_corner->last_volt
+ = max(aggr_corner->last_volt, corner->last_volt);
+ aggr_corner->open_loop_volt
+ = max(aggr_corner->open_loop_volt, corner->open_loop_volt);
+ aggr_corner->system_volt
+ = max(aggr_corner->system_volt, corner->system_volt);
+ aggr_corner->mem_acc_volt
+ = max(aggr_corner->mem_acc_volt, corner->mem_acc_volt);
+ aggr_corner->irq_en |= corner->irq_en;
+
+ if (aggr_quot) {
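+ /*
+ * An RO remains masked only if it is masked in every aggregated
+ * corner; each target quotient takes the per-RO maximum.
+ */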
+ aggr_corner->ro_mask &= corner->ro_mask;
+
+ for (i = 0; i < CPR3_RO_COUNT; i++)
+ aggr_corner->target_quot[i]
+ = max(aggr_corner->target_quot[i],
+ corner->target_quot[i]);
+ }
+}
+
+/**
+ * cpr3_regulator_update_ctrl_state() - update the state of the CPR controller
+ * to reflect the corners used by all CPR3 regulators as well as
+ * the CPR operating mode
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * This function aggregates the CPR parameters for all CPR3 regulators
+ * associated with the VDD supply. Upon success, it sets the aggregated last
+ * known good voltage.
+ *
+ * The VDD supply voltage will not be physically configured unless this
+ * condition is met by at least one of the regulators of the controller:
+ * regulator->vreg_enabled == true &&
+ * regulator->current_corner != CPR3_REGULATOR_CORNER_INVALID
+ *
+ * CPR registers for the controller and each thread are updated as long as
+ * ctrl->cpr_enabled == true.
+ *
+ * Note, CPR3 controller lock must be held by the caller.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int _cpr3_regulator_update_ctrl_state(struct cpr3_controller *ctrl)
+{
+ struct cpr3_corner aggr_corner = {};
+ struct cpr3_thread *thread;
+ struct cpr3_regulator *vreg;
+ bool valid = false;
+ bool thread_valid;
+ int i, j, rc, new_volt, vdd_volt, dynamic_floor_volt;
+ u32 reg_last_measurement = 0;
+
+ cpr3_ctrl_loop_disable(ctrl);
+
+ vdd_volt = regulator_get_voltage(ctrl->vdd_regulator);
+ if (vdd_volt < 0) {
+ cpr3_err(ctrl, "regulator_get_voltage(vdd) failed, rc=%d\n",
+ vdd_volt);
+ return vdd_volt;
+ }
+
+ if (ctrl->cpr_enabled && ctrl->use_hw_closed_loop)
+ reg_last_measurement
+ = cpr3_read(ctrl, CPR3_REG_LAST_MEASUREMENT);
+
+ /* Aggregate the requests of all threads */
+ for (i = 0; i < ctrl->thread_count; i++) {
+ thread = &ctrl->thread[i];
+ thread_valid = false;
+ memset(&thread->aggr_corner, 0, sizeof(thread->aggr_corner));
+ thread->aggr_corner.ro_mask = CPR3_RO_MASK;
+
+ for (j = 0; j < thread->vreg_count; j++) {
+ vreg = &thread->vreg[j];
+
+ if (ctrl->cpr_enabled && ctrl->use_hw_closed_loop)
+ cpr3_update_vreg_closed_loop_volt(vreg,
+ vdd_volt, reg_last_measurement);
+
+ if (!vreg->vreg_enabled
+ || vreg->current_corner
+ == CPR3_REGULATOR_CORNER_INVALID) {
+ /* Cannot participate in aggregation. */
+ vreg->aggregated = false;
+ continue;
+ } else {
+ vreg->aggregated = true;
+ thread_valid = true;
+ }
+
+ cpr3_regulator_aggregate_corners(&thread->aggr_corner,
+ &vreg->corner[vreg->current_corner], true);
+ }
+
+ valid |= thread_valid;
+
+ if (thread_valid)
+ cpr3_regulator_aggregate_corners(&aggr_corner,
+ &thread->aggr_corner, false);
+ }
+
+ if (valid && ctrl->cpr_allowed_hw && ctrl->cpr_allowed_sw) {
+ rc = cpr3_closed_loop_enable(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "could not enable CPR, rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = cpr3_closed_loop_disable(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "could not disable CPR, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* No threads are enabled with a valid corner so exit. */
+ if (!valid)
+ return 0;
+
+ /*
+ * When using CPR hardware closed-loop, the voltage may vary anywhere
+ * between the floor and ceiling voltage without software notification.
+ * Therefore, it is required that the floor to ceiling range for the
+ * aggregated corner not intersect the APM threshold voltage. Adjust
+ * the floor to ceiling range if this requirement is violated.
+ *
+ * The following algorithm is applied in the case that
+ * floor < threshold <= ceiling:
+ * if open_loop >= threshold - adj, then floor = threshold
+ * else ceiling = threshold - step
+ * where adj = an adjustment factor to ensure sufficient voltage margin
+ * and step = VDD output step size
+ *
+ * The open-loop and last known voltages are also bounded by the new
+ * floor or ceiling value as needed.
+ */
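+ /*
+ * Worked example with illustrative values: apm_threshold_volt =
+ * 800000 uV, apm_adj_volt = 10000 uV, step_volt = 5000 uV,
+ * floor = 750000 uV, ceiling = 850000 uV.
+ * - open_loop = 795000 uV (>= 790000 uV): the floor is raised to
+ * 800000 uV so the whole range sits at or above the threshold.
+ * - open_loop = 780000 uV (< 790000 uV): the ceiling is lowered to
+ * 795000 uV so the whole range stays below the threshold.
+ */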
+ if (ctrl->use_hw_closed_loop
+ && aggr_corner.ceiling_volt >= ctrl->apm_threshold_volt
+ && aggr_corner.floor_volt < ctrl->apm_threshold_volt) {
+
+ if (aggr_corner.open_loop_volt
+ >= ctrl->apm_threshold_volt - ctrl->apm_adj_volt)
+ aggr_corner.floor_volt = ctrl->apm_threshold_volt;
+ else
+ aggr_corner.ceiling_volt
+ = ctrl->apm_threshold_volt - ctrl->step_volt;
+
+ aggr_corner.last_volt
+ = max(aggr_corner.last_volt, aggr_corner.floor_volt);
+ aggr_corner.last_volt
+ = min(aggr_corner.last_volt, aggr_corner.ceiling_volt);
+ aggr_corner.open_loop_volt
+ = max(aggr_corner.open_loop_volt, aggr_corner.floor_volt);
+ aggr_corner.open_loop_volt
+ = min(aggr_corner.open_loop_volt, aggr_corner.ceiling_volt);
+ }
+
+ if (ctrl->use_hw_closed_loop
+ && aggr_corner.ceiling_volt >= ctrl->mem_acc_threshold_volt
+ && aggr_corner.floor_volt < ctrl->mem_acc_threshold_volt) {
+ aggr_corner.floor_volt = ctrl->mem_acc_threshold_volt;
+ aggr_corner.last_volt = max(aggr_corner.last_volt,
+ aggr_corner.floor_volt);
+ aggr_corner.open_loop_volt = max(aggr_corner.open_loop_volt,
+ aggr_corner.floor_volt);
+ }
+
+ if (ctrl->use_hw_closed_loop) {
+ dynamic_floor_volt
+ = cpr3_regulator_get_dynamic_floor_volt(ctrl,
+ reg_last_measurement);
+ if (aggr_corner.floor_volt < dynamic_floor_volt) {
+ aggr_corner.floor_volt = dynamic_floor_volt;
+ aggr_corner.last_volt = max(aggr_corner.last_volt,
+ aggr_corner.floor_volt);
+ aggr_corner.open_loop_volt
+ = max(aggr_corner.open_loop_volt,
+ aggr_corner.floor_volt);
+ aggr_corner.ceiling_volt = max(aggr_corner.ceiling_volt,
+ aggr_corner.floor_volt);
+ }
+ }
+
+ if (ctrl->cpr_enabled && ctrl->last_corner_was_closed_loop) {
+ new_volt = aggr_corner.last_volt;
+ } else {
+ new_volt = aggr_corner.open_loop_volt;
+ aggr_corner.last_volt = aggr_corner.open_loop_volt;
+ }
+
+ cpr3_debug(ctrl, "setting new voltage=%d uV\n", new_volt);
+ rc = cpr3_regulator_scale_vdd_voltage(ctrl, new_volt,
+ vdd_volt, &aggr_corner);
+ if (rc) {
+ cpr3_err(ctrl, "vdd voltage scaling failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Only update registers if CPR is enabled. */
+ if (ctrl->cpr_enabled) {
+ if (ctrl->use_hw_closed_loop) {
+ /* Hardware closed-loop */
+
+ /* Set ceiling and floor limits in hardware */
+ rc = regulator_set_voltage(ctrl->vdd_limit_regulator,
+ aggr_corner.floor_volt,
+ aggr_corner.ceiling_volt);
+ if (rc) {
+ cpr3_err(ctrl, "could not configure HW closed-loop voltage limits, rc=%d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ /* Software closed-loop */
+
+ /*
+ * Disable UP or DOWN interrupts when at ceiling or
+ * floor respectively.
+ */
+ if (new_volt == aggr_corner.floor_volt)
+ aggr_corner.irq_en &= ~CPR3_IRQ_DOWN;
+ if (new_volt == aggr_corner.ceiling_volt)
+ aggr_corner.irq_en &= ~CPR3_IRQ_UP;
+
+ cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR,
+ CPR3_IRQ_UP | CPR3_IRQ_DOWN);
+ cpr3_write(ctrl, CPR3_REG_IRQ_EN, aggr_corner.irq_en);
+ }
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ cpr3_regulator_set_target_quot(&ctrl->thread[i]);
+
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+
+ if (vreg->vreg_enabled)
+ vreg->last_closed_loop_corner
+ = vreg->current_corner;
+ }
+ }
+
+ if (ctrl->proc_clock_throttle) {
+ if (aggr_corner.ceiling_volt > aggr_corner.floor_volt
+ && (ctrl->use_hw_closed_loop
+ || new_volt < aggr_corner.ceiling_volt))
+ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+ ctrl->proc_clock_throttle);
+ else
+ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+ CPR3_PD_THROTTLE_DISABLE);
+ }
+
+ /*
+ * Ensure that all CPR register writes complete before
+ * re-enabling CPR loop operation.
+ */
+ wmb();
+ }
+
+ /*
+ * Only enable the CPR controller if it is possible to set more than
+ * one vdd-supply voltage.
+ */
+ if (aggr_corner.ceiling_volt > aggr_corner.floor_volt)
+ cpr3_ctrl_loop_enable(ctrl);
+
+ ctrl->aggr_corner = aggr_corner;
+ ctrl->last_corner_was_closed_loop = ctrl->cpr_enabled;
+
+ cpr3_debug(ctrl, "CPR configuration updated\n");
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_wait_for_idle() - wait for the CPR controller to no longer be
+ * busy
+ * @ctrl: Pointer to the CPR3 controller
+ * @max_wait_ns: Max wait time in nanoseconds
+ *
+ * Return: 0 on success or -ETIMEDOUT if the controller was still busy after
+ * the maximum delay time
+ */
+static int cpr3_regulator_wait_for_idle(struct cpr3_controller *ctrl,
+ s64 max_wait_ns)
+{
+ ktime_t start, end;
+ s64 time_ns;
+ u32 reg;
+
+ /*
+ * Ensure that all previous CPR register writes have completed before
+ * checking the status register.
+ */
+ mb();
+
+ start = ktime_get();
+ do {
+ end = ktime_get();
+ time_ns = ktime_to_ns(ktime_sub(end, start));
+ if (time_ns > max_wait_ns) {
+ cpr3_err(ctrl, "CPR controller still busy after %lld us\n",
+ time_ns / 1000);
+ return -ETIMEDOUT;
+ }
+ usleep_range(50, 100);
+ reg = cpr3_read(ctrl, CPR3_REG_CPR_STATUS);
+ } while (reg & CPR3_CPR_STATUS_BUSY_MASK);
+
+ return 0;
+}
+
+/**
+ * cmp_int() - int comparison function to be passed into the sort() function
+ * which leads to ascending sorting
+ * @a: First int value
+ * @b: Second int value
+ *
+ * Return: >0 if a > b, 0 if a == b, <0 if a < b
+ */
+static int cmp_int(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+/**
+ * cpr3_regulator_measure_aging() - measure the quotient difference for the
+ * specified CPR aging sensor
+ * @ctrl: Pointer to the CPR3 controller
+ * @aging_sensor: Aging sensor to measure
+ *
+ * Note that vdd-supply must be configured to the aging reference voltage before
+ * calling this function.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_measure_aging(struct cpr3_controller *ctrl,
+ struct cpr3_aging_sensor_info *aging_sensor)
+{
+ u32 mask, reg, result, quot_min, quot_max, sel_min, sel_max;
+ u32 quot_min_scaled, quot_max_scaled;
+ u32 gcnt, gcnt_ref, gcnt0_restore, gcnt1_restore, irq_restore;
+ u32 cont_dly_restore, up_down_dly_restore;
+ int quot_delta, quot_delta_scaled, quot_delta_scaled_sum;
+ int *quot_delta_results;
+ int rc, i, aging_measurement_count, filtered_count;
+ bool is_aging_measurement;
+
+ quot_delta_results = kcalloc(CPR3_AGING_MEASUREMENT_ITERATIONS,
+ sizeof(*quot_delta_results), GFP_KERNEL);
+ if (!quot_delta_results)
+ return -ENOMEM;
+
+ cpr3_ctrl_loop_disable(ctrl);
+
+ /* Enable up, down, and mid CPR interrupts */
+ irq_restore = cpr3_read(ctrl, CPR3_REG_IRQ_EN);
+ cpr3_write(ctrl, CPR3_REG_IRQ_EN,
+ CPR3_IRQ_UP | CPR3_IRQ_DOWN | CPR3_IRQ_MID);
+
+ /* Ensure that the aging sensor is assigned to CPR thread 0 */
+ cpr3_write(ctrl, CPR3_REG_SENSOR_OWNER(aging_sensor->sensor_id), 0);
+
+ /* Switch from HW to SW closed-loop if necessary */
+ if (ctrl->supports_hw_closed_loop) {
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+ } else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+ cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP,
+ CPR3_HW_CLOSED_LOOP_DISABLE);
+ }
+ }
+
+ /* Configure the GCNT for RO0 and RO1 that are used for aging */
+ gcnt0_restore = cpr3_read(ctrl, CPR3_REG_GCNT(0));
+ gcnt1_restore = cpr3_read(ctrl, CPR3_REG_GCNT(1));
+ gcnt_ref = cpr3_regulator_get_gcnt(ctrl);
+ gcnt = gcnt_ref * 3 / 2;
+ cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt);
+ cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt);
+
+ /*
+ * Mask all sensors except for the one to measure and bypass all
+ * sensors in collapsible domains.
+ */
+ for (i = 0; i <= ctrl->sensor_count / 32; i++) {
+ mask = GENMASK(min(31, ctrl->sensor_count - i * 32), 0);
+ if (aging_sensor->sensor_id / 32 >= i
+ && aging_sensor->sensor_id / 32 < (i + 1))
+ mask &= ~BIT(aging_sensor->sensor_id % 32);
+ cpr3_write(ctrl, CPR3_REG_SENSOR_MASK_WRITE_BANK(i), mask);
+ cpr3_write(ctrl, CPR3_REG_SENSOR_BYPASS_WRITE_BANK(i),
+ aging_sensor->bypass_mask[i]);
+ }
+
+ /* Set CPR loop delays to 0 us */
+ if (ctrl->supports_hw_closed_loop
+ && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+ cont_dly_restore = cpr3_read(ctrl, CPR3_REG_CPR_TIMER_MID_CONT);
+ up_down_dly_restore = cpr3_read(ctrl,
+ CPR3_REG_CPR_TIMER_UP_DN_CONT);
+ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_MID_CONT, 0);
+ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_UP_DN_CONT, 0);
+ } else {
+ cont_dly_restore = cpr3_read(ctrl,
+ CPR3_REG_CPR_TIMER_AUTO_CONT);
+ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_AUTO_CONT, 0);
+ }
+
+ /* Set count mode to all-at-once min with no repeat */
+ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL,
+ CPR3_CPR_CTL_COUNT_MODE_MASK | CPR3_CPR_CTL_COUNT_REPEAT_MASK,
+ CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_MIN
+ << CPR3_CPR_CTL_COUNT_MODE_SHIFT);
+
+ cpr3_ctrl_loop_enable(ctrl);
+
+ rc = cpr3_regulator_wait_for_idle(ctrl,
+ CPR3_AGING_MEASUREMENT_TIMEOUT_NS);
+ if (rc)
+ goto cleanup;
+
+ /* Set count mode to all-at-once aging */
+ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL, CPR3_CPR_CTL_COUNT_MODE_MASK,
+ CPR3_CPR_CTL_COUNT_MODE_ALL_AT_ONCE_AGE
+ << CPR3_CPR_CTL_COUNT_MODE_SHIFT);
+
+ aging_measurement_count = 0;
+ for (i = 0; i < CPR3_AGING_MEASUREMENT_ITERATIONS; i++) {
+ /* Send CONT_NACK */
+ cpr3_write(ctrl, CPR3_REG_CONT_CMD, CPR3_CONT_CMD_NACK);
+
+ rc = cpr3_regulator_wait_for_idle(ctrl,
+ CPR3_AGING_MEASUREMENT_TIMEOUT_NS);
+ if (rc)
+ goto cleanup;
+
+ /* Check for PAGE_IS_AGE flag in status register */
+ reg = cpr3_read(ctrl, CPR3_REG_CPR_STATUS);
+ is_aging_measurement
+ = reg & CPR3_CPR_STATUS_AGING_MEASUREMENT_MASK;
+
+ /* Read CPR measurement results */
+ result = cpr3_read(ctrl, CPR3_REG_RESULT1(0));
+ quot_min = (result & CPR3_RESULT1_QUOT_MIN_MASK)
+ >> CPR3_RESULT1_QUOT_MIN_SHIFT;
+ quot_max = (result & CPR3_RESULT1_QUOT_MAX_MASK)
+ >> CPR3_RESULT1_QUOT_MAX_SHIFT;
+ sel_min = (result & CPR3_RESULT1_RO_MIN_MASK)
+ >> CPR3_RESULT1_RO_MIN_SHIFT;
+ sel_max = (result & CPR3_RESULT1_RO_MAX_MASK)
+ >> CPR3_RESULT1_RO_MAX_SHIFT;
+
+ /*
+ * Scale the quotients so that they are equivalent to the fused
+ * values. This accounts for the difference in measurement
+ * interval times.
+ */
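+ /*
+ * Worked example (illustrative numbers): with gcnt_ref = 1000 and
+ * gcnt = 1500 (the 3/2 scaling above), a measured quot_min of 1502
+ * scales to 1502 * 1001 / 1501 = 1001 with integer division, i.e.
+ * back into the same units as the fused reference quotients.
+ */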
+ quot_min_scaled = quot_min * (gcnt_ref + 1) / (gcnt + 1);
+ quot_max_scaled = quot_max * (gcnt_ref + 1) / (gcnt + 1);
+
+ if (sel_max == 1) {
+ quot_delta = quot_max - quot_min;
+ quot_delta_scaled = quot_max_scaled - quot_min_scaled;
+ } else {
+ quot_delta = quot_min - quot_max;
+ quot_delta_scaled = quot_min_scaled - quot_max_scaled;
+ }
+
+ if (is_aging_measurement)
+ quot_delta_results[aging_measurement_count++]
+ = quot_delta_scaled;
+
+ cpr3_debug(ctrl, "aging results: page_is_age=%u, sel_min=%u, sel_max=%u, quot_min=%u, quot_max=%u, quot_delta=%d, quot_min_scaled=%u, quot_max_scaled=%u, quot_delta_scaled=%d\n",
+ is_aging_measurement, sel_min, sel_max, quot_min,
+ quot_max, quot_delta, quot_min_scaled, quot_max_scaled,
+ quot_delta_scaled);
+ }
+
+ filtered_count
+ = aging_measurement_count - CPR3_AGING_MEASUREMENT_FILTER * 2;
+ if (filtered_count > 0) {
+ sort(quot_delta_results, aging_measurement_count,
+ sizeof(*quot_delta_results), cmp_int, NULL);
+
+ quot_delta_scaled_sum = 0;
+ for (i = 0; i < filtered_count; i++)
+ quot_delta_scaled_sum
+ += quot_delta_results[i
+ + CPR3_AGING_MEASUREMENT_FILTER];
+
+ aging_sensor->measured_quot_diff
+ = quot_delta_scaled_sum / filtered_count;
+ cpr3_info(ctrl, "average quotient delta=%d (count=%d)\n",
+ aging_sensor->measured_quot_diff,
+ filtered_count);
+ } else {
+ cpr3_err(ctrl, "%d aging measurements completed after %d iterations\n",
+ aging_measurement_count,
+ CPR3_AGING_MEASUREMENT_ITERATIONS);
+ rc = -EBUSY;
+ }
+
+cleanup:
+ kfree(quot_delta_results);
+
+ cpr3_ctrl_loop_disable(ctrl);
+
+ cpr3_write(ctrl, CPR3_REG_IRQ_EN, irq_restore);
+
+ cpr3_write(ctrl, CPR3_REG_GCNT(0), gcnt0_restore);
+ cpr3_write(ctrl, CPR3_REG_GCNT(1), gcnt1_restore);
+
+ if (ctrl->supports_hw_closed_loop
+ && ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_MID_CONT, cont_dly_restore);
+ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_UP_DN_CONT,
+ up_down_dly_restore);
+ } else {
+ cpr3_write(ctrl, CPR3_REG_CPR_TIMER_AUTO_CONT,
+ cont_dly_restore);
+ }
+
+ for (i = 0; i <= ctrl->sensor_count / 32; i++) {
+ cpr3_write(ctrl, CPR3_REG_SENSOR_MASK_WRITE_BANK(i), 0);
+ cpr3_write(ctrl, CPR3_REG_SENSOR_BYPASS_WRITE_BANK(i), 0);
+ }
+
+ cpr3_masked_write(ctrl, CPR3_REG_CPR_CTL,
+ CPR3_CPR_CTL_COUNT_MODE_MASK | CPR3_CPR_CTL_COUNT_REPEAT_MASK,
+ (ctrl->count_mode << CPR3_CPR_CTL_COUNT_MODE_SHIFT)
+ | (ctrl->count_repeat << CPR3_CPR_CTL_COUNT_REPEAT_SHIFT));
+
+ cpr3_write(ctrl, CPR3_REG_SENSOR_OWNER(aging_sensor->sensor_id),
+ ctrl->sensor_owner[aging_sensor->sensor_id]);
+
+ cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR,
+ CPR3_IRQ_UP | CPR3_IRQ_DOWN | CPR3_IRQ_MID);
+
+ if (ctrl->supports_hw_closed_loop) {
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+ ctrl->use_hw_closed_loop
+ ? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE
+ : CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+ } else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+ cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP,
+ ctrl->use_hw_closed_loop
+ ? CPR3_HW_CLOSED_LOOP_ENABLE
+ : CPR3_HW_CLOSED_LOOP_DISABLE);
+ }
+ }
+
+ return rc;
+}
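+
+/*
+ * Illustrative note on the filtering above (FILTER value assumed for the
+ * example): with CPR3_AGING_MEASUREMENT_FILTER = 3 and 20 valid aging
+ * measurements, the 20 scaled quotient deltas are sorted ascending, the
+ * 3 smallest and 3 largest samples are discarded, and the remaining 14
+ * are averaged into measured_quot_diff. Fewer than 2 * 3 + 1 = 7 valid
+ * measurements leaves filtered_count <= 0 and the routine fails with
+ * -EBUSY.
+ */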
+
+/**
+ * cpr3_regulator_readjust_quotients() - readjust the target quotients for the
+ * regulator by removing the old adjustment and adding the new one
+ * @vreg: Pointer to the CPR3 regulator
+ * @old_adjust_volt: Old aging adjustment voltage in microvolts
+ * @new_adjust_volt: New aging adjustment voltage in microvolts
+ *
+ * Also reset the cached closed loop voltage (last_volt) to equal the open-loop
+ * voltage for each corner.
+ */
+static void cpr3_regulator_readjust_quotients(struct cpr3_regulator *vreg,
+ int old_adjust_volt, int new_adjust_volt)
+{
+ unsigned long long temp;
+ int i, j, old_volt, new_volt;
+
+ if (!vreg->aging_allowed)
+ return;
+
+ for (i = 0; i < vreg->corner_count; i++) {
+ temp = (unsigned long long)old_adjust_volt
+ * (unsigned long long)vreg->corner[i].aging_derate;
+ do_div(temp, 1000);
+ old_volt = temp;
+
+ temp = (unsigned long long)new_adjust_volt
+ * (unsigned long long)vreg->corner[i].aging_derate;
+ do_div(temp, 1000);
+ new_volt = temp;
+
+ old_volt = min(vreg->aging_max_adjust_volt, old_volt);
+ new_volt = min(vreg->aging_max_adjust_volt, new_volt);
+
+ for (j = 0; j < CPR3_RO_COUNT; j++) {
+ if (vreg->corner[i].target_quot[j] != 0) {
+ vreg->corner[i].target_quot[j]
+ += cpr3_quot_adjustment(
+ vreg->corner[i].ro_scale[j],
+ new_volt)
+ - cpr3_quot_adjustment(
+ vreg->corner[i].ro_scale[j],
+ old_volt);
+ }
+ }
+ vreg->corner[i].last_volt = vreg->corner[i].open_loop_volt;
+
+ cpr3_debug(vreg, "corner %d: applying %d uV closed-loop voltage margin adjustment\n",
+ i, new_volt);
+ }
+}
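+
+/*
+ * Illustrative example for the readjustment above (values assumed, not
+ * taken from any fuse map): with aging_derate = 1000 (i.e. a 100%
+ * derate), old_adjust_volt = 0 uV and new_adjust_volt = 15000 uV, each
+ * corner's non-zero target quotients are shifted by
+ * cpr3_quot_adjustment(ro_scale, 15000) - cpr3_quot_adjustment(ro_scale, 0),
+ * and last_volt is reset to the corner's open-loop voltage so that
+ * closed-loop operation restarts from a known-safe point.
+ */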
+
+
+/**
+ * cpr3_regulator_set_aging_ref_adjustment() - adjust target quotients for the
+ * regulators managed by this CPR controller to account for aging
+ * @ctrl: Pointer to the CPR3 controller
+ * @ref_adjust_volt: New aging reference adjustment voltage in microvolts to
+ * apply to all regulators managed by this CPR controller
+ *
+ * The existing aging adjustment as defined by ctrl->aging_ref_adjust_volt is
+ * first removed and then the adjustment is applied. Lastly, the value of
+ * ctrl->aging_ref_adjust_volt is updated to ref_adjust_volt.
+ */
+static void cpr3_regulator_set_aging_ref_adjustment(
+ struct cpr3_controller *ctrl, int ref_adjust_volt)
+{
+ int i, j;
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ cpr3_regulator_readjust_quotients(
+ &ctrl->thread[i].vreg[j],
+ ctrl->aging_ref_adjust_volt,
+ ref_adjust_volt);
+ }
+ }
+
+ ctrl->aging_ref_adjust_volt = ref_adjust_volt;
+}
+
+/**
+ * cpr3_regulator_aging_adjust() - adjust the target quotients for regulators
+ * based on the output of CPR aging sensors
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_aging_adjust(struct cpr3_controller *ctrl)
+{
+ struct cpr3_regulator *vreg;
+ struct cpr3_corner restore_aging_corner;
+ struct cpr3_corner *corner;
+ int *restore_current_corner;
+ bool *restore_vreg_enabled;
+ int i, j, id, rc, rc2, vreg_count, aging_volt, max_aging_volt;
+ u32 reg;
+
+ if (!ctrl->aging_required || !ctrl->cpr_enabled
+ || ctrl->aggr_corner.ceiling_volt == 0
+ || ctrl->aggr_corner.ceiling_volt > ctrl->aging_ref_volt)
+ return 0;
+
+ for (i = 0, vreg_count = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+ vreg_count++;
+
+ if (vreg->aging_allowed && vreg->vreg_enabled
+ && vreg->current_corner > vreg->aging_corner)
+ return 0;
+ }
+ }
+
+ /* Verify that none of the aging sensors are currently masked. */
+ for (i = 0; i < ctrl->aging_sensor_count; i++) {
+ id = ctrl->aging_sensor[i].sensor_id;
+ reg = cpr3_read(ctrl, CPR3_REG_SENSOR_MASK_READ(id));
+ if (reg & BIT(id % 32))
+ return 0;
+ }
+
+ restore_current_corner = kcalloc(vreg_count,
+ sizeof(*restore_current_corner), GFP_KERNEL);
+ restore_vreg_enabled = kcalloc(vreg_count,
+ sizeof(*restore_vreg_enabled), GFP_KERNEL);
+ if (!restore_current_corner || !restore_vreg_enabled) {
+ kfree(restore_current_corner);
+ kfree(restore_vreg_enabled);
+ return -ENOMEM;
+ }
+
+ /* Force all regulators to the aging corner */
+ for (i = 0, vreg_count = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++, vreg_count++) {
+ vreg = &ctrl->thread[i].vreg[j];
+
+ restore_current_corner[vreg_count]
+ = vreg->current_corner;
+ restore_vreg_enabled[vreg_count]
+ = vreg->vreg_enabled;
+
+ vreg->current_corner = vreg->aging_corner;
+ vreg->vreg_enabled = true;
+ }
+ }
+
+ /* Force one of the regulators to require the aging reference voltage */
+ vreg = &ctrl->thread[0].vreg[0];
+ corner = &vreg->corner[vreg->current_corner];
+ restore_aging_corner = *corner;
+ corner->ceiling_volt = ctrl->aging_ref_volt;
+ corner->floor_volt = ctrl->aging_ref_volt;
+ corner->open_loop_volt = ctrl->aging_ref_volt;
+ corner->last_volt = ctrl->aging_ref_volt;
+
+ /* Skip last_volt caching */
+ ctrl->last_corner_was_closed_loop = false;
+
+ /* Set the vdd supply voltage to the aging reference voltage */
+ rc = _cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "unable to force vdd-supply to the aging reference voltage=%d uV, rc=%d\n",
+ ctrl->aging_ref_volt, rc);
+ goto cleanup;
+ }
+
+ if (ctrl->aging_vdd_mode) {
+ rc = regulator_set_mode(ctrl->vdd_regulator,
+ ctrl->aging_vdd_mode);
+ if (rc) {
+ cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+ ctrl->aging_vdd_mode, rc);
+ goto cleanup;
+ }
+ }
+
+ /* Perform aging measurement on all aging sensors */
+ max_aging_volt = 0;
+ for (i = 0; i < ctrl->aging_sensor_count; i++) {
+ for (j = 0; j < CPR3_AGING_RETRY_COUNT; j++) {
+ rc = cpr3_regulator_measure_aging(ctrl,
+ &ctrl->aging_sensor[i]);
+ if (!rc)
+ break;
+ }
+
+ if (!rc) {
+ aging_volt =
+ cpr3_voltage_adjustment(
+ ctrl->aging_sensor[i].ro_scale,
+ ctrl->aging_sensor[i].measured_quot_diff
+ - ctrl->aging_sensor[i].init_quot_diff);
+ max_aging_volt = max(max_aging_volt, aging_volt);
+ } else {
+ cpr3_err(ctrl, "CPR aging measurement failed after %d tries, rc=%d\n",
+ CPR3_AGING_RETRY_COUNT, rc);
+ ctrl->aging_failed = true;
+ ctrl->aging_required = false;
+ goto cleanup;
+ }
+ }
+
+cleanup:
+ vreg = &ctrl->thread[0].vreg[0];
+ vreg->corner[vreg->current_corner] = restore_aging_corner;
+
+ for (i = 0, vreg_count = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++, vreg_count++) {
+ vreg = &ctrl->thread[i].vreg[j];
+ vreg->current_corner
+ = restore_current_corner[vreg_count];
+ vreg->vreg_enabled = restore_vreg_enabled[vreg_count];
+ }
+ }
+
+ kfree(restore_current_corner);
+ kfree(restore_vreg_enabled);
+
+ /* Adjust the CPR target quotients according to the aging measurement */
+ if (!rc) {
+ cpr3_regulator_set_aging_ref_adjustment(ctrl, max_aging_volt);
+
+ cpr3_info(ctrl, "aging measurement successful; aging reference adjustment voltage=%d uV\n",
+ ctrl->aging_ref_adjust_volt);
+ ctrl->aging_succeeded = true;
+ ctrl->aging_required = false;
+ }
+
+ if (ctrl->aging_complete_vdd_mode) {
+ rc = regulator_set_mode(ctrl->vdd_regulator,
+ ctrl->aging_complete_vdd_mode);
+ if (rc)
+ cpr3_err(ctrl, "unable to configure vdd-supply for mode=%u, rc=%d\n",
+ ctrl->aging_complete_vdd_mode, rc);
+ }
+
+ /* Skip last_volt caching */
+ ctrl->last_corner_was_closed_loop = false;
+
+ /*
+ * Restore vdd-supply to the voltage before the aging measurement and
+ * restore the CPR3 controller hardware state.
+ */
+ rc2 = _cpr3_regulator_update_ctrl_state(ctrl);
+
+ /* Skip last_volt caching for the next request as well */
+ ctrl->last_corner_was_closed_loop = false;
+
+ return rc ? rc : rc2;
+}
+
+/**
+ * cpr3_regulator_update_ctrl_state() - update the state of the CPR controller
+ * to reflect the corners used by all CPR3 regulators as well as
+ * the CPR operating mode and perform aging adjustments if needed
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Note, CPR3 controller lock must be held by the caller.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_update_ctrl_state(struct cpr3_controller *ctrl)
+{
+ int rc;
+
+ rc = _cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc)
+ return rc;
+
+ return cpr3_regulator_aging_adjust(ctrl);
+}
+
+/**
+ * cpr3_regulator_set_voltage() - set the voltage corner for the CPR3 regulator
+ * associated with the regulator device
+ * @rdev: Regulator device pointer for the cpr3-regulator
+ * @corner: New voltage corner to set (offset by CPR3_CORNER_OFFSET)
+ * @corner_max: Maximum voltage corner allowed (offset by
+ * CPR3_CORNER_OFFSET)
+ * @selector: Pointer which is filled with the selector value for the
+ * corner
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device. The VDD voltage will not be
+ * physically configured until both this function and cpr3_regulator_enable()
+ * are called.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_set_voltage(struct regulator_dev *rdev,
+ int corner, int corner_max, unsigned *selector)
+{
+ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ int rc = 0;
+ int last_corner;
+
+ corner -= CPR3_CORNER_OFFSET;
+ corner_max -= CPR3_CORNER_OFFSET;
+ *selector = corner;
+
+ mutex_lock(&ctrl->lock);
+
+ if (!vreg->vreg_enabled) {
+ vreg->current_corner = corner;
+ cpr3_debug(vreg, "stored corner=%d\n", corner);
+ goto done;
+ } else if (vreg->current_corner == corner) {
+ goto done;
+ }
+
+ last_corner = vreg->current_corner;
+ vreg->current_corner = corner;
+
+ rc = cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc) {
+ cpr3_err(vreg, "could not update CPR state, rc=%d\n", rc);
+ vreg->current_corner = last_corner;
+ }
+
+ cpr3_debug(vreg, "set corner=%d\n", corner);
+done:
+ mutex_unlock(&ctrl->lock);
+
+ return rc;
+}
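+
+/*
+ * Consumer-side sketch (hypothetical client code; the corner number and
+ * the vreg_corner_max variable are examples only): since this regulator
+ * exposes corners rather than microvolts, a client would request corner
+ * 4 through the regulator framework with
+ *
+ * regulator_set_voltage(vreg, 4 + CPR3_CORNER_OFFSET,
+ * vreg_corner_max + CPR3_CORNER_OFFSET);
+ *
+ * and, per the kernel-doc above, the physical vdd-supply voltage is not
+ * programmed until the regulator has also been enabled.
+ */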
+
+/**
+ * cpr3_regulator_get_voltage() - get the voltage corner for the CPR3 regulator
+ * associated with the regulator device
+ * @rdev: Regulator device pointer for the cpr3-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: voltage corner value offset by CPR3_CORNER_OFFSET
+ */
+static int cpr3_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+
+ if (vreg->current_corner == CPR3_REGULATOR_CORNER_INVALID)
+ return CPR3_CORNER_OFFSET;
+ else
+ return vreg->current_corner + CPR3_CORNER_OFFSET;
+}
+
+/**
+ * cpr3_regulator_list_voltage() - return the voltage corner mapped to the
+ * specified selector
+ * @rdev: Regulator device pointer for the cpr3-regulator
+ * @selector: Regulator selector
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: voltage corner value offset by CPR3_CORNER_OFFSET
+ */
+static int cpr3_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+
+ if (selector < vreg->corner_count)
+ return selector + CPR3_CORNER_OFFSET;
+ else
+ return 0;
+}
+
+/**
+ * cpr3_regulator_list_corner_voltage() - return the ceiling voltage mapped to
+ * the specified voltage corner
+ * @rdev: Regulator device pointer for the cpr3-regulator
+ * @corner: Voltage corner
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: voltage value in microvolts or -EINVAL if the corner is out of range
+ */
+static int cpr3_regulator_list_corner_voltage(struct regulator_dev *rdev,
+ int corner)
+{
+ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+
+ corner -= CPR3_CORNER_OFFSET;
+
+ if (corner >= 0 && corner < vreg->corner_count)
+ return vreg->corner[corner].ceiling_volt;
+ else
+ return -EINVAL;
+}
+
+/**
+ * cpr3_regulator_is_enabled() - return the enable state of the CPR3 regulator
+ * @rdev: Regulator device pointer for the cpr3-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: true if regulator is enabled, false if regulator is disabled
+ */
+static int cpr3_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->vreg_enabled;
+}
+
+/**
+ * cpr3_regulator_enable() - enable the CPR3 regulator
+ * @rdev: Regulator device pointer for the cpr3-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_enable(struct regulator_dev *rdev)
+{
+ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ int rc = 0;
+
+ if (vreg->vreg_enabled == true)
+ return 0;
+
+ mutex_lock(&ctrl->lock);
+
+ if (ctrl->system_regulator) {
+ rc = regulator_enable(ctrl->system_regulator);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_enable(system) failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ rc = regulator_enable(ctrl->vdd_regulator);
+ if (rc) {
+ cpr3_err(vreg, "regulator_enable(vdd) failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ if (vreg->ldo_regulator) {
+ rc = regulator_enable(vreg->ldo_regulator);
+ if (rc) {
+ cpr3_err(vreg, "regulator_enable(ldo) failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ vreg->vreg_enabled = true;
+ rc = cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc) {
+ cpr3_err(vreg, "could not update CPR state, rc=%d\n", rc);
+ regulator_disable(ctrl->vdd_regulator);
+ vreg->vreg_enabled = false;
+ goto done;
+ }
+
+ cpr3_debug(vreg, "Enabled\n");
+done:
+ mutex_unlock(&ctrl->lock);
+
+ return rc;
+}
+
+/**
+ * cpr3_regulator_disable() - disable the CPR3 regulator
+ * @rdev: Regulator device pointer for the cpr3-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each cpr3-regulator device.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_disable(struct regulator_dev *rdev)
+{
+ struct cpr3_regulator *vreg = rdev_get_drvdata(rdev);
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ int rc, rc2;
+
+ if (vreg->vreg_enabled == false)
+ return 0;
+
+ mutex_lock(&ctrl->lock);
+
+ if (vreg->ldo_regulator && vreg->ldo_regulator_bypass == LDO_MODE) {
+ rc = regulator_get_voltage(ctrl->vdd_regulator);
+ if (rc < 0) {
+ cpr3_err(vreg, "regulator_get_voltage(vdd) failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ /* Switch back to BHS for safe operation */
+ rc = cpr3_regulator_set_bhs_mode(vreg, rc,
+ ctrl->aggr_corner.ceiling_volt);
+ if (rc) {
+ cpr3_err(vreg, "unable to switch to BHS mode, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ if (vreg->ldo_regulator) {
+ rc = regulator_disable(vreg->ldo_regulator);
+ if (rc) {
+ cpr3_err(vreg, "regulator_disable(ldo) failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+ rc = regulator_disable(ctrl->vdd_regulator);
+ if (rc) {
+ cpr3_err(vreg, "regulator_disable(vdd) failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ vreg->vreg_enabled = false;
+ rc = cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc) {
+ cpr3_err(vreg, "could not update CPR state, rc=%d\n", rc);
+ rc2 = regulator_enable(ctrl->vdd_regulator);
+ vreg->vreg_enabled = true;
+ goto done;
+ }
+
+ if (ctrl->system_regulator) {
+ rc = regulator_disable(ctrl->system_regulator);
+ if (rc) {
+ cpr3_err(ctrl, "regulator_disable(system) failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ cpr3_debug(vreg, "Disabled\n");
+done:
+ mutex_unlock(&ctrl->lock);
+
+ return rc;
+}
+
+static struct regulator_ops cpr3_regulator_ops = {
+ .enable = cpr3_regulator_enable,
+ .disable = cpr3_regulator_disable,
+ .is_enabled = cpr3_regulator_is_enabled,
+ .set_voltage = cpr3_regulator_set_voltage,
+ .get_voltage = cpr3_regulator_get_voltage,
+ .list_voltage = cpr3_regulator_list_voltage,
+ .list_corner_voltage = cpr3_regulator_list_corner_voltage,
+};
+
+/**
+ * cpr3_print_result() - print CPR measurement results to the kernel log for
+ * debugging purposes
+ * @thread: Pointer to the CPR3 thread
+ *
+ * Return: None
+ */
+static void cpr3_print_result(struct cpr3_thread *thread)
+{
+ struct cpr3_controller *ctrl = thread->ctrl;
+ u32 result[3], busy, step_dn, step_up, error_steps, error, negative;
+ u32 quot_min, quot_max, ro_min, ro_max, step_quot_min, step_quot_max;
+ u32 sensor_min, sensor_max;
+ char *sign;
+
+ result[0] = cpr3_read(ctrl, CPR3_REG_RESULT0(thread->thread_id));
+ result[1] = cpr3_read(ctrl, CPR3_REG_RESULT1(thread->thread_id));
+ result[2] = cpr3_read(ctrl, CPR3_REG_RESULT2(thread->thread_id));
+
+ busy = !!(result[0] & CPR3_RESULT0_BUSY_MASK);
+ step_dn = !!(result[0] & CPR3_RESULT0_STEP_DN_MASK);
+ step_up = !!(result[0] & CPR3_RESULT0_STEP_UP_MASK);
+ error_steps = (result[0] & CPR3_RESULT0_ERROR_STEPS_MASK)
+ >> CPR3_RESULT0_ERROR_STEPS_SHIFT;
+ error = (result[0] & CPR3_RESULT0_ERROR_MASK)
+ >> CPR3_RESULT0_ERROR_SHIFT;
+ negative = !!(result[0] & CPR3_RESULT0_NEGATIVE_MASK);
+
+ quot_min = (result[1] & CPR3_RESULT1_QUOT_MIN_MASK)
+ >> CPR3_RESULT1_QUOT_MIN_SHIFT;
+ quot_max = (result[1] & CPR3_RESULT1_QUOT_MAX_MASK)
+ >> CPR3_RESULT1_QUOT_MAX_SHIFT;
+ ro_min = (result[1] & CPR3_RESULT1_RO_MIN_MASK)
+ >> CPR3_RESULT1_RO_MIN_SHIFT;
+ ro_max = (result[1] & CPR3_RESULT1_RO_MAX_MASK)
+ >> CPR3_RESULT1_RO_MAX_SHIFT;
+
+ step_quot_min = (result[2] & CPR3_RESULT2_STEP_QUOT_MIN_MASK)
+ >> CPR3_RESULT2_STEP_QUOT_MIN_SHIFT;
+ step_quot_max = (result[2] & CPR3_RESULT2_STEP_QUOT_MAX_MASK)
+ >> CPR3_RESULT2_STEP_QUOT_MAX_SHIFT;
+ sensor_min = (result[2] & CPR3_RESULT2_SENSOR_MIN_MASK)
+ >> CPR3_RESULT2_SENSOR_MIN_SHIFT;
+ sensor_max = (result[2] & CPR3_RESULT2_SENSOR_MAX_MASK)
+ >> CPR3_RESULT2_SENSOR_MAX_SHIFT;
+
+ sign = negative ? "-" : "";
+ cpr3_debug(ctrl, "thread %u: busy=%u, step_dn=%u, step_up=%u, error_steps=%s%u, error=%s%u\n",
+ thread->thread_id, busy, step_dn, step_up, sign, error_steps,
+ sign, error);
+ cpr3_debug(ctrl, "thread %u: quot_min=%u, quot_max=%u, ro_min=%u, ro_max=%u\n",
+ thread->thread_id, quot_min, quot_max, ro_min, ro_max);
+ cpr3_debug(ctrl, "thread %u: step_quot_min=%u, step_quot_max=%u, sensor_min=%u, sensor_max=%u\n",
+ thread->thread_id, step_quot_min, step_quot_max, sensor_min,
+ sensor_max);
+}
+
+/**
+ * cpr3_thread_busy() - returns true if the specified CPR3 thread is busy
+ * taking a measurement
+ * @thread: Pointer to the CPR3 thread
+ *
+ * Return: CPR3 busy status
+ */
+static bool cpr3_thread_busy(struct cpr3_thread *thread)
+{
+ u32 result;
+
+ result = cpr3_read(thread->ctrl, CPR3_REG_RESULT0(thread->thread_id));
+
+ return !!(result & CPR3_RESULT0_BUSY_MASK);
+}
+
+/**
+ * cpr3_irq_handler() - CPR interrupt handler callback function used for
+ * software closed-loop operation
+ * @irq: CPR interrupt number
+ * @data: Private data corresponding to the CPR3 controller
+ * pointer
+ *
+ * This function increases or decreases the vdd supply voltage based upon the
+ * CPR controller recommendation.
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t cpr3_irq_handler(int irq, void *data)
+{
+ struct cpr3_controller *ctrl = data;
+ struct cpr3_corner *aggr = &ctrl->aggr_corner;
+ u32 cont = CPR3_CONT_CMD_NACK;
+ u32 reg_last_measurement = 0;
+ struct cpr3_regulator *vreg;
+ struct cpr3_corner *corner;
+ unsigned long flags;
+ int i, j, new_volt, last_volt, dynamic_floor_volt, rc;
+ u32 irq_en, status, cpr_status, ctl;
+ bool up, down;
+
+ mutex_lock(&ctrl->lock);
+
+ if (!ctrl->cpr_enabled) {
+ cpr3_debug(ctrl, "CPR interrupt received but CPR is disabled\n");
+ mutex_unlock(&ctrl->lock);
+ return IRQ_HANDLED;
+ } else if (ctrl->use_hw_closed_loop) {
+ cpr3_debug(ctrl, "CPR interrupt received but CPR is using HW closed-loop\n");
+ goto done;
+ }
+
+ /*
+ * CPR IRQ status checking and CPR controller disabling must happen
+ * atomically and without intervening delay in order to avoid an interrupt
+ * storm caused by the handler racing with the CPR controller.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+
+ status = cpr3_read(ctrl, CPR3_REG_IRQ_STATUS);
+ up = status & CPR3_IRQ_UP;
+ down = status & CPR3_IRQ_DOWN;
+
+ if (!up && !down) {
+ /*
+ * Toggle the CPR controller off and then back on since the
+ * hardware and software states are out of sync. This condition
+ * occurs after an aging measurement completes as the CPR IRQ
+ * physically triggers during the aging measurement but the
+ * handler is stuck waiting on the mutex lock.
+ */
+ cpr3_ctrl_loop_disable(ctrl);
+
+ local_irq_restore(flags);
+ preempt_enable();
+
+ /* Wait for the loop disable write to complete */
+ mb();
+
+ /* Wait for BUSY=1 and LOOP_EN=0 in CPR controller registers. */
+ for (i = 0; i < CPR3_REGISTER_WRITE_DELAY_US / 10; i++) {
+ cpr_status = cpr3_read(ctrl, CPR3_REG_CPR_STATUS);
+ ctl = cpr3_read(ctrl, CPR3_REG_CPR_CTL);
+ if (cpr_status & CPR3_CPR_STATUS_BUSY_MASK
+ && (ctl & CPR3_CPR_CTL_LOOP_EN_MASK)
+ == CPR3_CPR_CTL_LOOP_DISABLE)
+ break;
+ udelay(10);
+ }
+ if (i == CPR3_REGISTER_WRITE_DELAY_US / 10)
+ cpr3_debug(ctrl, "CPR controller not disabled after %d us\n",
+ CPR3_REGISTER_WRITE_DELAY_US);
+
+ /* Clear interrupt status */
+ cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR,
+ CPR3_IRQ_UP | CPR3_IRQ_DOWN);
+
+ /* Wait for the interrupt clearing write to complete */
+ mb();
+
+ /* Wait for IRQ_STATUS register to be cleared. */
+ for (i = 0; i < CPR3_REGISTER_WRITE_DELAY_US / 10; i++) {
+ status = cpr3_read(ctrl, CPR3_REG_IRQ_STATUS);
+ if (!(status & (CPR3_IRQ_UP | CPR3_IRQ_DOWN)))
+ break;
+ udelay(10);
+ }
+ if (i == CPR3_REGISTER_WRITE_DELAY_US / 10)
+ cpr3_debug(ctrl, "CPR interrupts not cleared after %d us\n",
+ CPR3_REGISTER_WRITE_DELAY_US);
+
+ cpr3_ctrl_loop_enable(ctrl);
+
+ cpr3_debug(ctrl, "CPR interrupt received but no up or down status bit is set\n");
+
+ mutex_unlock(&ctrl->lock);
+ return IRQ_HANDLED;
+ } else if (up && down) {
+ cpr3_debug(ctrl, "both up and down status bits set\n");
+ /* The up flag takes precedence over the down flag. */
+ down = false;
+ }
+
+ if (ctrl->supports_hw_closed_loop)
+ reg_last_measurement
+ = cpr3_read(ctrl, CPR3_REG_LAST_MEASUREMENT);
+ dynamic_floor_volt = cpr3_regulator_get_dynamic_floor_volt(ctrl,
+ reg_last_measurement);
+
+ local_irq_restore(flags);
+ preempt_enable();
+
+ irq_en = aggr->irq_en;
+ last_volt = aggr->last_volt;
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ if (cpr3_thread_busy(&ctrl->thread[i])) {
+ cpr3_debug(ctrl, "CPR thread %u busy when it should be waiting for SW cont\n",
+ ctrl->thread[i].thread_id);
+ goto done;
+ }
+ }
+
+ new_volt = up ? last_volt + ctrl->step_volt
+ : last_volt - ctrl->step_volt;
+
+ /* Re-enable UP/DOWN interrupt when its opposite is received. */
+ irq_en |= up ? CPR3_IRQ_DOWN : CPR3_IRQ_UP;
+
+ if (new_volt > aggr->ceiling_volt) {
+ new_volt = aggr->ceiling_volt;
+ irq_en &= ~CPR3_IRQ_UP;
+ cpr3_debug(ctrl, "limiting to ceiling=%d uV\n",
+ aggr->ceiling_volt);
+ } else if (new_volt < aggr->floor_volt) {
+ new_volt = aggr->floor_volt;
+ irq_en &= ~CPR3_IRQ_DOWN;
+ cpr3_debug(ctrl, "limiting to floor=%d uV\n", aggr->floor_volt);
+ }
+
+ if (down && new_volt < dynamic_floor_volt) {
+ /*
+ * The vdd-supply voltage should not be decreased below the
+ * dynamic floor voltage. However, it is not necessary (and
+ * counter productive) to force the voltage up to this level
+ * if it happened to be below it since the closed-loop voltage
+ * must have gotten there in a safe manner while the power
+ * domains for the CPR3 regulator imposing the dynamic floor
+ * were not bypassed.
+ */
+ new_volt = last_volt;
+ irq_en &= ~CPR3_IRQ_DOWN;
+ cpr3_debug(ctrl, "limiting to dynamic floor=%d uV\n",
+ dynamic_floor_volt);
+ }
+
+ for (i = 0; i < ctrl->thread_count; i++)
+ cpr3_print_result(&ctrl->thread[i]);
+
+ cpr3_debug(ctrl, "%s: new_volt=%d uV, last_volt=%d uV\n",
+ up ? "UP" : "DN", new_volt, last_volt);
+
+ if (ctrl->proc_clock_throttle && last_volt == aggr->ceiling_volt
+ && new_volt < last_volt)
+ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+ ctrl->proc_clock_throttle);
+
+ if (new_volt != last_volt) {
+ rc = cpr3_regulator_scale_vdd_voltage(ctrl, new_volt,
+ last_volt,
+ aggr);
+ if (rc) {
+ cpr3_err(ctrl, "scale_vdd() failed to set vdd=%d uV, rc=%d\n",
+ new_volt, rc);
+ goto done;
+ }
+ cont = CPR3_CONT_CMD_ACK;
+
+ /*
+ * Update the closed-loop voltage for all regulators managed
+ * by this CPR controller.
+ */
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+ cpr3_update_vreg_closed_loop_volt(vreg,
+ new_volt, reg_last_measurement);
+ }
+ }
+ }
+
+ if (ctrl->proc_clock_throttle && new_volt == aggr->ceiling_volt)
+ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+ CPR3_PD_THROTTLE_DISABLE);
+
+ corner = &ctrl->thread[0].vreg[0].corner[
+ ctrl->thread[0].vreg[0].current_corner];
+
+ if (irq_en != aggr->irq_en) {
+ aggr->irq_en = irq_en;
+ cpr3_write(ctrl, CPR3_REG_IRQ_EN, irq_en);
+ }
+
+ aggr->last_volt = new_volt;
+
+done:
+ /* Clear interrupt status */
+ cpr3_write(ctrl, CPR3_REG_IRQ_CLEAR, CPR3_IRQ_UP | CPR3_IRQ_DOWN);
+
+ /* ACK or NACK the CPR controller */
+ cpr3_write(ctrl, CPR3_REG_CONT_CMD, cont);
+
+ mutex_unlock(&ctrl->lock);
+ return IRQ_HANDLED;
+}
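+
+/*
+ * Illustrative software closed-loop step (values assumed): with
+ * step_volt = 5000 uV, last_volt = 720000 uV and an UP interrupt, the
+ * handler above requests 725000 uV, re-enables the DOWN interrupt, and
+ * clamps to the aggregated ceiling (clearing the UP interrupt enable)
+ * if 725000 uV would exceed it. The controller is ACKed only when the
+ * vdd-supply was actually moved; otherwise it is NACKed.
+ */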
+
+/**
+ * cpr3_ceiling_irq_handler() - CPR ceiling reached interrupt handler callback
+ * function used for hardware closed-loop operation
+ * @irq: CPR ceiling interrupt number
+ * @data: Private data corresponding to the CPR3 controller
+ * pointer
+ *
+ * This function disables processor clock throttling and closed-loop operation
+ * when the ceiling voltage is reached.
+ *
+ * Return: IRQ_HANDLED
+ */
+static irqreturn_t cpr3_ceiling_irq_handler(int irq, void *data)
+{
+ struct cpr3_controller *ctrl = data;
+ int rc, volt;
+
+ mutex_lock(&ctrl->lock);
+
+ if (!ctrl->cpr_enabled) {
+ cpr3_debug(ctrl, "CPR ceiling interrupt received but CPR is disabled\n");
+ goto done;
+ } else if (!ctrl->use_hw_closed_loop) {
+ cpr3_debug(ctrl, "CPR ceiling interrupt received but CPR is using SW closed-loop\n");
+ goto done;
+ }
+
+ volt = regulator_get_voltage(ctrl->vdd_regulator);
+ if (volt < 0) {
+ cpr3_err(ctrl, "could not get vdd voltage, rc=%d\n", volt);
+ goto done;
+ } else if (volt != ctrl->aggr_corner.ceiling_volt) {
+ cpr3_debug(ctrl, "CPR ceiling interrupt received but vdd voltage: %d uV != ceiling voltage: %d uV\n",
+ volt, ctrl->aggr_corner.ceiling_volt);
+ goto done;
+ }
+
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+ /*
+ * Since the ceiling voltage has been reached, disable processor
+ * clock throttling as well as CPR closed-loop operation.
+ */
+ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+ CPR3_PD_THROTTLE_DISABLE);
+ cpr3_ctrl_loop_disable(ctrl);
+ cpr3_debug(ctrl, "CPR closed-loop and throttling disabled\n");
+ }
+
+done:
+ rc = msm_spm_avs_clear_irq(0, MSM_SPM_AVS_IRQ_MAX);
+ if (rc)
+ cpr3_err(ctrl, "could not clear max IRQ, rc=%d\n", rc);
+
+ mutex_unlock(&ctrl->lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * cpr3_regulator_vreg_register() - register a regulator device for a CPR3
+ * regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function initializes all regulator framework related structures and then
+ * calls regulator_register() for the CPR3 regulator.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_vreg_register(struct cpr3_regulator *vreg)
+{
+ struct regulator_config config = {};
+ struct regulator_desc *rdesc;
+ struct regulator_init_data *init_data;
+ int rc;
+
+ init_data = of_get_regulator_init_data(vreg->thread->ctrl->dev,
+ vreg->of_node);
+ if (!init_data) {
+ cpr3_err(vreg, "regulator init data is missing\n");
+ return -EINVAL;
+ }
+
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_STATUS;
+
+ rdesc = &vreg->rdesc;
+ rdesc->n_voltages = vreg->corner_count;
+ rdesc->name = init_data->constraints.name;
+ rdesc->ops = &cpr3_regulator_ops;
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+
+ config.dev = vreg->thread->ctrl->dev;
+ config.driver_data = vreg;
+ config.init_data = init_data;
+ config.of_node = vreg->of_node;
+
+ vreg->rdev = regulator_register(rdesc, &config);
+ if (IS_ERR(vreg->rdev)) {
+ rc = PTR_ERR(vreg->rdev);
+ cpr3_err(vreg, "regulator_register failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int debugfs_int_set(void *data, u64 val)
+{
+ *(int *)data = val;
+ return 0;
+}
+
+static int debugfs_int_get(void *data, u64 *val)
+{
+ *val = *(int *)data;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_int, debugfs_int_get, debugfs_int_set, "%lld\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_int_ro, debugfs_int_get, NULL, "%lld\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_int_wo, NULL, debugfs_int_set, "%lld\n");
+
+/**
+ * debugfs_create_int - create a debugfs file that is used to read and write a
+ * signed int value
+ * @name: Pointer to a string containing the name of the file to
+ * create
+ * @mode: The permissions that the file should have
+ * @parent: Pointer to the parent dentry for this file. This should
+ * be a directory dentry if set. If this parameter is
+ * %NULL, then the file will be created in the root of the
+ * debugfs filesystem.
+ * @value: Pointer to the variable that the file should read to and
+ * write from
+ *
+ * This function creates a file in debugfs with the given name that
+ * contains the value of the variable @value. If the @mode variable is so
+ * set, it can be read from, and written to.
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the debugfs_remove() function when the file is
+ * to be removed. If an error occurs, %NULL will be returned.
+ */
+static struct dentry *debugfs_create_int(const char *name, umode_t mode,
+ struct dentry *parent, int *value)
+{
+ /* if there are no write bits set, make read only */
+ if (!(mode & S_IWUGO))
+ return debugfs_create_file(name, mode, parent, value,
+ &fops_int_ro);
+ /* if there are no read bits set, make write only */
+ if (!(mode & S_IRUGO))
+ return debugfs_create_file(name, mode, parent, value,
+ &fops_int_wo);
+
+ return debugfs_create_file(name, mode, parent, value, &fops_int);
+}
+
+static int debugfs_bool_get(void *data, u64 *val)
+{
+ *val = *(bool *)data;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_bool_ro, debugfs_bool_get, NULL, "%lld\n");
+
+/**
+ * cpr3_debug_ldo_mode_allowed_set() - debugfs callback used to change the
+ * value of the CPR3 regulator ldo_mode_allowed flag
+ * @data: Pointer to private data which is equal to the CPR3
+ * regulator pointer
+ * @val: New value for ldo_mode_allowed
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_ldo_mode_allowed_set(void *data, u64 val)
+{
+ struct cpr3_regulator *vreg = data;
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ bool allow = !!val;
+ int rc, vdd_volt;
+
+ mutex_lock(&ctrl->lock);
+
+ if (vreg->ldo_mode_allowed == allow)
+ goto done;
+
+ vreg->ldo_mode_allowed = allow;
+
+ if (!allow && vreg->ldo_regulator_bypass == LDO_MODE) {
+ vdd_volt = regulator_get_voltage(ctrl->vdd_regulator);
+ if (vdd_volt < 0) {
+ cpr3_err(vreg, "regulator_get_voltage(vdd) failed, rc=%d\n",
+ vdd_volt);
+ goto done;
+ }
+
+ /* Switch back to BHS */
+ rc = cpr3_regulator_set_bhs_mode(vreg, vdd_volt,
+ ctrl->aggr_corner.ceiling_volt);
+ if (rc) {
+ cpr3_err(vreg, "unable to switch to BHS mode, rc=%d\n",
+ rc);
+ goto done;
+ }
+ } else {
+ rc = cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc) {
+ cpr3_err(vreg, "could not change LDO mode=%s, rc=%d\n",
+ allow ? "allowed" : "disallowed", rc);
+ goto done;
+ }
+ }
+
+ cpr3_debug(vreg, "LDO mode=%s\n", allow ? "allowed" : "disallowed");
+
+done:
+ mutex_unlock(&ctrl->lock);
+ return 0;
+}
+
+/**
+ * cpr3_debug_ldo_mode_allowed_get() - debugfs callback used to retrieve the
+ * value of the CPR3 regulator ldo_mode_allowed flag
+ * @data: Pointer to private data which is equal to the CPR3
+ * regulator pointer
+ * @val: Output parameter written with a value of the
+ * ldo_mode_allowed flag
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_ldo_mode_allowed_get(void *data, u64 *val)
+{
+ struct cpr3_regulator *vreg = data;
+
+ *val = vreg->ldo_mode_allowed;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_ldo_mode_allowed_fops,
+ cpr3_debug_ldo_mode_allowed_get,
+ cpr3_debug_ldo_mode_allowed_set,
+ "%llu\n");
+
+/**
+ * cpr3_debug_ldo_mode_get() - debugfs callback used to retrieve the state of
+ * the CPR3 regulator's LDO
+ * @data: Pointer to private data which is equal to the CPR3
+ * regulator pointer
+ * @val: Output parameter written with a value of 1 if using
+ * LDO mode or 0 if the LDO is bypassed
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_ldo_mode_get(void *data, u64 *val)
+{
+ struct cpr3_regulator *vreg = data;
+
+ *val = (vreg->ldo_regulator_bypass == LDO_MODE);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_ldo_mode_fops, cpr3_debug_ldo_mode_get,
+ NULL, "%llu\n");
+
+/**
+ * struct cpr3_debug_corner_info - data structure used by the
+ * cpr3_debugfs_create_corner_int function
+ * @vreg: Pointer to the CPR3 regulator
+ * @index: Pointer to the corner array index
+ * @member_offset: Offset in bytes from the beginning of struct cpr3_corner
+ * to the beginning of the value to be read from
+ * @corner: Pointer to the CPR3 corner array
+ */
+struct cpr3_debug_corner_info {
+ struct cpr3_regulator *vreg;
+ int *index;
+ size_t member_offset;
+ struct cpr3_corner *corner;
+};
+
+static int cpr3_debug_corner_int_get(void *data, u64 *val)
+{
+ struct cpr3_debug_corner_info *info = data;
+ struct cpr3_controller *ctrl = info->vreg->thread->ctrl;
+ int i;
+
+ mutex_lock(&ctrl->lock);
+
+ i = *info->index;
+ if (i < 0)
+ i = 0;
+
+ *val = *(int *)((char *)&info->vreg->corner[i] + info->member_offset);
+
+ mutex_unlock(&ctrl->lock);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_corner_int_fops, cpr3_debug_corner_int_get,
+ NULL, "%lld\n");
+
+/**
+ * cpr3_debugfs_create_corner_int - create a debugfs file that is used to read
+ * a signed int value out of a CPR3 regulator's corner array
+ * @vreg: Pointer to the CPR3 regulator
+ * @name: Pointer to a string containing the name of the file to
+ * create
+ * @mode: The permissions that the file should have
+ * @parent: Pointer to the parent dentry for this file. This should
+ * be a directory dentry if set. If this parameter is
+ * %NULL, then the file will be created in the root of the
+ * debugfs filesystem.
+ * @index: Pointer to the corner array index
+ * @member_offset: Offset in bytes from the beginning of struct cpr3_corner
+ * to the beginning of the value to be read from
+ *
+ * This function creates a file in debugfs with the given name that
+ * contains the value of the int type variable vreg->corner[index].member
+ * where member_offset == offsetof(struct cpr3_corner, member).
+ */
+static struct dentry *cpr3_debugfs_create_corner_int(
+ struct cpr3_regulator *vreg, const char *name, umode_t mode,
+ struct dentry *parent, int *index, size_t member_offset)
+{
+ struct cpr3_debug_corner_info *info;
+
+ info = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ info->vreg = vreg;
+ info->index = index;
+ info->member_offset = member_offset;
+
+ return debugfs_create_file(name, mode, parent, info,
+ &cpr3_debug_corner_int_fops);
+}
+
+static int cpr3_debug_quot_open(struct inode *inode, struct file *file)
+{
+ struct cpr3_debug_corner_info *info = inode->i_private;
+ struct cpr3_thread *thread = info->vreg->thread;
+ int size, i, pos;
+ u32 *quot;
+ char *buf;
+
+ /*
+ * Max size:
+ * - 10 digits + ' ' or '\n' = 11 bytes per number
+ * - terminating '\0'
+ */
+ size = CPR3_RO_COUNT * 11;
+ buf = kzalloc(size + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ file->private_data = buf;
+
+ mutex_lock(&thread->ctrl->lock);
+
+ quot = info->corner[*info->index].target_quot;
+
+ for (i = 0, pos = 0; i < CPR3_RO_COUNT; i++)
+ pos += scnprintf(buf + pos, size - pos, "%u%c",
+ quot[i], i < CPR3_RO_COUNT - 1 ? ' ' : '\n');
+
+ mutex_unlock(&thread->ctrl->lock);
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t cpr3_debug_quot_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, len, ppos, file->private_data,
+ strlen(file->private_data));
+}
+
+static int cpr3_debug_quot_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+
+ return 0;
+}
+
+static const struct file_operations cpr3_debug_quot_fops = {
+ .owner = THIS_MODULE,
+ .open = cpr3_debug_quot_open,
+ .release = cpr3_debug_quot_release,
+ .read = cpr3_debug_quot_read,
+ .llseek = no_llseek,
+};
+
+/**
+ * cpr3_regulator_debugfs_corner_add() - add debugfs files to expose
+ * configuration data for the CPR corner
+ * @vreg: Pointer to the CPR3 regulator
+ * @corner_dir: Pointer to the parent corner dentry for the new files
+ * @index: Pointer to the corner array index
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_corner_add(struct cpr3_regulator *vreg,
+ struct dentry *corner_dir, int *index)
+{
+ struct cpr3_debug_corner_info *info;
+ struct dentry *temp;
+
+ temp = cpr3_debugfs_create_corner_int(vreg, "floor_volt", S_IRUGO,
+ corner_dir, index, offsetof(struct cpr3_corner, floor_volt));
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "floor_volt debugfs file creation failed\n");
+ return;
+ }
+
+ temp = cpr3_debugfs_create_corner_int(vreg, "ceiling_volt", S_IRUGO,
+ corner_dir, index, offsetof(struct cpr3_corner, ceiling_volt));
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "ceiling_volt debugfs file creation failed\n");
+ return;
+ }
+
+ temp = cpr3_debugfs_create_corner_int(vreg, "open_loop_volt", S_IRUGO,
+ corner_dir, index,
+ offsetof(struct cpr3_corner, open_loop_volt));
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "open_loop_volt debugfs file creation failed\n");
+ return;
+ }
+
+ temp = cpr3_debugfs_create_corner_int(vreg, "last_volt", S_IRUGO,
+ corner_dir, index, offsetof(struct cpr3_corner, last_volt));
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "last_volt debugfs file creation failed\n");
+ return;
+ }
+
+ info = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return;
+
+ info->vreg = vreg;
+ info->index = index;
+ info->corner = vreg->corner;
+
+ temp = debugfs_create_file("target_quots", S_IRUGO, corner_dir,
+ info, &cpr3_debug_quot_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "target_quots debugfs file creation failed\n");
+ return;
+ }
+}
+
+/**
+ * cpr3_debug_corner_index_set() - debugfs callback used to change the
+ * value of the CPR3 regulator debug_corner index
+ * @data: Pointer to private data which is equal to the CPR3
+ * regulator pointer
+ * @val: New value for debug_corner
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_corner_index_set(void *data, u64 val)
+{
+ struct cpr3_regulator *vreg = data;
+
+ if (val < CPR3_CORNER_OFFSET || val > vreg->corner_count) {
+ cpr3_err(vreg, "invalid corner index %llu; allowed values: %d-%d\n",
+ val, CPR3_CORNER_OFFSET, vreg->corner_count);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vreg->thread->ctrl->lock);
+ vreg->debug_corner = val - CPR3_CORNER_OFFSET;
+ mutex_unlock(&vreg->thread->ctrl->lock);
+
+ return 0;
+}
+
+/**
+ * cpr3_debug_corner_index_get() - debugfs callback used to retrieve
+ * the value of the CPR3 regulator debug_corner index
+ * @data: Pointer to private data which is equal to the CPR3
+ * regulator pointer
+ * @val: Output parameter written with the value of
+ * debug_corner
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_corner_index_get(void *data, u64 *val)
+{
+ struct cpr3_regulator *vreg = data;
+
+ *val = vreg->debug_corner + CPR3_CORNER_OFFSET;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_corner_index_fops,
+ cpr3_debug_corner_index_get,
+ cpr3_debug_corner_index_set,
+ "%llu\n");
+
+/**
+ * cpr3_debug_current_corner_index_get() - debugfs callback used to retrieve
+ * the value of the CPR3 regulator current_corner index
+ * @data: Pointer to private data which is equal to the CPR3
+ * regulator pointer
+ * @val: Output parameter written with the value of
+ * current_corner
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_current_corner_index_get(void *data, u64 *val)
+{
+ struct cpr3_regulator *vreg = data;
+
+ *val = vreg->current_corner + CPR3_CORNER_OFFSET;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_current_corner_index_fops,
+ cpr3_debug_current_corner_index_get,
+ NULL, "%llu\n");
+
+/**
+ * cpr3_regulator_debugfs_vreg_add() - add debugfs files to expose configuration
+ * data for the CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ * @thread_dir:	CPR3 thread debugfs directory handle
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_vreg_add(struct cpr3_regulator *vreg,
+ struct dentry *thread_dir)
+{
+ struct dentry *temp, *corner_dir, *vreg_dir;
+
+ vreg_dir = debugfs_create_dir(vreg->name, thread_dir);
+ if (IS_ERR_OR_NULL(vreg_dir)) {
+ cpr3_err(vreg, "%s debugfs directory creation failed\n",
+ vreg->name);
+ return;
+ }
+
+ temp = debugfs_create_int("speed_bin_fuse", S_IRUGO, vreg_dir,
+ &vreg->speed_bin_fuse);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "speed_bin_fuse debugfs file creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_int("cpr_rev_fuse", S_IRUGO, vreg_dir,
+ &vreg->cpr_rev_fuse);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "cpr_rev_fuse debugfs file creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_int("fuse_combo", S_IRUGO, vreg_dir,
+ &vreg->fuse_combo);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "fuse_combo debugfs file creation failed\n");
+ return;
+ }
+
+ if (vreg->ldo_regulator) {
+ temp = debugfs_create_file("ldo_mode", S_IRUGO, vreg_dir,
+ vreg, &cpr3_debug_ldo_mode_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "ldo_mode debugfs file creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("ldo_mode_allowed",
+ S_IRUGO | S_IWUSR, vreg_dir, vreg,
+ &cpr3_debug_ldo_mode_allowed_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "ldo_mode_allowed debugfs file creation failed\n");
+ return;
+ }
+ }
+
+ temp = debugfs_create_int("corner_count", S_IRUGO, vreg_dir,
+ &vreg->corner_count);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "corner_count debugfs file creation failed\n");
+ return;
+ }
+
+ corner_dir = debugfs_create_dir("corner", vreg_dir);
+ if (IS_ERR_OR_NULL(corner_dir)) {
+ cpr3_err(vreg, "corner debugfs directory creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("index", S_IRUGO | S_IWUSR, corner_dir,
+ vreg, &cpr3_debug_corner_index_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "index debugfs file creation failed\n");
+ return;
+ }
+
+ cpr3_regulator_debugfs_corner_add(vreg, corner_dir,
+ &vreg->debug_corner);
+
+ corner_dir = debugfs_create_dir("current_corner", vreg_dir);
+ if (IS_ERR_OR_NULL(corner_dir)) {
+ cpr3_err(vreg, "current_corner debugfs directory creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("index", S_IRUGO, corner_dir,
+ vreg, &cpr3_debug_current_corner_index_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(vreg, "index debugfs file creation failed\n");
+ return;
+ }
+
+ cpr3_regulator_debugfs_corner_add(vreg, corner_dir,
+ &vreg->current_corner);
+}
+
+/**
+ * cpr3_regulator_debugfs_thread_add() - add debugfs files to expose
+ * configuration data for the CPR thread
+ * @thread: Pointer to the CPR3 thread
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_thread_add(struct cpr3_thread *thread)
+{
+ struct cpr3_controller *ctrl = thread->ctrl;
+ struct dentry *aggr_dir, *temp, *thread_dir;
+ struct cpr3_debug_corner_info *info;
+ char buf[20];
+ int *index;
+ int i;
+
+ scnprintf(buf, sizeof(buf), "thread%u", thread->thread_id);
+ thread_dir = debugfs_create_dir(buf, thread->ctrl->debugfs);
+ if (IS_ERR_OR_NULL(thread_dir)) {
+ cpr3_err(ctrl, "thread %u %s debugfs directory creation failed\n",
+ thread->thread_id, buf);
+ return;
+ }
+
+ aggr_dir = debugfs_create_dir("max_aggregated_params", thread_dir);
+ if (IS_ERR_OR_NULL(aggr_dir)) {
+ cpr3_err(ctrl, "thread %u max_aggregated_params debugfs directory creation failed\n",
+ thread->thread_id);
+ return;
+ }
+
+ temp = debugfs_create_int("floor_volt", S_IRUGO, aggr_dir,
+ &thread->aggr_corner.floor_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "thread %u aggr floor_volt debugfs file creation failed\n",
+ thread->thread_id);
+ return;
+ }
+
+ temp = debugfs_create_int("ceiling_volt", S_IRUGO, aggr_dir,
+ &thread->aggr_corner.ceiling_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "thread %u aggr ceiling_volt debugfs file creation failed\n",
+ thread->thread_id);
+ return;
+ }
+
+ temp = debugfs_create_int("open_loop_volt", S_IRUGO, aggr_dir,
+ &thread->aggr_corner.open_loop_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "thread %u aggr open_loop_volt debugfs file creation failed\n",
+ thread->thread_id);
+ return;
+ }
+
+ temp = debugfs_create_int("last_volt", S_IRUGO, aggr_dir,
+ &thread->aggr_corner.last_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "thread %u aggr last_volt debugfs file creation failed\n",
+ thread->thread_id);
+ return;
+ }
+
+ info = devm_kzalloc(thread->ctrl->dev, sizeof(*info), GFP_KERNEL);
+ index = devm_kzalloc(thread->ctrl->dev, sizeof(*index), GFP_KERNEL);
+ if (!info || !index)
+ return;
+ *index = 0;
+ info->vreg = &thread->vreg[0];
+ info->index = index;
+ info->corner = &thread->aggr_corner;
+
+ temp = debugfs_create_file("target_quots", S_IRUGO, aggr_dir,
+ info, &cpr3_debug_quot_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "thread %u target_quots debugfs file creation failed\n",
+ thread->thread_id);
+ return;
+ }
+
+ for (i = 0; i < thread->vreg_count; i++)
+ cpr3_regulator_debugfs_vreg_add(&thread->vreg[i], thread_dir);
+}
+
+/**
+ * cpr3_debug_closed_loop_enable_set() - debugfs callback used to change the
+ * value of the CPR controller cpr_allowed_sw flag which enables or
+ * disables closed-loop operation
+ * @data: Pointer to private data which is equal to the CPR
+ * controller pointer
+ * @val: New value for cpr_allowed_sw
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_closed_loop_enable_set(void *data, u64 val)
+{
+ struct cpr3_controller *ctrl = data;
+ bool enable = !!val;
+ int rc;
+
+ mutex_lock(&ctrl->lock);
+
+ if (ctrl->cpr_allowed_sw == enable)
+ goto done;
+
+ if (enable && !ctrl->cpr_allowed_hw) {
+ cpr3_err(ctrl, "CPR closed-loop operation is not allowed\n");
+ goto done;
+ }
+
+ ctrl->cpr_allowed_sw = enable;
+
+ rc = cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "could not change CPR enable state=%u, rc=%d\n",
+ enable, rc);
+ goto done;
+ }
+
+ if (ctrl->proc_clock_throttle && !ctrl->cpr_enabled) {
+ rc = cpr3_clock_enable(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc);
+ goto done;
+ }
+ ctrl->cpr_enabled = true;
+
+ cpr3_write(ctrl, CPR3_REG_PD_THROTTLE,
+ CPR3_PD_THROTTLE_DISABLE);
+
+ cpr3_clock_disable(ctrl);
+ ctrl->cpr_enabled = false;
+ }
+
+ cpr3_debug(ctrl, "closed-loop=%s\n", enable ? "enabled" : "disabled");
+
+done:
+ mutex_unlock(&ctrl->lock);
+ return 0;
+}
+
+/**
+ * cpr3_debug_closed_loop_enable_get() - debugfs callback used to retrieve
+ * the value of the CPR controller cpr_allowed_sw flag which
+ * indicates if closed-loop operation is enabled
+ * @data: Pointer to private data which is equal to the CPR
+ * controller pointer
+ * @val: Output parameter written with the value of
+ * cpr_allowed_sw
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_closed_loop_enable_get(void *data, u64 *val)
+{
+ struct cpr3_controller *ctrl = data;
+
+ *val = ctrl->cpr_allowed_sw;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_closed_loop_enable_fops,
+ cpr3_debug_closed_loop_enable_get,
+ cpr3_debug_closed_loop_enable_set,
+ "%llu\n");
+
+/**
+ * cpr3_debug_hw_closed_loop_enable_set() - debugfs callback used to change the
+ * value of the CPR controller use_hw_closed_loop flag which
+ * switches between software closed-loop and hardware closed-loop
+ * operation
+ * @data: Pointer to private data which is equal to the CPR
+ * controller pointer
+ * @val: New value for use_hw_closed_loop
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_hw_closed_loop_enable_set(void *data, u64 val)
+{
+ struct cpr3_controller *ctrl = data;
+ bool use_hw_closed_loop = !!val;
+ struct cpr3_regulator *vreg;
+ bool cpr_enabled;
+ int i, j, k, rc;
+
+ mutex_lock(&ctrl->lock);
+
+ if (ctrl->use_hw_closed_loop == use_hw_closed_loop)
+ goto done;
+
+ cpr3_ctrl_loop_disable(ctrl);
+
+ ctrl->use_hw_closed_loop = use_hw_closed_loop;
+
+ cpr_enabled = ctrl->cpr_enabled;
+
+ /* Ensure that CPR clocks are enabled before writing to registers. */
+ if (!cpr_enabled) {
+ rc = cpr3_clock_enable(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "clock enable failed, rc=%d\n", rc);
+ goto done;
+ }
+ ctrl->cpr_enabled = true;
+ }
+
+ if (ctrl->use_hw_closed_loop)
+ cpr3_write(ctrl, CPR3_REG_IRQ_EN, 0);
+
+ if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR4) {
+ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+ CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_EN_MASK,
+ ctrl->use_hw_closed_loop
+ ? CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_ENABLE
+ : CPR4_MARGIN_ADJ_CTL_HW_CLOSED_LOOP_DISABLE);
+ } else if (ctrl->ctrl_type == CPR_CTRL_TYPE_CPR3) {
+ cpr3_write(ctrl, CPR3_REG_HW_CLOSED_LOOP,
+ ctrl->use_hw_closed_loop
+ ? CPR3_HW_CLOSED_LOOP_ENABLE
+ : CPR3_HW_CLOSED_LOOP_DISABLE);
+ }
+
+ /* Turn off CPR clocks if they were off before this function call. */
+ if (!cpr_enabled) {
+ cpr3_clock_disable(ctrl);
+ ctrl->cpr_enabled = false;
+ }
+
+ if (ctrl->use_hw_closed_loop) {
+ rc = regulator_enable(ctrl->vdd_limit_regulator);
+ if (rc) {
+ cpr3_err(ctrl, "CPR limit regulator enable failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ rc = msm_spm_avs_enable_irq(0, MSM_SPM_AVS_IRQ_MAX);
+ if (rc) {
+ cpr3_err(ctrl, "could not enable max IRQ, rc=%d\n", rc);
+ goto done;
+ }
+ } else {
+ rc = regulator_disable(ctrl->vdd_limit_regulator);
+ if (rc) {
+ cpr3_err(ctrl, "CPR limit regulator disable failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ rc = msm_spm_avs_disable_irq(0, MSM_SPM_AVS_IRQ_MAX);
+ if (rc) {
+ cpr3_err(ctrl, "could not disable max IRQ, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ /*
+ * Due to APM and mem-acc floor restrictions, the closed-loop
+ * voltage may be different when using software closed-loop vs hardware
+ * closed-loop. Therefore, reset the cached closed-loop voltage for all
+ * corners to the corresponding open-loop voltage when switching between
+ * SW and HW closed-loop mode.
+ */
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ vreg = &ctrl->thread[i].vreg[j];
+ for (k = 0; k < vreg->corner_count; k++)
+ vreg->corner[k].last_volt
+ = vreg->corner[k].open_loop_volt;
+ }
+ }
+
+ /* Skip last_volt caching */
+ ctrl->last_corner_was_closed_loop = false;
+
+ rc = cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "could not change CPR HW closed-loop enable state=%u, rc=%d\n",
+ use_hw_closed_loop, rc);
+ goto done;
+ }
+
+ cpr3_debug(ctrl, "closed-loop mode=%s\n",
+ use_hw_closed_loop ? "HW" : "SW");
+
+done:
+ mutex_unlock(&ctrl->lock);
+ return 0;
+}
+
+/**
+ * cpr3_debug_hw_closed_loop_enable_get() - debugfs callback used to retrieve
+ * the value of the CPR controller use_hw_closed_loop flag which
+ * indicates if hardware closed-loop operation is being used in
+ * place of software closed-loop operation
+ * @data: Pointer to private data which is equal to the CPR
+ * controller pointer
+ * @val: Output parameter written with the value of
+ * use_hw_closed_loop
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_hw_closed_loop_enable_get(void *data, u64 *val)
+{
+ struct cpr3_controller *ctrl = data;
+
+ *val = ctrl->use_hw_closed_loop;
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_hw_closed_loop_enable_fops,
+ cpr3_debug_hw_closed_loop_enable_get,
+ cpr3_debug_hw_closed_loop_enable_set,
+ "%llu\n");
+
+/**
+ * cpr3_debug_trigger_aging_measurement_set() - debugfs callback used to trigger
+ *		another CPR aging measurement
+ * @data: Pointer to private data which is equal to the CPR
+ * controller pointer
+ * @val: Unused
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_debug_trigger_aging_measurement_set(void *data, u64 val)
+{
+ struct cpr3_controller *ctrl = data;
+ int rc;
+
+ mutex_lock(&ctrl->lock);
+
+ cpr3_ctrl_loop_disable(ctrl);
+
+ cpr3_regulator_set_aging_ref_adjustment(ctrl, INT_MAX);
+ ctrl->aging_required = true;
+ ctrl->aging_succeeded = false;
+ ctrl->aging_failed = false;
+
+ rc = cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "could not update the CPR controller state, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+done:
+ mutex_unlock(&ctrl->lock);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(cpr3_debug_trigger_aging_measurement_fops,
+ NULL,
+ cpr3_debug_trigger_aging_measurement_set,
+ "%llu\n");
+
+/**
+ * cpr3_regulator_debugfs_ctrl_add() - add debugfs files to expose configuration
+ * data for the CPR controller
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_ctrl_add(struct cpr3_controller *ctrl)
+{
+ struct dentry *temp, *aggr_dir;
+ int i;
+
+ /* Add cpr3-regulator base directory if it isn't present already. */
+ if (cpr3_debugfs_base == NULL) {
+ cpr3_debugfs_base = debugfs_create_dir("cpr3-regulator", NULL);
+ if (IS_ERR_OR_NULL(cpr3_debugfs_base)) {
+ cpr3_err(ctrl, "cpr3-regulator debugfs base directory creation failed\n");
+ cpr3_debugfs_base = NULL;
+ return;
+ }
+ }
+
+ ctrl->debugfs = debugfs_create_dir(ctrl->name, cpr3_debugfs_base);
+ if (IS_ERR_OR_NULL(ctrl->debugfs)) {
+ cpr3_err(ctrl, "cpr3-regulator controller debugfs directory creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("cpr_closed_loop_enable", S_IRUGO | S_IWUSR,
+ ctrl->debugfs, ctrl,
+ &cpr3_debug_closed_loop_enable_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "cpr_closed_loop_enable debugfs file creation failed\n");
+ return;
+ }
+
+ if (ctrl->supports_hw_closed_loop) {
+ temp = debugfs_create_file("use_hw_closed_loop",
+ S_IRUGO | S_IWUSR, ctrl->debugfs, ctrl,
+ &cpr3_debug_hw_closed_loop_enable_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "use_hw_closed_loop debugfs file creation failed\n");
+ return;
+ }
+ }
+
+ temp = debugfs_create_int("thread_count", S_IRUGO, ctrl->debugfs,
+ &ctrl->thread_count);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "thread_count debugfs file creation failed\n");
+ return;
+ }
+
+ if (ctrl->apm) {
+ temp = debugfs_create_int("apm_threshold_volt", S_IRUGO,
+ ctrl->debugfs, &ctrl->apm_threshold_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "apm_threshold_volt debugfs file creation failed\n");
+ return;
+ }
+ }
+
+ if (ctrl->aging_required || ctrl->aging_succeeded
+ || ctrl->aging_failed) {
+ temp = debugfs_create_int("aging_adj_volt", S_IRUGO,
+ ctrl->debugfs, &ctrl->aging_ref_adjust_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "aging_adj_volt debugfs file creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("aging_succeeded", S_IRUGO,
+ ctrl->debugfs, &ctrl->aging_succeeded, &fops_bool_ro);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "aging_succeeded debugfs file creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("aging_failed", S_IRUGO,
+ ctrl->debugfs, &ctrl->aging_failed, &fops_bool_ro);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "aging_failed debugfs file creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_file("aging_trigger", S_IWUSR,
+ ctrl->debugfs, ctrl,
+ &cpr3_debug_trigger_aging_measurement_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "aging_trigger debugfs file creation failed\n");
+ return;
+ }
+ }
+
+ aggr_dir = debugfs_create_dir("max_aggregated_voltages", ctrl->debugfs);
+ if (IS_ERR_OR_NULL(aggr_dir)) {
+ cpr3_err(ctrl, "max_aggregated_voltages debugfs directory creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_int("floor_volt", S_IRUGO, aggr_dir,
+ &ctrl->aggr_corner.floor_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "aggr floor_volt debugfs file creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_int("ceiling_volt", S_IRUGO, aggr_dir,
+ &ctrl->aggr_corner.ceiling_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "aggr ceiling_volt debugfs file creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_int("open_loop_volt", S_IRUGO, aggr_dir,
+ &ctrl->aggr_corner.open_loop_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "aggr open_loop_volt debugfs file creation failed\n");
+ return;
+ }
+
+ temp = debugfs_create_int("last_volt", S_IRUGO, aggr_dir,
+ &ctrl->aggr_corner.last_volt);
+ if (IS_ERR_OR_NULL(temp)) {
+ cpr3_err(ctrl, "aggr last_volt debugfs file creation failed\n");
+ return;
+ }
+
+ for (i = 0; i < ctrl->thread_count; i++)
+ cpr3_regulator_debugfs_thread_add(&ctrl->thread[i]);
+}
+
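+/*
+ * The debugfs hierarchy created above is roughly as follows (illustrative;
+ * assuming debugfs is mounted at /sys/kernel/debug):
+ *
+ *	cpr3-regulator/<ctrl-name>/
+ *		cpr_closed_loop_enable
+ *		use_hw_closed_loop		(if HW closed-loop is supported)
+ *		thread_count
+ *		max_aggregated_voltages/{floor,ceiling,open_loop,last}_volt
+ *		thread<N>/
+ *			max_aggregated_params/...
+ *			<vreg-name>/
+ *				speed_bin_fuse, cpr_rev_fuse, fuse_combo, ...
+ *				corner/{index, floor_volt, ..., target_quots}
+ *				current_corner/{index, ...}
+ *
+ * For example, writing 0 to cpr_closed_loop_enable disallows closed-loop
+ * operation in software, and writing a 1-based corner number to
+ * corner/index selects which corner the corner/ files display.
+ */
+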
+/**
+ * cpr3_regulator_debugfs_ctrl_remove() - remove debugfs files for the CPR
+ * controller
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Note, this function must be called after the controller has been removed from
+ * cpr3_controller_list and while the cpr3_controller_list_mutex lock is held.
+ *
+ * Return: none
+ */
+static void cpr3_regulator_debugfs_ctrl_remove(struct cpr3_controller *ctrl)
+{
+ if (list_empty(&cpr3_controller_list)) {
+ debugfs_remove_recursive(cpr3_debugfs_base);
+ cpr3_debugfs_base = NULL;
+ } else {
+ debugfs_remove_recursive(ctrl->debugfs);
+ }
+}
+
+/**
+ * cpr3_regulator_init_ctrl_data() - performs initialization of CPR controller
+ * elements
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_ctrl_data(struct cpr3_controller *ctrl)
+{
+ /* Read the initial vdd voltage from hardware. */
+ ctrl->aggr_corner.last_volt
+ = regulator_get_voltage(ctrl->vdd_regulator);
+ if (ctrl->aggr_corner.last_volt < 0) {
+ cpr3_err(ctrl, "regulator_get_voltage(vdd) failed, rc=%d\n",
+ ctrl->aggr_corner.last_volt);
+ return ctrl->aggr_corner.last_volt;
+ }
+ ctrl->aggr_corner.open_loop_volt = ctrl->aggr_corner.last_volt;
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_init_vreg_data() - performs initialization of common CPR3
+ *		regulator elements and validates aging configurations
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_init_vreg_data(struct cpr3_regulator *vreg)
+{
+ int i, j;
+
+ vreg->current_corner = CPR3_REGULATOR_CORNER_INVALID;
+ vreg->last_closed_loop_corner = CPR3_REGULATOR_CORNER_INVALID;
+
+ for (i = 0; i < vreg->corner_count; i++) {
+ vreg->corner[i].last_volt = vreg->corner[i].open_loop_volt;
+ vreg->corner[i].irq_en = CPR3_IRQ_UP | CPR3_IRQ_DOWN;
+
+ vreg->corner[i].ro_mask = 0;
+ for (j = 0; j < CPR3_RO_COUNT; j++) {
+ if (vreg->corner[i].target_quot[j] == 0)
+ vreg->corner[i].ro_mask |= BIT(j);
+ }
+ }
+
+ if (vreg->aging_allowed && vreg->corner[vreg->aging_corner].ceiling_volt
+ > vreg->thread->ctrl->aging_ref_volt) {
+ cpr3_err(vreg, "aging corner %d ceiling voltage = %d > aging ref voltage = %d uV\n",
+ vreg->aging_corner,
+ vreg->corner[vreg->aging_corner].ceiling_volt,
+ vreg->thread->ctrl->aging_ref_volt);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_suspend() - perform common required CPR3 power down steps
+ * before the system enters suspend
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_regulator_suspend(struct cpr3_controller *ctrl)
+{
+ int rc;
+
+ mutex_lock(&ctrl->lock);
+
+ cpr3_ctrl_loop_disable(ctrl);
+
+ rc = cpr3_closed_loop_disable(ctrl);
+ if (rc)
+ cpr3_err(ctrl, "could not disable CPR, rc=%d\n", rc);
+
+ ctrl->cpr_suspended = true;
+
+ mutex_unlock(&ctrl->lock);
+ return 0;
+}
+
+/**
+ * cpr3_regulator_resume() - perform common required CPR3 power up steps after
+ * the system resumes from suspend
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_regulator_resume(struct cpr3_controller *ctrl)
+{
+ int rc;
+
+ mutex_lock(&ctrl->lock);
+
+ ctrl->cpr_suspended = false;
+
+ rc = cpr3_regulator_update_ctrl_state(ctrl);
+ if (rc)
+ cpr3_err(ctrl, "could not enable CPR, rc=%d\n", rc);
+
+ mutex_unlock(&ctrl->lock);
+ return 0;
+}
+
+/**
+ * cpr3_regulator_validate_controller() - verify the data passed in via the
+ * cpr3_controller data structure
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_regulator_validate_controller(struct cpr3_controller *ctrl)
+{
+ int i;
+
+ if (!ctrl->vdd_regulator) {
+ cpr3_err(ctrl, "vdd regulator missing\n");
+ return -EINVAL;
+ } else if (ctrl->sensor_count <= 0
+ || ctrl->sensor_count > CPR3_MAX_SENSOR_COUNT) {
+ cpr3_err(ctrl, "invalid CPR sensor count=%d\n",
+ ctrl->sensor_count);
+ return -EINVAL;
+ } else if (!ctrl->sensor_owner) {
+ cpr3_err(ctrl, "CPR sensor ownership table missing\n");
+ return -EINVAL;
+ }
+
+ if (ctrl->aging_required) {
+ for (i = 0; i < ctrl->aging_sensor_count; i++) {
+ if (ctrl->aging_sensor[i].sensor_id
+ >= ctrl->sensor_count) {
+ cpr3_err(ctrl, "aging_sensor[%d] id=%u is not in the value range 0-%d\n",
+ i, ctrl->aging_sensor[i].sensor_id,
+ ctrl->sensor_count - 1);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_regulator_register() - register the regulators for a CPR3 controller and
+ * perform CPR hardware initialization
+ * @pdev: Platform device pointer for the CPR3 controller
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_regulator_register(struct platform_device *pdev,
+ struct cpr3_controller *ctrl)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int i, j, rc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "%s: Device tree node is missing\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!ctrl || !ctrl->name) {
+ dev_err(dev, "%s: CPR controller data is missing\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = cpr3_regulator_validate_controller(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "controller validation failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ mutex_init(&ctrl->lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpr_ctrl");
+ if (!res || !res->start) {
+ cpr3_err(ctrl, "CPR controller address is missing\n");
+ return -ENXIO;
+ }
+ ctrl->cpr_ctrl_base = devm_ioremap(dev, res->start, resource_size(res));
+
+ ctrl->irq = platform_get_irq_byname(pdev, "cpr");
+ if (ctrl->irq < 0) {
+ cpr3_err(ctrl, "missing CPR interrupt\n");
+ return ctrl->irq;
+ }
+
+ if (ctrl->supports_hw_closed_loop) {
+ rc = msm_spm_probe_done();
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "spm unavailable, rc=%d\n", rc);
+ return rc;
+ }
+
+ ctrl->ceiling_irq = platform_get_irq_byname(pdev, "ceiling");
+ if (ctrl->ceiling_irq < 0) {
+ cpr3_err(ctrl, "missing ceiling interrupt\n");
+ return ctrl->ceiling_irq;
+ }
+ }
+
+ rc = cpr3_regulator_init_ctrl_data(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "CPR controller data initialization failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ rc = cpr3_regulator_init_vreg_data(
+ &ctrl->thread[i].vreg[j]);
+ if (rc)
+ return rc;
+ cpr3_print_quots(&ctrl->thread[i].vreg[j]);
+ }
+ }
+
+ /*
+ * Add the maximum possible aging voltage margin until it is possible
+ * to perform an aging measurement.
+ */
+ if (ctrl->aging_required)
+ cpr3_regulator_set_aging_ref_adjustment(ctrl, INT_MAX);
+
+ rc = cpr3_regulator_init_ctrl(ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "CPR controller initialization failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Register regulator devices for all threads. */
+ for (i = 0; i < ctrl->thread_count; i++) {
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++) {
+ rc = cpr3_regulator_vreg_register(
+ &ctrl->thread[i].vreg[j]);
+ if (rc) {
+ cpr3_err(&ctrl->thread[i].vreg[j], "failed to register regulator, rc=%d\n",
+ rc);
+ goto free_regulators;
+ }
+ }
+ }
+
+ rc = devm_request_threaded_irq(dev, ctrl->irq, NULL, cpr3_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING, "cpr3", ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "could not request IRQ %d, rc=%d\n",
+ ctrl->irq, rc);
+ goto free_regulators;
+ }
+
+ if (ctrl->supports_hw_closed_loop) {
+ rc = devm_request_threaded_irq(dev, ctrl->ceiling_irq, NULL,
+ cpr3_ceiling_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr3_ceiling", ctrl);
+ if (rc) {
+ cpr3_err(ctrl, "could not request ceiling IRQ %d, rc=%d\n",
+ ctrl->ceiling_irq, rc);
+ goto free_regulators;
+ }
+ }
+
+ mutex_lock(&cpr3_controller_list_mutex);
+ cpr3_regulator_debugfs_ctrl_add(ctrl);
+ list_add(&ctrl->list, &cpr3_controller_list);
+ mutex_unlock(&cpr3_controller_list_mutex);
+
+ return 0;
+
+free_regulators:
+ for (i = 0; i < ctrl->thread_count; i++)
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++)
+ if (!IS_ERR_OR_NULL(ctrl->thread[i].vreg[j].rdev))
+ regulator_unregister(
+ ctrl->thread[i].vreg[j].rdev);
+ return rc;
+}
+
+/**
+ * cpr3_regulator_unregister() - unregister the regulators for a CPR3 controller
+ * and perform CPR hardware shutdown
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_regulator_unregister(struct cpr3_controller *ctrl)
+{
+ int i, j;
+
+ mutex_lock(&cpr3_controller_list_mutex);
+ list_del(&ctrl->list);
+ cpr3_regulator_debugfs_ctrl_remove(ctrl);
+ mutex_unlock(&cpr3_controller_list_mutex);
+
+ cpr3_ctrl_loop_disable(ctrl);
+ cpr3_closed_loop_disable(ctrl);
+
+ if (ctrl->use_hw_closed_loop) {
+ regulator_disable(ctrl->vdd_limit_regulator);
+ msm_spm_avs_disable_irq(0, MSM_SPM_AVS_IRQ_MAX);
+ }
+
+ for (i = 0; i < ctrl->thread_count; i++)
+ for (j = 0; j < ctrl->thread[i].vreg_count; j++)
+ regulator_unregister(ctrl->thread[i].vreg[j].rdev);
+
+ return 0;
+}
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
new file mode 100644
index 000000000000..150104c11fc1
--- /dev/null
+++ b/drivers/regulator/cpr3-regulator.h
@@ -0,0 +1,786 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __REGULATOR_CPR3_REGULATOR_H__
+#define __REGULATOR_CPR3_REGULATOR_H__
+
+#include <linux/clk.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/power/qcom/apm.h>
+#include <linux/regulator/driver.h>
+
+struct cpr3_controller;
+struct cpr3_thread;
+
+/**
+ * struct cpr3_fuse_param - defines one contiguous segment of a fuse parameter
+ * that is contained within a given row.
+ * @row: Fuse row number
+ * @bit_start: The first bit within the row of the fuse parameter segment
+ * @bit_end: The last bit within the row of the fuse parameter segment
+ *
+ * Each fuse row is 64 bits in length. bit_start and bit_end may take values
+ * from 0 to 63. bit_start must be less than or equal to bit_end.
+ */
+struct cpr3_fuse_param {
+ unsigned row;
+ unsigned bit_start;
+ unsigned bit_end;
+};
+
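+/*
+ * Example (purely illustrative values): an 8-bit fuse parameter whose low
+ * 6 bits occupy bits 58..63 of row 37 and whose high 2 bits occupy bits
+ * 0..1 of row 38 would be described by two segments:
+ *
+ *	{ .row = 37, .bit_start = 58, .bit_end = 63 },
+ *	{ .row = 38, .bit_start = 0, .bit_end = 1 },
+ *
+ * How the segments of a given parameter are grouped (e.g. in a terminated
+ * array) is up to the platform specific driver that defines them.
+ */
+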
+/* Each CPR3 sensor has 16 ring oscillators */
+#define CPR3_RO_COUNT 16
+
+/* The maximum number of sensors that can be present on a single CPR loop. */
+#define CPR3_MAX_SENSOR_COUNT 256
+
+/* This constant is used when allocating array printing buffers. */
+#define MAX_CHARS_PER_INT 10
+
+/**
+ * struct cpr3_corner - CPR3 virtual voltage corner data structure
+ * @floor_volt: CPR closed-loop floor voltage in microvolts
+ * @ceiling_volt: CPR closed-loop ceiling voltage in microvolts
+ * @open_loop_volt: CPR open-loop voltage (i.e. initial voltage) in
+ * microvolts
+ * @last_volt: Last known settled CPR closed-loop voltage which is used
+ * when switching to a new corner
+ * @system_volt:	The system-supply voltage in microvolts, corners, or
+ *			levels
+ * @mem_acc_volt: The mem-acc-supply voltage in corners
+ * @proc_freq: Processor frequency in Hertz. For CPR rev. 3 and 4
+ *			controllers, this field is only used by platform specific
+ * CPR3 driver for interpolation.
+ * @cpr_fuse_corner: Fused corner index associated with this virtual corner
+ * (only used by platform specific CPR3 driver for
+ * mapping purposes)
+ * @target_quot: Array of target quotient values to use for each ring
+ * oscillator (RO) for this corner. A value of 0 should be
+ * specified as the target quotient for each RO that is
+ * unused by this corner.
+ * @ro_scale: Array of CPR ring oscillator (RO) scaling factors. The
+ * scaling factor for each RO is defined from RO0 to RO15
+ * with units of QUOT/V. A value of 0 may be specified for
+ * an RO that is unused.
+ * @ro_mask: Bitmap where each of the 16 LSBs indicate if the
+ * corresponding ROs should be masked for this corner
+ * @irq_en: Bitmap of the CPR interrupts to enable for this corner
+ * @aging_derate: The amount to derate the aging voltage adjustment
+ * determined for the reference corner in units of uV/mV.
+ * E.g. a value of 900 would imply that the adjustment for
+ * this corner should be 90% (900/1000) of that for the
+ * reference corner.
+ *
+ * The value of last_volt is initialized inside of the cpr3_regulator_register()
+ * call with the open_loop_volt value. It can later be updated to the settled
+ * VDD supply voltage.
+ *
+ * The values of ro_mask and irq_en are initialized inside of the
+ * cpr3_regulator_register() call.
+ */
+struct cpr3_corner {
+ int floor_volt;
+ int ceiling_volt;
+ int open_loop_volt;
+ int last_volt;
+ int system_volt;
+ int mem_acc_volt;
+ u32 proc_freq;
+ int cpr_fuse_corner;
+ u32 target_quot[CPR3_RO_COUNT];
+ u32 ro_scale[CPR3_RO_COUNT];
+ u32 ro_mask;
+ u32 irq_en;
+ int aging_derate;
+};
+
+/**
+ * struct cpr3_regulator - CPR3 logical regulator instance associated with a
+ * given CPR3 hardware thread
+ * @of_node: Device node associated with the device tree child node
+ * of this CPR3 regulator
+ * @thread: Pointer to the CPR3 thread which manages this CPR3
+ * regulator
+ * @name: Unique name for this CPR3 regulator which is filled
+ * using the device tree regulator-name property
+ * @rdesc: Regulator description for this CPR3 regulator
+ * @rdev: Regulator device pointer for the regulator registered
+ * for this CPR3 regulator
+ * @mem_acc_regulator: Pointer to the optional mem-acc supply regulator used
+ * to manage memory circuitry settings based upon CPR3
+ * regulator output voltage.
+ * @ldo_regulator: Pointer to the LDO supply regulator used to manage
+ * per-cluster LDO voltage and bypass state
+ * @ldo_regulator_bypass: Cached copy of the LDO regulator bypass state
+ * @ldo_ret_regulator: Pointer to the LDO retention supply regulator used to
+ * manage LDO retention bypass state
+ * @corner: Array of all corners supported by this CPR3 regulator
+ * @corner_count: The number of elements in the corner array
+ * @platform_fuses: Pointer to platform specific CPR fuse data (only used by
+ * platform specific CPR3 driver)
+ * @speed_bin_fuse: Value read from the speed bin fuse parameter
+ * @speed_bins_supported: The number of speed bins supported by the device tree
+ * configuration for this CPR3 regulator
+ * @cpr_rev_fuse: Value read from the CPR fusing revision fuse parameter
+ * @fuse_combo: Platform specific enum value identifying the specific
+ * combination of fuse values found on a given chip
+ * @fuse_combos_supported: The number of fuse combinations supported by the
+ * device tree configuration for this CPR3 regulator
+ * @fuse_corner_count: Number of corners defined by fuse parameters
+ * @fuse_combo_corner_sum: The sum of the corner counts across all fuse combos
+ * @fuse_combo_offset: The device tree property array offset for the selected
+ * fuse combo
+ * @speed_bin_corner_sum: The sum of the corner counts across all speed bins.
+ * This may be specified as 0 if per speed bin parsing
+ * support is not required.
+ * @speed_bin_offset: The device tree property array offset for the selected
+ * speed bin
+ * @pd_bypass_mask: Bit mask of power domains associated with this CPR3
+ * regulator
+ * @dynamic_floor_corner: Index identifying the voltage corner for the CPR3
+ * regulator whose last_volt value should be used as the
+ * global CPR floor voltage if all of the power domains
+ * associated with this CPR3 regulator are bypassed
+ * @uses_dynamic_floor: Boolean flag indicating that dynamic_floor_corner should
+ * be utilized for the CPR3 regulator
+ * @current_corner: Index identifying the currently selected voltage corner
+ * for the CPR3 regulator or less than 0 if no corner has
+ * been requested
+ * @last_closed_loop_corner: Index identifying the last voltage corner for the
+ * CPR3 regulator which was configured when operating in
+ * CPR closed-loop mode or less than 0 if no corner has
+ * been requested. CPR registers are only written to when
+ * using closed-loop mode.
+ * @aggregated: Boolean flag indicating that this CPR3 regulator
+ * participated in the last aggregation event
+ * @debug_corner: Index identifying voltage corner used for displaying
+ * corner configuration values in debugfs
+ * @ldo_min_headroom_volt: Minimum voltage difference in microvolts required
+ * between the VDD supply voltage and the LDO output in
+ *			order for the LDO to operate
+ * @ldo_max_headroom_volt: Maximum voltage difference in microvolts between
+ * the input and output of the active LDO hardware to
+ * maintain optimum operability.
+ * @ldo_adjust_volt: Voltage in microvolts used to offset margin assigned
+ * to IR drop between PMIC and CPU
+ * @ldo_ret_volt: The lowest supported CPU retention voltage in
+ * microvolts. This voltage may vary part-to-part based
+ * upon the value of hardware fuses.
+ * @ldo_max_volt: The maximum physically supported LDO voltage in
+ * microvolts
+ * @ldo_mode_allowed: Boolean which indicates if LDO mode is allowed for this
+ * CPR3 regulator
+ * @vreg_enabled: Boolean defining the enable state of the CPR3
+ * regulator's regulator within the regulator framework.
+ * @aging_allowed: Boolean defining if CPR aging adjustments are allowed
+ * for this CPR3 regulator given the fuse combo of the
+ * device
+ * @aging_corner: The corner that should be configured for this regulator
+ * when an aging measurement is performed.
+ * @aging_max_adjust_volt: The maximum aging voltage margin in microvolts that
+ * may be added to the target quotients of this regulator.
+ * A value of 0 may be specified if this regulator does not
+ * require any aging adjustment.
+ *
+ * This structure contains both configuration and runtime state data. The
+ * elements current_corner, last_closed_loop_corner, aggregated, debug_corner,
+ * ldo_mode_allowed, and vreg_enabled are state variables.
+ */
+struct cpr3_regulator {
+ struct device_node *of_node;
+ struct cpr3_thread *thread;
+ const char *name;
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct regulator *mem_acc_regulator;
+ struct regulator *ldo_regulator;
+ bool ldo_regulator_bypass;
+ struct regulator *ldo_ret_regulator;
+ struct cpr3_corner *corner;
+ int corner_count;
+
+ void *platform_fuses;
+ int speed_bin_fuse;
+ int speed_bins_supported;
+ int cpr_rev_fuse;
+ int fuse_combo;
+ int fuse_combos_supported;
+ int fuse_corner_count;
+ int fuse_combo_corner_sum;
+ int fuse_combo_offset;
+ int speed_bin_corner_sum;
+ int speed_bin_offset;
+ u32 pd_bypass_mask;
+ int dynamic_floor_corner;
+ bool uses_dynamic_floor;
+
+ int current_corner;
+ int last_closed_loop_corner;
+ bool aggregated;
+ int debug_corner;
+ int ldo_min_headroom_volt;
+ int ldo_max_headroom_volt;
+ int ldo_adjust_volt;
+ int ldo_ret_volt;
+ int ldo_max_volt;
+ bool ldo_mode_allowed;
+ bool vreg_enabled;
+
+ bool aging_allowed;
+ int aging_corner;
+ int aging_max_adjust_volt;
+};
+
+/**
+ * struct cpr3_thread - CPR3 hardware thread data structure
+ * @thread_id: Hardware thread ID
+ * @of_node: Device node associated with the device tree child node
+ * of this CPR3 thread
+ * @ctrl: Pointer to the CPR3 controller which manages this thread
+ * @vreg: Array of CPR3 regulators handled by the CPR3 thread
+ * @vreg_count: Number of elements in the vreg array
+ * @aggr_corner: CPR corner containing the in process aggregated voltage
+ * and target quotient configurations which will be applied
+ * @last_closed_loop_aggr_corner: CPR corner containing the most recent
+ * configurations which were written into hardware
+ * registers when operating in closed loop mode (i.e. with
+ * CPR enabled)
+ * @consecutive_up:	The number of consecutive CPR step up events needed
+ *			to trigger an up interrupt
+ * @consecutive_down:	The number of consecutive CPR step down events needed
+ *			to trigger a down interrupt
+ * @up_threshold:	The number of CPR error steps required to generate an up
+ *			event
+ * @down_threshold:	The number of CPR error steps required to generate a down
+ *			event
+ *
+ * This structure contains both configuration and runtime state data. The
+ * elements aggr_corner and last_closed_loop_aggr_corner are state variables.
+ */
+struct cpr3_thread {
+ u32 thread_id;
+ struct device_node *of_node;
+ struct cpr3_controller *ctrl;
+ struct cpr3_regulator *vreg;
+ int vreg_count;
+ struct cpr3_corner aggr_corner;
+ struct cpr3_corner last_closed_loop_aggr_corner;
+
+ u32 consecutive_up;
+ u32 consecutive_down;
+ u32 up_threshold;
+ u32 down_threshold;
+};
+
+/* Per CPR controller data */
+/**
+ * enum cpr3_mem_acc_corners - Constants which define the number of mem-acc
+ * regulator corners available in the mem-acc corner map array.
+ * %CPR3_MEM_ACC_LOW_CORNER: Index in mem-acc corner map array mapping to the
+ *			mem-acc regulator corner to be used for low
+ *			voltage vdd supply
+ * %CPR3_MEM_ACC_HIGH_CORNER: Index in mem-acc corner map array mapping to the
+ * mem-acc regulator corner to be used for high
+ * voltage vdd supply
+ * %CPR3_MEM_ACC_CORNERS: Number of elements in the mem-acc corner map
+ * array
+ */
+enum cpr3_mem_acc_corners {
+ CPR3_MEM_ACC_LOW_CORNER = 0,
+ CPR3_MEM_ACC_HIGH_CORNER = 1,
+ CPR3_MEM_ACC_CORNERS = 2,
+};
+
+/**
+ * enum cpr3_count_mode - CPR3 controller count mode which defines the
+ *			method by which CPR sensor data is acquired
+ * %CPR3_COUNT_MODE_ALL_AT_ONCE_MIN: Capture all CPR sensor readings
+ * simultaneously and report the minimum
+ * value seen in successive measurements
+ * %CPR3_COUNT_MODE_ALL_AT_ONCE_MAX: Capture all CPR sensor readings
+ * simultaneously and report the maximum
+ * value seen in successive measurements
+ * %CPR3_COUNT_MODE_STAGGERED: Read one sensor at a time in a
+ * sequential fashion
+ * %CPR3_COUNT_MODE_ALL_AT_ONCE_AGE: Capture all CPR aging sensor readings
+ * simultaneously.
+ */
+enum cpr3_count_mode {
+ CPR3_COUNT_MODE_ALL_AT_ONCE_MIN = 0,
+ CPR3_COUNT_MODE_ALL_AT_ONCE_MAX = 1,
+ CPR3_COUNT_MODE_STAGGERED = 2,
+ CPR3_COUNT_MODE_ALL_AT_ONCE_AGE = 3,
+};
+
+/**
+ * enum cpr_controller_type - supported CPR controller hardware types
+ * %CPR_CTRL_TYPE_CPR3: HW has CPR3 controller
+ * %CPR_CTRL_TYPE_CPR4: HW has CPR4 controller
+ */
+enum cpr_controller_type {
+ CPR_CTRL_TYPE_CPR3,
+ CPR_CTRL_TYPE_CPR4,
+};
+
+/**
+ * struct cpr3_aging_sensor_info - CPR3 aging sensor information
+ * @sensor_id:		The index of the CPR3 sensor to be used in the aging
+ * measurement.
+ * @ro_scale:		The CPR ring oscillator (RO) scaling factor for the
+ * aging sensor with units of QUOT/V.
+ * @init_quot_diff: The fused quotient difference between aged and un-aged
+ * paths that was measured at manufacturing time.
+ * @measured_quot_diff: The quotient difference measured at runtime.
+ * @bypass_mask: Bit mask of the CPR sensors that must be bypassed during
+ * the aging measurement for this sensor
+ *
+ * This structure contains both configuration and runtime state data. The
+ * element measured_quot_diff is a state variable.
+ */
+struct cpr3_aging_sensor_info {
+ u32 sensor_id;
+ u32 ro_scale;
+ int init_quot_diff;
+ int measured_quot_diff;
+ u32 bypass_mask[CPR3_MAX_SENSOR_COUNT / 32];
+};
+
+/**
+ * struct cpr3_controller - CPR3 controller data structure
+ * @dev: Device pointer for the CPR3 controller device
+ * @name: Unique name for the CPR3 controller
+ * @cpr_ctrl_base: Virtual address of the CPR3 controller base register
+ * @fuse_base: Virtual address of fuse row 0
+ * @list: list head used in a global cpr3-regulator list so that
+ * cpr3-regulator structs can be found easily in RAM dumps
+ * @thread: Array of CPR3 threads managed by the CPR3 controller
+ * @thread_count: Number of elements in the thread array
+ * @sensor_owner: Array of thread IDs indicating which thread owns a given
+ * CPR sensor
+ * @sensor_count: The number of CPR sensors found on the CPR loop managed
+ * by this CPR controller. Must be equal to the number of
+ * elements in the sensor_owner array
+ * @soc_revision: Revision number of the SoC. This may be unused by
+ * platforms that do not have different behavior for
+ * different SoC revisions.
+ * @lock: Mutex lock used to ensure mutual exclusion between
+ * all of the threads associated with the controller
+ * @vdd_regulator: Pointer to the VDD supply regulator which this CPR3
+ * controller manages
+ * @system_regulator: Pointer to the optional system-supply regulator upon
+ * which the VDD supply regulator depends.
+ * @mem_acc_regulator: Pointer to the optional mem-acc supply regulator used
+ * to manage memory circuitry settings based upon the
+ * VDD supply output voltage.
+ * @vdd_limit_regulator: Pointer to the VDD supply limit regulator which is used
+ *			for hardware closed-loop in order to specify ceiling and
+ * floor voltage limits (platform specific)
+ * @system_supply_max_volt: Voltage in microvolts which corresponds to the
+ * absolute ceiling voltage of the system-supply
+ * @mem_acc_threshold_volt: mem-acc threshold voltage in microvolts
+ * @mem_acc_corner_map: mem-acc regulator corners mapping to low and high
+ * voltage mem-acc settings for the memories powered by
+ * this CPR3 controller and its associated CPR3 regulators
+ * @core_clk: Pointer to the CPR3 controller core clock
+ * @iface_clk: Pointer to the CPR3 interface clock (platform specific)
+ * @bus_clk: Pointer to the CPR3 bus clock (platform specific)
+ * @irq: CPR interrupt number
+ * @ceiling_irq: Interrupt number for the interrupt that is triggered
+ * when hardware closed-loop attempts to exceed the ceiling
+ * voltage
+ * @apm: Handle to the array power mux (APM)
+ * @apm_threshold_volt: APM threshold voltage in microvolts
+ * @apm_adj_volt: Minimum difference between APM threshold voltage and
+ * open-loop voltage which allows the APM threshold voltage
+ * to be used as a ceiling
+ * @apm_high_supply: APM supply to configure if VDD voltage is greater than
+ * or equal to the APM threshold voltage
+ * @apm_low_supply: APM supply to configure if the VDD voltage is less than
+ * the APM threshold voltage
+ * @cpr_clock_rate: CPR reference clock frequency in Hz.
+ * @sensor_time: The time in nanoseconds that each sensor takes to
+ * perform a measurement.
+ * @loop_time: The time in nanoseconds between consecutive CPR
+ * measurements.
+ * @up_down_delay_time: The time to delay in nanoseconds between consecutive CPR
+ * measurements when the last measurement recommended
+ * increasing or decreasing the vdd-supply voltage.
+ * (platform specific)
+ * @idle_clocks: Number of CPR reference clock ticks that the CPR
+ * controller waits in transitional states.
+ * @step_quot_init_min: The default minimum CPR step quotient value. The step
+ * quotient is the number of additional ring oscillator
+ * ticks observed when increasing one step in vdd-supply
+ * output voltage.
+ * @step_quot_init_max: The default maximum CPR step quotient value.
+ * @step_volt: Step size in microvolts between available set points
+ * of the VDD supply
+ * @down_error_step_limit: CPR4 hardware closed-loop down error step limit which
+ * defines the maximum number of VDD supply regulator steps
+ * that the voltage may be reduced as the result of a
+ * single CPR measurement.
+ * @up_error_step_limit: CPR4 hardware closed-loop up error step limit which
+ * defines the maximum number of VDD supply regulator steps
+ * that the voltage may be increased as the result of a
+ * single CPR measurement.
+ * @count_mode: CPR controller count mode
+ * @count_repeat: Number of times to perform consecutive sensor
+ * measurements when using all-at-once count modes.
+ * @proc_clock_throttle: Defines the processor clock frequency throttling
+ * register value to use. This can be used to reduce the
+ * clock frequency when a power domain exits a low power
+ * mode until CPR settles at a new voltage.
+ * (platform specific)
+ * @cpr_allowed_hw: Boolean which indicates if closed-loop CPR operation is
+ * permitted for a given chip based upon hardware fuse
+ * values
+ * @cpr_allowed_sw: Boolean which indicates if closed-loop CPR operation is
+ * permitted based upon software policies
+ * @supports_hw_closed_loop: Boolean which indicates if this CPR3/4 controller
+ * physically supports hardware closed-loop CPR operation
+ * @use_hw_closed_loop: Boolean which indicates that this controller will be
+ * using hardware closed-loop operation in place of
+ * software closed-loop operation.
+ * @ctrl_type: CPR controller type
+ * @saw_use_unit_mV:	Boolean which indicates that the unit used in the SAW PVC
+ * interface is mV.
+ * @aggr_corner: CPR corner containing the most recently aggregated
+ * voltage configurations which are being used currently
+ * @cpr_enabled: Boolean which indicates that the CPR controller is
+ * enabled and operating in closed-loop mode. CPR clocks
+ * have been prepared and enabled whenever this flag is
+ * true.
+ * @last_corner_was_closed_loop: Boolean indicating if the last known corners
+ * were updated during closed loop operation.
+ * @cpr_suspended: Boolean which indicates that CPR has been temporarily
+ *			disabled while entering system suspend.
+ * @debugfs: Pointer to the debugfs directory of this CPR3 controller
+ * @aging_ref_volt: Reference voltage in microvolts to configure when
+ * performing CPR aging measurements.
+ * @aging_vdd_mode: vdd-supply regulator mode to configure before performing
+ * a CPR aging measurement. It should be one of
+ * REGULATOR_MODE_*.
+ * @aging_complete_vdd_mode: vdd-supply regulator mode to configure after
+ * performing a CPR aging measurement. It should be one of
+ * REGULATOR_MODE_*.
+ * @aging_ref_adjust_volt: The reference aging voltage margin in microvolts that
+ * should be added to the target quotients of the
+ * regulators managed by this controller after derating.
+ * @aging_required: Flag which indicates that a CPR aging measurement still
+ * needs to be performed for this CPR3 controller.
+ * @aging_succeeded: Flag which indicates that a CPR aging measurement has
+ * completed successfully.
+ * @aging_failed: Flag which indicates that a CPR aging measurement has
+ * failed to complete successfully.
+ * @aging_sensor: Array of CPR3 aging sensors which are used to perform
+ *			aging measurements at runtime.
+ * @aging_sensor_count: Number of elements in the aging_sensor array
+ *
+ * This structure contains both configuration and runtime state data. The
+ * elements cpr_allowed_sw, use_hw_closed_loop, aggr_corner, cpr_enabled,
+ * last_corner_was_closed_loop, cpr_suspended, aging_ref_adjust_volt,
+ * aging_required, aging_succeeded, and aging_failed are state variables.
+ *
+ * The apm* elements do not need to be initialized if the VDD supply managed by
+ * the CPR3 controller does not utilize an APM.
+ */
+struct cpr3_controller {
+ struct device *dev;
+ const char *name;
+ void __iomem *cpr_ctrl_base;
+ void __iomem *fuse_base;
+ struct list_head list;
+ struct cpr3_thread *thread;
+ int thread_count;
+ u8 *sensor_owner;
+ int sensor_count;
+ int soc_revision;
+ struct mutex lock;
+ struct regulator *vdd_regulator;
+ struct regulator *system_regulator;
+ struct regulator *mem_acc_regulator;
+ struct regulator *vdd_limit_regulator;
+ int system_supply_max_volt;
+ int mem_acc_threshold_volt;
+ int mem_acc_corner_map[CPR3_MEM_ACC_CORNERS];
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ struct clk *bus_clk;
+ int irq;
+ int ceiling_irq;
+ struct msm_apm_ctrl_dev *apm;
+ int apm_threshold_volt;
+ int apm_adj_volt;
+ enum msm_apm_supply apm_high_supply;
+ enum msm_apm_supply apm_low_supply;
+ u32 cpr_clock_rate;
+ u32 sensor_time;
+ u32 loop_time;
+ u32 up_down_delay_time;
+ u32 idle_clocks;
+ u32 step_quot_init_min;
+ u32 step_quot_init_max;
+ int step_volt;
+ u32 down_error_step_limit;
+ u32 up_error_step_limit;
+ enum cpr3_count_mode count_mode;
+ u32 count_repeat;
+ u32 proc_clock_throttle;
+ bool cpr_allowed_hw;
+ bool cpr_allowed_sw;
+ bool supports_hw_closed_loop;
+ bool use_hw_closed_loop;
+ enum cpr_controller_type ctrl_type;
+ bool saw_use_unit_mV;
+ struct cpr3_corner aggr_corner;
+ bool cpr_enabled;
+ bool last_corner_was_closed_loop;
+ bool cpr_suspended;
+ struct dentry *debugfs;
+
+ int aging_ref_volt;
+ unsigned int aging_vdd_mode;
+ unsigned int aging_complete_vdd_mode;
+ int aging_ref_adjust_volt;
+ bool aging_required;
+ bool aging_succeeded;
+ bool aging_failed;
+ struct cpr3_aging_sensor_info *aging_sensor;
+ int aging_sensor_count;
+};
+
+/* Used for rounding voltages up to the next physically available set point. */
+#define CPR3_ROUND(n, d) (DIV_ROUND_UP(n, d) * (d))
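+
+/*
+ * Example: with a hypothetical 5000 uV PMIC step size, CPR3_ROUND(901000,
+ * 5000) evaluates to 905000 uV and CPR3_ROUND(905000, 5000) remains
+ * 905000 uV.
+ */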
+
+#define cpr3_err(cpr3_thread, message, ...) \
+ pr_err("%s: " message, (cpr3_thread)->name, ##__VA_ARGS__)
+#define cpr3_info(cpr3_thread, message, ...) \
+ pr_info("%s: " message, (cpr3_thread)->name, ##__VA_ARGS__)
+#define cpr3_debug(cpr3_thread, message, ...) \
+ pr_debug("%s: " message, (cpr3_thread)->name, ##__VA_ARGS__)
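+
+/* e.g. cpr3_err(vreg, "bad corner\n") logs "<vreg-name>: bad corner" via pr_err() */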
+
+/*
+ * Offset subtracted from voltage corner values passed in from the regulator
+ * framework in order to get internal voltage corner values. This is needed
+ * since the regulator framework treats 0 as an error value at regulator
+ * registration time.
+ */
+#define CPR3_CORNER_OFFSET 1
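+
+/*
+ * Example: a corner value of 3 passed in from the regulator framework (or
+ * written to the corner/index debugfs file) maps to the internal corner
+ * array element corner[3 - CPR3_CORNER_OFFSET], i.e. corner[2].
+ */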
+
+#ifdef CONFIG_REGULATOR_CPR3
+
+int cpr3_regulator_register(struct platform_device *pdev,
+ struct cpr3_controller *ctrl);
+int cpr3_regulator_unregister(struct cpr3_controller *ctrl);
+int cpr3_regulator_suspend(struct cpr3_controller *ctrl);
+int cpr3_regulator_resume(struct cpr3_controller *ctrl);
+
+int cpr3_allocate_threads(struct cpr3_controller *ctrl, u32 min_thread_id,
+ u32 max_thread_id);
+int cpr3_map_fuse_base(struct cpr3_controller *ctrl,
+ struct platform_device *pdev);
+int cpr3_read_fuse_param(void __iomem *fuse_base_addr,
+ const struct cpr3_fuse_param *param, u64 *param_value);
+int cpr3_convert_open_loop_voltage_fuse(int ref_volt, int step_volt, u32 fuse,
+ int fuse_len);
+u64 cpr3_interpolate(u64 x1, u64 y1, u64 x2, u64 y2, u64 x);
+int cpr3_parse_array_property(struct cpr3_regulator *vreg,
+ const char *prop_name, int tuple_size, u32 *out);
+int cpr3_parse_corner_array_property(struct cpr3_regulator *vreg,
+ const char *prop_name, int tuple_size, u32 *out);
+int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg);
+int cpr3_parse_thread_u32(struct cpr3_thread *thread, const char *propname,
+ u32 *out_value, u32 value_min, u32 value_max);
+int cpr3_parse_ctrl_u32(struct cpr3_controller *ctrl, const char *propname,
+ u32 *out_value, u32 value_min, u32 value_max);
+int cpr3_parse_common_thread_data(struct cpr3_thread *thread);
+int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl);
+int cpr3_limit_open_loop_voltages(struct cpr3_regulator *vreg);
+void cpr3_open_loop_voltage_as_ceiling(struct cpr3_regulator *vreg);
+int cpr3_limit_floor_voltages(struct cpr3_regulator *vreg);
+void cpr3_print_quots(struct cpr3_regulator *vreg);
+int cpr3_adjust_fused_open_loop_voltages(struct cpr3_regulator *vreg,
+ int *fuse_volt);
+int cpr3_adjust_open_loop_voltages(struct cpr3_regulator *vreg);
+int cpr3_quot_adjustment(int ro_scale, int volt_adjust);
+int cpr3_voltage_adjustment(int ro_scale, int quot_adjust);
+int cpr3_parse_closed_loop_voltage_adjustments(struct cpr3_regulator *vreg,
+ u64 *ro_sel, int *volt_adjust,
+ int *volt_adjust_fuse, int *ro_scale);
+int cpr3_apm_init(struct cpr3_controller *ctrl);
+int cpr3_mem_acc_init(struct cpr3_regulator *vreg);
+
+#else
+
+static inline int cpr3_regulator_register(struct platform_device *pdev,
+ struct cpr3_controller *ctrl)
+{
+ return -ENXIO;
+}
+
+static inline int cpr3_regulator_unregister(struct cpr3_controller *ctrl)
+{
+ return -ENXIO;
+}
+
+static inline int cpr3_regulator_suspend(struct cpr3_controller *ctrl)
+{
+ return -ENXIO;
+}
+
+static inline int cpr3_regulator_resume(struct cpr3_controller *ctrl)
+{
+ return -ENXIO;
+}
+
+static inline int cpr3_get_thread_name(struct cpr3_thread *thread,
+ struct device_node *thread_node)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_allocate_threads(struct cpr3_controller *ctrl,
+ u32 min_thread_id, u32 max_thread_id)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_map_fuse_base(struct cpr3_controller *ctrl,
+ struct platform_device *pdev)
+{
+ return -ENXIO;
+}
+
+static inline int cpr3_read_fuse_param(void __iomem *fuse_base_addr,
+ const struct cpr3_fuse_param *param, u64 *param_value)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_convert_open_loop_voltage_fuse(int ref_volt,
+ int step_volt, u32 fuse, int fuse_len)
+{
+ return -EPERM;
+}
+
+static inline u64 cpr3_interpolate(u64 x1, u64 y1, u64 x2, u64 y2, u64 x)
+{
+ return 0;
+}
+
+static inline int cpr3_parse_array_property(struct cpr3_regulator *vreg,
+ const char *prop_name, int tuple_size, u32 *out)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_parse_corner_array_property(struct cpr3_regulator *vreg,
+ const char *prop_name, int tuple_size, u32 *out)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_parse_thread_u32(struct cpr3_thread *thread,
+ const char *propname, u32 *out_value, u32 value_min,
+ u32 value_max)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_parse_ctrl_u32(struct cpr3_controller *ctrl,
+ const char *propname, u32 *out_value, u32 value_min,
+ u32 value_max)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_parse_common_thread_data(struct cpr3_thread *thread)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_limit_open_loop_voltages(struct cpr3_regulator *vreg)
+{
+ return -EPERM;
+}
+
+static inline void cpr3_open_loop_voltage_as_ceiling(
+ struct cpr3_regulator *vreg)
+{
+ return;
+}
+
+static inline int cpr3_limit_floor_voltages(struct cpr3_regulator *vreg)
+{
+ return -EPERM;
+}
+
+static inline void cpr3_print_quots(struct cpr3_regulator *vreg)
+{
+ return;
+}
+
+static inline int cpr3_adjust_fused_open_loop_voltages(
+ struct cpr3_regulator *vreg, int *fuse_volt)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_adjust_open_loop_voltages(struct cpr3_regulator *vreg)
+{
+ return -EPERM;
+}
+
+static inline int cpr3_quot_adjustment(int ro_scale, int volt_adjust)
+{
+ return 0;
+}
+
+static inline int cpr3_voltage_adjustment(int ro_scale, int quot_adjust)
+{
+ return 0;
+}
+
+static inline int cpr3_parse_closed_loop_voltage_adjustments(
+ struct cpr3_regulator *vreg, u64 *ro_sel,
+ int *volt_adjust, int *volt_adjust_fuse, int *ro_scale)
+{
+ return 0;
+}
+
+static inline int cpr3_apm_init(struct cpr3_controller *ctrl)
+{
+ return 0;
+}
+
+static inline int cpr3_mem_acc_init(struct cpr3_regulator *vreg)
+{
+ return 0;
+}
+
+#endif /* CONFIG_REGULATOR_CPR3 */
+
+#endif /* __REGULATOR_CPR3_REGULATOR_H__ */
diff --git a/drivers/regulator/cpr3-util.c b/drivers/regulator/cpr3-util.c
new file mode 100644
index 000000000000..72c609d53b55
--- /dev/null
+++ b/drivers/regulator/cpr3-util.c
@@ -0,0 +1,1505 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This file contains utility functions to be used by platform specific CPR3
+ * regulator drivers.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "cpr3-regulator.h"
+
+#define BYTES_PER_FUSE_ROW 8
+#define MAX_FUSE_ROW_BIT 63
+
+#define CPR3_CONSECUTIVE_UP_DOWN_MIN 0
+#define CPR3_CONSECUTIVE_UP_DOWN_MAX 15
+#define CPR3_UP_DOWN_THRESHOLD_MIN 0
+#define CPR3_UP_DOWN_THRESHOLD_MAX 31
+#define CPR3_STEP_QUOT_MIN 0
+#define CPR3_STEP_QUOT_MAX 63
+#define CPR3_IDLE_CLOCKS_MIN 0
+#define CPR3_IDLE_CLOCKS_MAX 31
+
+/* This constant has units of uV/mV so 1000 corresponds to 100%. */
+#define CPR3_AGING_DERATE_UNITY 1000
+
+/**
+ * cpr3_allocate_regulators() - allocate and initialize CPR3 regulators for a
+ * given thread based upon device tree data
+ * @thread: Pointer to the CPR3 thread
+ *
+ * This function allocates the thread->vreg array based upon the number of
+ * device tree regulator subnodes. It also initializes generic elements of each
+ * regulator struct such as name, of_node, and thread.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr3_allocate_regulators(struct cpr3_thread *thread)
+{
+ struct device_node *node;
+ int i, rc;
+
+ thread->vreg_count = 0;
+
+ for_each_available_child_of_node(thread->of_node, node) {
+ thread->vreg_count++;
+ }
+
+ thread->vreg = devm_kcalloc(thread->ctrl->dev, thread->vreg_count,
+ sizeof(*thread->vreg), GFP_KERNEL);
+ if (!thread->vreg)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_available_child_of_node(thread->of_node, node) {
+ thread->vreg[i].of_node = node;
+ thread->vreg[i].thread = thread;
+
+ rc = of_property_read_string(node, "regulator-name",
+ &thread->vreg[i].name);
+ if (rc) {
+ dev_err(thread->ctrl->dev, "could not find regulator name, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_allocate_threads() - allocate and initialize CPR3 threads for a given
+ * controller based upon device tree data
+ * @ctrl: Pointer to the CPR3 controller
+ * @min_thread_id: Minimum allowed hardware thread ID for this controller
+ * @max_thread_id: Maximum allowed hardware thread ID for this controller
+ *
+ * This function allocates the ctrl->thread array based upon the number of
+ * device tree thread subnodes. It also initializes generic elements of each
+ * thread struct such as thread_id, of_node, ctrl, and vreg array.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_allocate_threads(struct cpr3_controller *ctrl, u32 min_thread_id,
+ u32 max_thread_id)
+{
+ struct device *dev = ctrl->dev;
+ struct device_node *thread_node;
+ int i, j, rc;
+
+ ctrl->thread_count = 0;
+
+ for_each_available_child_of_node(dev->of_node, thread_node) {
+ ctrl->thread_count++;
+ }
+
+ ctrl->thread = devm_kcalloc(dev, ctrl->thread_count,
+ sizeof(*ctrl->thread), GFP_KERNEL);
+ if (!ctrl->thread)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_available_child_of_node(dev->of_node, thread_node) {
+ ctrl->thread[i].of_node = thread_node;
+ ctrl->thread[i].ctrl = ctrl;
+
+ rc = of_property_read_u32(thread_node, "qcom,cpr-thread-id",
+ &ctrl->thread[i].thread_id);
+ if (rc) {
+ dev_err(dev, "could not read DT property qcom,cpr-thread-id, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (ctrl->thread[i].thread_id < min_thread_id ||
+ ctrl->thread[i].thread_id > max_thread_id) {
+ dev_err(dev, "invalid thread id = %u; not within [%u, %u]\n",
+ ctrl->thread[i].thread_id, min_thread_id,
+ max_thread_id);
+ return -EINVAL;
+ }
+
+ /* Verify that the thread ID is unique for all child nodes. */
+ for (j = 0; j < i; j++) {
+ if (ctrl->thread[j].thread_id
+ == ctrl->thread[i].thread_id) {
+ dev_err(dev, "duplicate thread id = %u found\n",
+ ctrl->thread[i].thread_id);
+ return -EINVAL;
+ }
+ }
+
+ rc = cpr3_allocate_regulators(&ctrl->thread[i]);
+ if (rc)
+ return rc;
+
+ i++;
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_map_fuse_base() - ioremap the base address of the fuse region
+ * @ctrl: Pointer to the CPR3 controller
+ * @pdev: Platform device pointer for the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_map_fuse_base(struct cpr3_controller *ctrl,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fuse_base");
+ if (!res || !res->start) {
+ dev_err(&pdev->dev, "fuse base address is missing\n");
+ return -ENXIO;
+ }
+
+ ctrl->fuse_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+
+ return 0;
+}
+
+/**
+ * cpr3_read_fuse_param() - reads a CPR3 fuse parameter out of eFuses
+ * @fuse_base_addr: Virtual memory address of the eFuse base address
+ * @param: Null terminated array of fuse param segments to read
+ * from
+ * @param_value: Output with value read from the eFuses
+ *
+ * This function reads from each of the parameter segments listed in the param
+ * array and concatenates their values together. Reading stops when an element
+ * is reached which has all 0 struct values. The total number of bits specified
+ * for the fuse parameter across all segments must be less than or equal to 64.
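+ *
+ * For illustration, a hypothetical param array { {70, 4, 7}, {71, 0, 1}, {} }
+ * would place bits 7:4 of fuse row 70 into bits 3:0 of *param_value and bits
+ * 1:0 of fuse row 71 into bits 5:4.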
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_read_fuse_param(void __iomem *fuse_base_addr,
+ const struct cpr3_fuse_param *param, u64 *param_value)
+{
+ u64 fuse_val, val;
+ int bits;
+ int bits_total = 0;
+
+ *param_value = 0;
+
+ while (param->row || param->bit_start || param->bit_end) {
+ if (param->bit_start > param->bit_end
+ || param->bit_end > MAX_FUSE_ROW_BIT) {
+ pr_err("Invalid fuse parameter segment: row=%u, start=%u, end=%u\n",
+ param->row, param->bit_start, param->bit_end);
+ return -EINVAL;
+ }
+
+ bits = param->bit_end - param->bit_start + 1;
+ if (bits_total + bits > 64) {
+ pr_err("Invalid fuse parameter segments; total bits = %d\n",
+ bits_total + bits);
+ return -EINVAL;
+ }
+
+ fuse_val = readq_relaxed(fuse_base_addr
+ + param->row * BYTES_PER_FUSE_ROW);
+ val = (fuse_val >> param->bit_start) & ((1ULL << bits) - 1);
+ *param_value |= val << bits_total;
+ bits_total += bits;
+
+ param++;
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_convert_open_loop_voltage_fuse() - converts an open loop voltage fuse
+ * value into an absolute voltage with units of microvolts
+ * @ref_volt: Reference voltage in microvolts
+ * @step_volt: The step size in microvolts of the fuse LSB
+ * @fuse: Open loop voltage fuse value
+ * @fuse_len: The bit length of the fuse value
+ *
+ * The MSB of the fuse parameter corresponds to a sign bit. If it is set, then
+ * the lower bits correspond to the number of steps to go down from the
+ * reference voltage. If it is not set, then the lower bits correspond to the
+ * number of steps to go up from the reference voltage.
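+ *
+ * As a worked example with hypothetical values: ref_volt = 645000, step_volt =
+ * 10000, fuse_len = 6, and fuse = 0x23 has the sign bit set with steps = 3, so
+ * the function returns 645000 - 3 * 10000 = 615000 uV.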
+ */
+int cpr3_convert_open_loop_voltage_fuse(int ref_volt, int step_volt, u32 fuse,
+ int fuse_len)
+{
+ int sign, steps;
+
+ sign = (fuse & (1 << (fuse_len - 1))) ? -1 : 1;
+ steps = fuse & ((1 << (fuse_len - 1)) - 1);
+
+ return ref_volt + sign * steps * step_volt;
+}
+
+/**
+ * cpr3_interpolate() - performs linear interpolation
+ * @x1:	Lower known x value
+ * @y1:	Lower known y value
+ * @x2:	Upper known x value
+ * @y2:	Upper known y value
+ * @x:	Intermediate x value
+ *
+ * Returns y where (x, y) falls on the line between (x1, y1) and (x2, y2).
+ * It is required that x1 < x2, y1 <= y2, and x1 <= x <= x2. If these
+ * conditions are not met, then y2 will be returned.
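+ *
+ * As a worked example with hypothetical values: cpr3_interpolate(300, 600000,
+ * 600, 700000, 450) returns 700000 - (600 - 450) * (700000 - 600000)
+ * / (600 - 300) = 650000.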
+ */
+u64 cpr3_interpolate(u64 x1, u64 y1, u64 x2, u64 y2, u64 x)
+{
+ u64 temp;
+
+ if (x1 >= x2 || y1 > y2 || x1 > x || x > x2)
+ return y2;
+
+ temp = (x2 - x) * (y2 - y1);
+ do_div(temp, (u32)(x2 - x1));
+
+ return y2 - temp;
+}
+
+/**
+ * cpr3_parse_array_property() - fill an array from a portion of the values
+ * specified for a device tree property
+ * @vreg: Pointer to the CPR3 regulator
+ * @prop_name: The name of the device tree property to read from
+ * @tuple_size: The number of elements in each tuple
+ * @out: Output data array which must be of size tuple_size
+ *
+ * cpr3_parse_common_corner_data() must be called for vreg before this function
+ * is called so that fuse combo and speed bin size elements are initialized.
+ *
+ * Three formats are supported for the device tree property:
+ * 1. Length == tuple_size
+ * (reading begins at index 0)
+ * 2. Length == tuple_size * vreg->fuse_combos_supported
+ * (reading begins at index tuple_size * vreg->fuse_combo)
+ * 3. Length == tuple_size * vreg->speed_bins_supported
+ * (reading begins at index tuple_size * vreg->speed_bin_fuse)
+ *
+ * All other property lengths are treated as errors.
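+ *
+ * As a hypothetical example with tuple_size = 2 and fuse_combos_supported = 3,
+ * a property defined as <10 11 20 21 30 31> matches format 2; if fuse_combo is
+ * 1, then out[] is filled with the values 20 and 21.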
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_array_property(struct cpr3_regulator *vreg,
+ const char *prop_name, int tuple_size, u32 *out)
+{
+ struct device_node *node = vreg->of_node;
+ int len = 0;
+ int i, offset, rc;
+
+ if (!of_find_property(node, prop_name, &len)) {
+ cpr3_err(vreg, "property %s is missing\n", prop_name);
+ return -EINVAL;
+ }
+
+ if (len == tuple_size * sizeof(u32)) {
+ offset = 0;
+ } else if (len == tuple_size * vreg->fuse_combos_supported
+ * sizeof(u32)) {
+ offset = tuple_size * vreg->fuse_combo;
+ } else if (vreg->speed_bins_supported > 0 &&
+ len == tuple_size * vreg->speed_bins_supported * sizeof(u32)) {
+ offset = tuple_size * vreg->speed_bin_fuse;
+ } else {
+ if (vreg->speed_bins_supported > 0)
+ cpr3_err(vreg, "property %s has invalid length=%d, should be %lu, %lu, or %lu\n",
+ prop_name, len,
+ tuple_size * sizeof(u32),
+ tuple_size * vreg->speed_bins_supported
+ * sizeof(u32),
+ tuple_size * vreg->fuse_combos_supported
+ * sizeof(u32));
+ else
+ cpr3_err(vreg, "property %s has invalid length=%d, should be %lu or %lu\n",
+ prop_name, len,
+ tuple_size * sizeof(u32),
+ tuple_size * vreg->fuse_combos_supported
+ * sizeof(u32));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < tuple_size; i++) {
+ rc = of_property_read_u32_index(node, prop_name, offset + i,
+ &out[i]);
+ if (rc) {
+ cpr3_err(vreg, "error reading property %s, rc=%d\n",
+ prop_name, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_parse_corner_array_property() - fill a per-corner array from a portion
+ * of the values specified for a device tree property
+ * @vreg: Pointer to the CPR3 regulator
+ * @prop_name: The name of the device tree property to read from
+ * @tuple_size: The number of elements in each per-corner tuple
+ * @out: Output data array which must be of size:
+ * tuple_size * vreg->corner_count
+ *
+ * cpr3_parse_common_corner_data() must be called for vreg before this function
+ * is called so that fuse combo and speed bin size elements are initialized.
+ *
+ * Three formats are supported for the device tree property:
+ * 1. Length == tuple_size * vreg->corner_count
+ * (reading begins at index 0)
+ * 2. Length == tuple_size * vreg->fuse_combo_corner_sum
+ * (reading begins at index tuple_size * vreg->fuse_combo_offset)
+ * 3. Length == tuple_size * vreg->speed_bin_corner_sum
+ * (reading begins at index tuple_size * vreg->speed_bin_offset)
+ *
+ * All other property lengths are treated as errors.
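+ *
+ * As a hypothetical example with tuple_size = 1, corner_count = 4,
+ * fuse_combo_corner_sum = 12, and fuse_combo_offset = 8, a 12-element property
+ * matches format 2 and its last 4 values are copied into out[].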
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_corner_array_property(struct cpr3_regulator *vreg,
+ const char *prop_name, int tuple_size, u32 *out)
+{
+ struct device_node *node = vreg->of_node;
+ int len = 0;
+ int i, offset, rc;
+
+ if (!of_find_property(node, prop_name, &len)) {
+ cpr3_err(vreg, "property %s is missing\n", prop_name);
+ return -EINVAL;
+ }
+
+ if (len == tuple_size * vreg->corner_count * sizeof(u32)) {
+ offset = 0;
+ } else if (len == tuple_size * vreg->fuse_combo_corner_sum
+ * sizeof(u32)) {
+ offset = tuple_size * vreg->fuse_combo_offset;
+ } else if (vreg->speed_bin_corner_sum > 0 &&
+ len == tuple_size * vreg->speed_bin_corner_sum * sizeof(u32)) {
+ offset = tuple_size * vreg->speed_bin_offset;
+ } else {
+ if (vreg->speed_bin_corner_sum > 0)
+ cpr3_err(vreg, "property %s has invalid length=%d, should be %lu, %lu, or %lu\n",
+ prop_name, len,
+ tuple_size * vreg->corner_count * sizeof(u32),
+ tuple_size * vreg->speed_bin_corner_sum
+ * sizeof(u32),
+ tuple_size * vreg->fuse_combo_corner_sum
+ * sizeof(u32));
+ else
+ cpr3_err(vreg, "property %s has invalid length=%d, should be %lu or %lu\n",
+ prop_name, len,
+ tuple_size * vreg->corner_count * sizeof(u32),
+ tuple_size * vreg->fuse_combo_corner_sum
+ * sizeof(u32));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < tuple_size * vreg->corner_count; i++) {
+ rc = of_property_read_u32_index(node, prop_name, offset + i,
+ &out[i]);
+ if (rc) {
+ cpr3_err(vreg, "error reading property %s, rc=%d\n",
+ prop_name, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_parse_common_corner_data() - parse common CPR3 properties relating to
+ * the corners supported by a CPR3 regulator from device tree
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function reads, validates, and utilizes the following device tree
+ * properties: qcom,cpr-fuse-corners, qcom,cpr-fuse-combos, qcom,cpr-speed-bins,
+ * qcom,cpr-speed-bin-corners, qcom,cpr-corners, qcom,cpr-voltage-ceiling,
+ * qcom,cpr-voltage-floor, qcom,corner-frequencies,
+ * and qcom,cpr-corner-fmax-map.
+ *
+ * It initializes these CPR3 regulator elements: corner, corner_count,
+ * fuse_combos_supported, and speed_bins_supported. It initializes these
+ * elements for each corner: ceiling_volt, floor_volt, proc_freq, and
+ * cpr_fuse_corner.
+ *
+ * It requires that the following CPR3 regulator elements be initialized before
+ * being called: fuse_corner_count, fuse_combo, and speed_bin_fuse.
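+ *
+ * A minimal, hypothetical device tree fragment handled by this function for a
+ * regulator with 2 fuse corners, 1 fuse combo, 3 corners, and no speed bin
+ * fuse might look like:
+ *	qcom,cpr-fuse-corners = <2>;
+ *	qcom,cpr-fuse-combos = <1>;
+ *	qcom,cpr-corners = <3>;
+ *	qcom,cpr-voltage-ceiling = <700000 800000 900000>;
+ *	qcom,cpr-voltage-floor = <600000 650000 700000>;
+ *	qcom,corner-frequencies = <300000000 600000000 900000000>;
+ *	qcom,cpr-corner-fmax-map = <1 3>;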
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_common_corner_data(struct cpr3_regulator *vreg)
+{
+ struct device_node *node = vreg->of_node;
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ u32 max_fuse_combos, fuse_corners, aging_allowed = 0;
+ u32 max_speed_bins = 0;
+ u32 *combo_corners;
+ u32 *speed_bin_corners;
+ u32 *temp;
+ int i, j, rc;
+
+ rc = of_property_read_u32(node, "qcom,cpr-fuse-corners", &fuse_corners);
+ if (rc) {
+ cpr3_err(vreg, "error reading property qcom,cpr-fuse-corners, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (vreg->fuse_corner_count != fuse_corners) {
+ cpr3_err(vreg, "device tree config supports %d fuse corners but the hardware has %d fuse corners\n",
+ fuse_corners, vreg->fuse_corner_count);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(node, "qcom,cpr-fuse-combos",
+ &max_fuse_combos);
+ if (rc) {
+ cpr3_err(vreg, "error reading property qcom,cpr-fuse-combos, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /*
+ * Sanity check against arbitrarily large value to avoid excessive
+ * memory allocation.
+ */
+ if (max_fuse_combos > 100 || max_fuse_combos == 0) {
+ cpr3_err(vreg, "qcom,cpr-fuse-combos is invalid: %u\n",
+ max_fuse_combos);
+ return -EINVAL;
+ }
+
+ if (vreg->fuse_combo >= max_fuse_combos) {
+ cpr3_err(vreg, "device tree config supports fuse combos 0-%u but the hardware has combo %d\n",
+ max_fuse_combos - 1, vreg->fuse_combo);
+ BUG_ON(1);
+ return -EINVAL;
+ }
+
+ vreg->fuse_combos_supported = max_fuse_combos;
+
+ of_property_read_u32(node, "qcom,cpr-speed-bins", &max_speed_bins);
+
+ /*
+ * Sanity check against arbitrarily large value to avoid excessive
+ * memory allocation.
+ */
+ if (max_speed_bins > 100) {
+ cpr3_err(vreg, "qcom,cpr-speed-bins is invalid: %u\n",
+ max_speed_bins);
+ return -EINVAL;
+ }
+
+ if (max_speed_bins && vreg->speed_bin_fuse >= max_speed_bins) {
+ cpr3_err(vreg, "device tree config supports speed bins 0-%u but the hardware has speed bin %d\n",
+ max_speed_bins - 1, vreg->speed_bin_fuse);
+ BUG();
+ return -EINVAL;
+ }
+
+ vreg->speed_bins_supported = max_speed_bins;
+
+ combo_corners = kcalloc(vreg->fuse_combos_supported,
+ sizeof(*combo_corners), GFP_KERNEL);
+ if (!combo_corners)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(node, "qcom,cpr-corners", combo_corners,
+ vreg->fuse_combos_supported);
+ if (rc == -EOVERFLOW) {
+ /* Single value case */
+ rc = of_property_read_u32(node, "qcom,cpr-corners",
+ combo_corners);
+ for (i = 1; i < vreg->fuse_combos_supported; i++)
+ combo_corners[i] = combo_corners[0];
+ }
+ if (rc) {
+ cpr3_err(vreg, "error reading property qcom,cpr-corners, rc=%d\n",
+ rc);
+ kfree(combo_corners);
+ return rc;
+ }
+
+ vreg->fuse_combo_offset = 0;
+ vreg->fuse_combo_corner_sum = 0;
+ for (i = 0; i < vreg->fuse_combos_supported; i++) {
+ vreg->fuse_combo_corner_sum += combo_corners[i];
+ if (i < vreg->fuse_combo)
+ vreg->fuse_combo_offset += combo_corners[i];
+ }
+
+ vreg->corner_count = combo_corners[vreg->fuse_combo];
+
+ kfree(combo_corners);
+
+ vreg->speed_bin_offset = 0;
+ vreg->speed_bin_corner_sum = 0;
+ if (vreg->speed_bins_supported > 0) {
+ speed_bin_corners = kcalloc(vreg->speed_bins_supported,
+ sizeof(*speed_bin_corners), GFP_KERNEL);
+ if (!speed_bin_corners)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(node,
+ "qcom,cpr-speed-bin-corners", speed_bin_corners,
+ vreg->speed_bins_supported);
+ if (rc) {
+ cpr3_err(vreg, "error reading property qcom,cpr-speed-bin-corners, rc=%d\n",
+ rc);
+ kfree(speed_bin_corners);
+ return rc;
+ }
+
+ for (i = 0; i < vreg->speed_bins_supported; i++) {
+ vreg->speed_bin_corner_sum += speed_bin_corners[i];
+ if (i < vreg->speed_bin_fuse)
+ vreg->speed_bin_offset += speed_bin_corners[i];
+ }
+
+ if (speed_bin_corners[vreg->speed_bin_fuse]
+ != vreg->corner_count) {
+ cpr3_err(vreg, "qcom,cpr-corners and qcom,cpr-speed-bin-corners conflict on number of corners: %d vs %u\n",
+ vreg->corner_count,
+ speed_bin_corners[vreg->speed_bin_fuse]);
+ kfree(speed_bin_corners);
+ return -EINVAL;
+ }
+
+ kfree(speed_bin_corners);
+ }
+
+ vreg->corner = devm_kcalloc(ctrl->dev, vreg->corner_count,
+ sizeof(*vreg->corner), GFP_KERNEL);
+ temp = kcalloc(vreg->corner_count, sizeof(*temp), GFP_KERNEL);
+ if (!vreg->corner || !temp)
+ return -ENOMEM;
+
+ rc = cpr3_parse_corner_array_property(vreg, "qcom,cpr-voltage-ceiling",
+ 1, temp);
+ if (rc)
+ goto free_temp;
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].ceiling_volt
+ = CPR3_ROUND(temp[i], ctrl->step_volt);
+
+ rc = cpr3_parse_corner_array_property(vreg, "qcom,cpr-voltage-floor",
+ 1, temp);
+ if (rc)
+ goto free_temp;
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].floor_volt
+ = CPR3_ROUND(temp[i], ctrl->step_volt);
+
+ /* Validate ceiling and floor values */
+ for (i = 0; i < vreg->corner_count; i++) {
+ if (vreg->corner[i].floor_volt
+ > vreg->corner[i].ceiling_volt) {
+ cpr3_err(vreg, "CPR floor[%d]=%d > ceiling[%d]=%d uV\n",
+ i, vreg->corner[i].floor_volt,
+ i, vreg->corner[i].ceiling_volt);
+ rc = -EINVAL;
+ goto free_temp;
+ }
+ }
+
+ /* Load optional system-supply voltages */
+ if (of_find_property(vreg->of_node, "qcom,system-voltage", NULL)) {
+ rc = cpr3_parse_corner_array_property(vreg,
+ "qcom,system-voltage", 1, temp);
+ if (rc)
+ goto free_temp;
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].system_volt = temp[i];
+ }
+
+ rc = cpr3_parse_corner_array_property(vreg, "qcom,corner-frequencies",
+ 1, temp);
+ if (rc)
+ goto free_temp;
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].proc_freq = temp[i];
+
+ /* Validate frequencies */
+ for (i = 1; i < vreg->corner_count; i++) {
+ if (vreg->corner[i].proc_freq
+ < vreg->corner[i - 1].proc_freq) {
+ cpr3_err(vreg, "invalid frequency: freq[%d]=%u < freq[%d]=%u\n",
+ i, vreg->corner[i].proc_freq, i - 1,
+ vreg->corner[i - 1].proc_freq);
+ rc = -EINVAL;
+ goto free_temp;
+ }
+ }
+
+ rc = cpr3_parse_array_property(vreg, "qcom,cpr-corner-fmax-map",
+ vreg->fuse_corner_count, temp);
+ if (rc)
+ goto free_temp;
+ for (i = 0; i < vreg->fuse_corner_count; i++) {
+ if (temp[i] < CPR3_CORNER_OFFSET
+ || temp[i] > vreg->corner_count + CPR3_CORNER_OFFSET) {
+ cpr3_err(vreg, "invalid corner value specified in qcom,cpr-corner-fmax-map: %u\n",
+ temp[i]);
+ rc = -EINVAL;
+ goto free_temp;
+ } else if (i > 0 && temp[i - 1] >= temp[i]) {
+ cpr3_err(vreg, "invalid corner %u less than or equal to previous corner %u\n",
+ temp[i], temp[i - 1]);
+ rc = -EINVAL;
+ goto free_temp;
+ }
+ }
+ if (temp[vreg->fuse_corner_count - 1] != vreg->corner_count) {
+ cpr3_err(vreg, "highest Fmax corner %u in qcom,cpr-corner-fmax-map does not match highest supported corner %d\n",
+ temp[vreg->fuse_corner_count - 1],
+ vreg->corner_count);
+ rc = -EINVAL;
+ goto free_temp;
+ }
+ for (i = 0; i < vreg->corner_count; i++) {
+ for (j = 0; j < vreg->fuse_corner_count; j++) {
+ if (i + CPR3_CORNER_OFFSET <= temp[j]) {
+ vreg->corner[i].cpr_fuse_corner = j;
+ break;
+ }
+ }
+ }
+
+ if (of_find_property(vreg->of_node,
+ "qcom,allow-aging-voltage-adjustment", NULL)) {
+ rc = cpr3_parse_array_property(vreg,
+ "qcom,allow-aging-voltage-adjustment",
+ 1, &aging_allowed);
+ if (rc)
+ goto free_temp;
+
+ vreg->aging_allowed = aging_allowed;
+ }
+
+ if (vreg->aging_allowed) {
+ if (ctrl->aging_ref_volt <= 0) {
+ cpr3_err(ctrl, "qcom,cpr-aging-ref-voltage must be specified\n");
+ rc = -EINVAL;
+ goto free_temp;
+ }
+
+ rc = cpr3_parse_array_property(vreg,
+ "qcom,cpr-aging-max-voltage-adjustment",
+ 1, &vreg->aging_max_adjust_volt);
+ if (rc)
+ goto free_temp;
+
+ rc = cpr3_parse_array_property(vreg,
+ "qcom,cpr-aging-ref-corner", 1, &vreg->aging_corner);
+ if (rc) {
+ goto free_temp;
+ } else if (vreg->aging_corner < CPR3_CORNER_OFFSET
+ || vreg->aging_corner > vreg->corner_count - 1
+ + CPR3_CORNER_OFFSET) {
+ cpr3_err(vreg, "aging reference corner=%d not in range [%d, %d]\n",
+ vreg->aging_corner, CPR3_CORNER_OFFSET,
+ vreg->corner_count - 1 + CPR3_CORNER_OFFSET);
+ rc = -EINVAL;
+ goto free_temp;
+ }
+ vreg->aging_corner -= CPR3_CORNER_OFFSET;
+
+ if (of_find_property(vreg->of_node, "qcom,cpr-aging-derate",
+ NULL)) {
+ rc = cpr3_parse_corner_array_property(vreg,
+ "qcom,cpr-aging-derate", 1, temp);
+ if (rc)
+ goto free_temp;
+
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].aging_derate = temp[i];
+ } else {
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].aging_derate
+ = CPR3_AGING_DERATE_UNITY;
+ }
+ }
+
+free_temp:
+ kfree(temp);
+ return rc;
+}
+
+/**
+ * cpr3_parse_thread_u32() - parse the specified property from the CPR3 thread's
+ * device tree node and verify that it is within the allowed limits
+ * @thread: Pointer to the CPR3 thread
+ * @propname: The name of the device tree property to read
+ * @out_value: The output pointer to fill with the value read
+ * @value_min: The minimum allowed property value
+ * @value_max: The maximum allowed property value
+ *
+ * This function prints a verbose error message if the property is missing or
+ * has a value which is not within the specified range.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_thread_u32(struct cpr3_thread *thread, const char *propname,
+ u32 *out_value, u32 value_min, u32 value_max)
+{
+ int rc;
+
+ rc = of_property_read_u32(thread->of_node, propname, out_value);
+ if (rc) {
+ cpr3_err(thread->ctrl, "thread %u error reading property %s, rc=%d\n",
+ thread->thread_id, propname, rc);
+ return rc;
+ }
+
+ if (*out_value < value_min || *out_value > value_max) {
+ cpr3_err(thread->ctrl, "thread %u %s=%u is invalid; allowed range: [%u, %u]\n",
+ thread->thread_id, propname, *out_value, value_min,
+ value_max);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_parse_ctrl_u32() - parse the specified property from the CPR3
+ * controller's device tree node and verify that it is within the
+ * allowed limits
+ * @ctrl: Pointer to the CPR3 controller
+ * @propname: The name of the device tree property to read
+ * @out_value: The output pointer to fill with the value read
+ * @value_min: The minimum allowed property value
+ * @value_max: The maximum allowed property value
+ *
+ * This function prints a verbose error message if the property is missing or
+ * has a value which is not within the specified range.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_ctrl_u32(struct cpr3_controller *ctrl, const char *propname,
+ u32 *out_value, u32 value_min, u32 value_max)
+{
+ int rc;
+
+ rc = of_property_read_u32(ctrl->dev->of_node, propname, out_value);
+ if (rc) {
+ cpr3_err(ctrl, "error reading property %s, rc=%d\n",
+ propname, rc);
+ return rc;
+ }
+
+ if (*out_value < value_min || *out_value > value_max) {
+ cpr3_err(ctrl, "%s=%u is invalid; allowed range: [%u, %u]\n",
+ propname, *out_value, value_min, value_max);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_parse_common_thread_data() - parse common CPR3 thread properties from
+ * device tree
+ * @thread: Pointer to the CPR3 thread
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_common_thread_data(struct cpr3_thread *thread)
+{
+ int rc;
+
+ rc = cpr3_parse_thread_u32(thread, "qcom,cpr-consecutive-up",
+ &thread->consecutive_up, CPR3_CONSECUTIVE_UP_DOWN_MIN,
+ CPR3_CONSECUTIVE_UP_DOWN_MAX);
+ if (rc)
+ return rc;
+
+ rc = cpr3_parse_thread_u32(thread, "qcom,cpr-consecutive-down",
+ &thread->consecutive_down, CPR3_CONSECUTIVE_UP_DOWN_MIN,
+ CPR3_CONSECUTIVE_UP_DOWN_MAX);
+ if (rc)
+ return rc;
+
+ rc = cpr3_parse_thread_u32(thread, "qcom,cpr-up-threshold",
+ &thread->up_threshold, CPR3_UP_DOWN_THRESHOLD_MIN,
+ CPR3_UP_DOWN_THRESHOLD_MAX);
+ if (rc)
+ return rc;
+
+ rc = cpr3_parse_thread_u32(thread, "qcom,cpr-down-threshold",
+ &thread->down_threshold, CPR3_UP_DOWN_THRESHOLD_MIN,
+ CPR3_UP_DOWN_THRESHOLD_MAX);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+/**
+ * cpr3_parse_common_ctrl_data() - parse common CPR3 controller properties from
+ * device tree
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_common_ctrl_data(struct cpr3_controller *ctrl)
+{
+ int rc;
+
+ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-sensor-time",
+ &ctrl->sensor_time, 0, UINT_MAX);
+ if (rc)
+ return rc;
+
+ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-loop-time",
+ &ctrl->loop_time, 0, UINT_MAX);
+ if (rc)
+ return rc;
+
+ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-idle-cycles",
+ &ctrl->idle_clocks, CPR3_IDLE_CLOCKS_MIN,
+ CPR3_IDLE_CLOCKS_MAX);
+ if (rc)
+ return rc;
+
+ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-step-quot-init-min",
+ &ctrl->step_quot_init_min, CPR3_STEP_QUOT_MIN,
+ CPR3_STEP_QUOT_MAX);
+ if (rc)
+ return rc;
+
+ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-step-quot-init-max",
+ &ctrl->step_quot_init_max, CPR3_STEP_QUOT_MIN,
+ CPR3_STEP_QUOT_MAX);
+ if (rc)
+ return rc;
+
+ rc = of_property_read_u32(ctrl->dev->of_node, "qcom,voltage-step",
+ &ctrl->step_volt);
+ if (rc) {
+ cpr3_err(ctrl, "error reading property qcom,voltage-step, rc=%d\n",
+ rc);
+ return rc;
+ }
+ if (ctrl->step_volt <= 0) {
+ cpr3_err(ctrl, "qcom,voltage-step=%d is invalid\n",
+ ctrl->step_volt);
+ return -EINVAL;
+ }
+
+ rc = cpr3_parse_ctrl_u32(ctrl, "qcom,cpr-count-mode",
+ &ctrl->count_mode, CPR3_COUNT_MODE_ALL_AT_ONCE_MIN,
+ CPR3_COUNT_MODE_STAGGERED);
+ if (rc)
+ return rc;
+
+ /* Count repeat is optional */
+ ctrl->count_repeat = 0;
+ of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-count-repeat",
+ &ctrl->count_repeat);
+
+ ctrl->cpr_allowed_sw = of_property_read_bool(ctrl->dev->of_node,
+ "qcom,cpr-enable");
+
+ /* Aging reference voltage is optional */
+ ctrl->aging_ref_volt = 0;
+ of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-aging-ref-voltage",
+ &ctrl->aging_ref_volt);
+
+ ctrl->vdd_regulator = devm_regulator_get(ctrl->dev, "vdd");
+ if (IS_ERR(ctrl->vdd_regulator)) {
+ rc = PTR_ERR(ctrl->vdd_regulator);
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "unable request vdd regulator, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (of_find_property(ctrl->dev->of_node, "clock-names", NULL)) {
+ ctrl->core_clk = devm_clk_get(ctrl->dev, "core_clk");
+ if (IS_ERR(ctrl->core_clk)) {
+ rc = PTR_ERR(ctrl->core_clk);
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "unable request core clock, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ ctrl->system_regulator = devm_regulator_get_optional(ctrl->dev,
+ "system");
+ if (IS_ERR(ctrl->system_regulator)) {
+ rc = PTR_ERR(ctrl->system_regulator);
+ if (rc != -EPROBE_DEFER) {
+ rc = 0;
+ ctrl->system_regulator = NULL;
+ } else {
+ return rc;
+ }
+ }
+
+ ctrl->mem_acc_regulator = devm_regulator_get_optional(ctrl->dev,
+ "mem-acc");
+ if (IS_ERR(ctrl->mem_acc_regulator)) {
+ rc = PTR_ERR(ctrl->mem_acc_regulator);
+ if (rc != -EPROBE_DEFER) {
+ rc = 0;
+ ctrl->mem_acc_regulator = NULL;
+ } else {
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * cpr3_limit_open_loop_voltages() - modify the open-loop voltage of each corner
+ * so that it fits within the floor to ceiling
+ * voltage range of the corner
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function clips the open-loop voltage for each corner so that it is
+ * limited to the floor to ceiling range. It also rounds each open-loop voltage
+ * so that it corresponds to a set point available to the underlying regulator.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_limit_open_loop_voltages(struct cpr3_regulator *vreg)
+{
+ int i, volt;
+
+ cpr3_debug(vreg, "open-loop voltages after trimming and rounding:\n");
+ for (i = 0; i < vreg->corner_count; i++) {
+ volt = CPR3_ROUND(vreg->corner[i].open_loop_volt,
+ vreg->thread->ctrl->step_volt);
+ if (volt < vreg->corner[i].floor_volt)
+ volt = vreg->corner[i].floor_volt;
+ else if (volt > vreg->corner[i].ceiling_volt)
+ volt = vreg->corner[i].ceiling_volt;
+ vreg->corner[i].open_loop_volt = volt;
+ cpr3_debug(vreg, "corner[%2d]: open-loop=%d uV\n", i, volt);
+ }
+
+ return 0;
+}
+
+/**
+ * cpr3_open_loop_voltage_as_ceiling() - configures the ceiling voltage for each
+ * corner to equal the open-loop voltage if the relevant device
+ * tree property is found for the CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function assumes that the open-loop voltage for each corner has
+ * already been rounded to the nearest allowed set point and that it falls
+ * within the floor to ceiling range.
+ *
+ * Return: none
+ */
+void cpr3_open_loop_voltage_as_ceiling(struct cpr3_regulator *vreg)
+{
+ int i;
+
+ if (!of_property_read_bool(vreg->of_node,
+ "qcom,cpr-scaled-open-loop-voltage-as-ceiling"))
+ return;
+
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].ceiling_volt
+ = vreg->corner[i].open_loop_volt;
+}
+
+/**
+ * cpr3_limit_floor_voltages() - raise the floor voltage of each corner so that
+ * the optional maximum floor to ceiling voltage range specified in
+ * device tree is satisfied
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function also ensures that the open-loop voltage for each corner falls
+ * within the final floor to ceiling voltage range and that floor voltages
+ * increase monotonically.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_limit_floor_voltages(struct cpr3_regulator *vreg)
+{
+ char *prop = "qcom,cpr-floor-to-ceiling-max-range";
+ int i, floor_new;
+ u32 *floor_range;
+ int rc = 0;
+
+ if (!of_find_property(vreg->of_node, prop, NULL))
+ goto enforce_monotonicity;
+
+ floor_range = kcalloc(vreg->corner_count, sizeof(*floor_range),
+ GFP_KERNEL);
+ if (!floor_range)
+ return -ENOMEM;
+
+ rc = cpr3_parse_corner_array_property(vreg, prop, 1, floor_range);
+ if (rc)
+ goto free_floor_adjust;
+
+ for (i = 0; i < vreg->corner_count; i++) {
+ if ((s32)floor_range[i] >= 0) {
+ floor_new = CPR3_ROUND(vreg->corner[i].ceiling_volt
+ - floor_range[i],
+ vreg->thread->ctrl->step_volt);
+
+ vreg->corner[i].floor_volt = max(floor_new,
+ vreg->corner[i].floor_volt);
+ if (vreg->corner[i].open_loop_volt
+ < vreg->corner[i].floor_volt)
+ vreg->corner[i].open_loop_volt
+ = vreg->corner[i].floor_volt;
+ }
+ }
+
+free_floor_adjust:
+ kfree(floor_range);
+
+enforce_monotonicity:
+ /* Ensure that floor voltages increase monotonically. */
+ for (i = 1; i < vreg->corner_count; i++) {
+ if (vreg->corner[i].floor_volt
+ < vreg->corner[i - 1].floor_volt) {
+ cpr3_debug(vreg, "corner %d floor voltage=%d uV < corner %d voltage=%d uV; overriding: corner %d voltage=%d\n",
+ i, vreg->corner[i].floor_volt,
+ i - 1, vreg->corner[i - 1].floor_volt,
+ i, vreg->corner[i - 1].floor_volt);
+ vreg->corner[i].floor_volt
+ = vreg->corner[i - 1].floor_volt;
+
+ if (vreg->corner[i].open_loop_volt
+ < vreg->corner[i].floor_volt)
+ vreg->corner[i].open_loop_volt
+ = vreg->corner[i].floor_volt;
+ if (vreg->corner[i].ceiling_volt
+ < vreg->corner[i].floor_volt)
+ vreg->corner[i].ceiling_volt
+ = vreg->corner[i].floor_volt;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * cpr3_print_quots() - print CPR target quotients into the kernel log for
+ * debugging purposes
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: none
+ */
+void cpr3_print_quots(struct cpr3_regulator *vreg)
+{
+ int i, j, pos;
+ size_t buflen;
+ char *buf;
+
+ buflen = sizeof(*buf) * CPR3_RO_COUNT * (MAX_CHARS_PER_INT + 2);
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (i = 0; i < vreg->corner_count; i++) {
+ for (j = 0, pos = 0; j < CPR3_RO_COUNT; j++)
+ pos += scnprintf(buf + pos, buflen - pos, " %u",
+ vreg->corner[i].target_quot[j]);
+ cpr3_debug(vreg, "target quots[%2d]:%s\n", i, buf);
+ }
+
+ kfree(buf);
+}
+
+/**
+ * cpr3_adjust_fused_open_loop_voltages() - adjust the fused open-loop voltages
+ * for each fuse corner according to device tree values
+ * @vreg: Pointer to the CPR3 regulator
+ * @fuse_volt: Pointer to an array of the fused open-loop voltage
+ * values
+ *
+ * Voltage values in fuse_volt are modified in place.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_adjust_fused_open_loop_voltages(struct cpr3_regulator *vreg,
+ int *fuse_volt)
+{
+ int i, rc, prev_volt;
+ int *volt_adjust;
+
+ if (!of_find_property(vreg->of_node,
+ "qcom,cpr-open-loop-voltage-fuse-adjustment", NULL)) {
+ /* No adjustment required. */
+ return 0;
+ }
+
+ volt_adjust = kcalloc(vreg->fuse_corner_count, sizeof(*volt_adjust),
+ GFP_KERNEL);
+ if (!volt_adjust)
+ return -ENOMEM;
+
+ rc = cpr3_parse_array_property(vreg,
+ "qcom,cpr-open-loop-voltage-fuse-adjustment",
+ vreg->fuse_corner_count, volt_adjust);
+ if (rc) {
+ cpr3_err(vreg, "could not load open-loop fused voltage adjustments, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ for (i = 0; i < vreg->fuse_corner_count; i++) {
+ if (volt_adjust[i]) {
+ prev_volt = fuse_volt[i];
+ fuse_volt[i] += volt_adjust[i];
+ cpr3_debug(vreg, "adjusted fuse corner %d open-loop voltage: %d --> %d uV\n",
+ i, prev_volt, fuse_volt[i]);
+ }
+ }
+
+done:
+ kfree(volt_adjust);
+ return rc;
+}
+
+/**
+ * cpr3_adjust_open_loop_voltages() - adjust the open-loop voltages for each
+ * corner according to device tree values
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_adjust_open_loop_voltages(struct cpr3_regulator *vreg)
+{
+ int i, rc, prev_volt, min_volt;
+ int *volt_adjust, *volt_diff;
+
+ if (!of_find_property(vreg->of_node,
+ "qcom,cpr-open-loop-voltage-adjustment", NULL)) {
+ /* No adjustment required. */
+ return 0;
+ }
+
+ volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust),
+ GFP_KERNEL);
+ volt_diff = kcalloc(vreg->corner_count, sizeof(*volt_diff), GFP_KERNEL);
+ if (!volt_adjust || !volt_diff) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ rc = cpr3_parse_corner_array_property(vreg,
+ "qcom,cpr-open-loop-voltage-adjustment", 1, volt_adjust);
+ if (rc) {
+ cpr3_err(vreg, "could not load open-loop voltage adjustments, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ for (i = 0; i < vreg->corner_count; i++) {
+ if (volt_adjust[i]) {
+ prev_volt = vreg->corner[i].open_loop_volt;
+ vreg->corner[i].open_loop_volt += volt_adjust[i];
+ cpr3_debug(vreg, "adjusted corner %d open-loop voltage: %d --> %d uV\n",
+ i, prev_volt, vreg->corner[i].open_loop_volt);
+ }
+ }
+
+ if (of_find_property(vreg->of_node,
+ "qcom,cpr-open-loop-voltage-min-diff", NULL)) {
+ rc = cpr3_parse_corner_array_property(vreg,
+ "qcom,cpr-open-loop-voltage-min-diff", 1, volt_diff);
+ if (rc) {
+ cpr3_err(vreg, "could not load minimum open-loop voltage differences, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ /*
+ * Ensure that open-loop voltages increase monotonically with respect
+ * to configurable minimum allowed differences.
+ */
+ for (i = 1; i < vreg->corner_count; i++) {
+ min_volt = vreg->corner[i - 1].open_loop_volt + volt_diff[i];
+ if (vreg->corner[i].open_loop_volt < min_volt) {
+ cpr3_debug(vreg, "adjusted corner %d open-loop voltage=%d uV < corner %d voltage=%d uV + min diff=%d uV; overriding: corner %d voltage=%d\n",
+ i, vreg->corner[i].open_loop_volt,
+ i - 1, vreg->corner[i - 1].open_loop_volt,
+ volt_diff[i], i, min_volt);
+ vreg->corner[i].open_loop_volt = min_volt;
+ }
+ }
+
+done:
+ kfree(volt_diff);
+ kfree(volt_adjust);
+ return rc;
+}
+
+/**
+ * cpr3_quot_adjustment() - returns the quotient adjustment value resulting from
+ * the specified voltage adjustment and RO scaling factor
+ * @ro_scale: The CPR ring oscillator (RO) scaling factor with units
+ * of QUOT/V
+ * @volt_adjust: The amount to adjust the voltage by in units of
+ * microvolts. This value may be positive or negative.
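+ *
+ * As a worked example with hypothetical values: cpr3_quot_adjustment(2000,
+ * -25000) returns -(2000 * 25000) / 1000000 = -50.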
+ */
+int cpr3_quot_adjustment(int ro_scale, int volt_adjust)
+{
+ unsigned long long temp;
+ int quot_adjust;
+ int sign = 1;
+
+ if (ro_scale < 0) {
+ sign = -sign;
+ ro_scale = -ro_scale;
+ }
+
+ if (volt_adjust < 0) {
+ sign = -sign;
+ volt_adjust = -volt_adjust;
+ }
+
+ temp = (unsigned long long)ro_scale * (unsigned long long)volt_adjust;
+ do_div(temp, 1000000);
+
+ quot_adjust = temp;
+ quot_adjust *= sign;
+
+ return quot_adjust;
+}
+
+/**
+ * cpr3_voltage_adjustment() - returns the voltage adjustment value resulting
+ * from the specified quotient adjustment and RO scaling factor
+ * @ro_scale: The CPR ring oscillator (RO) scaling factor with units
+ * of QUOT/V
+ * @quot_adjust: The amount to adjust the quotient by in units of
+ * QUOT. This value may be positive or negative.
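+ *
+ * As a worked example with hypothetical values: cpr3_voltage_adjustment(2000,
+ * -50) returns -(50 * 1000000) / 2000 = -25000 uV.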
+ */
+int cpr3_voltage_adjustment(int ro_scale, int quot_adjust)
+{
+ unsigned long long temp;
+ int volt_adjust;
+ int sign = 1;
+
+ if (ro_scale < 0) {
+ sign = -sign;
+ ro_scale = -ro_scale;
+ }
+
+ if (quot_adjust < 0) {
+ sign = -sign;
+ quot_adjust = -quot_adjust;
+ }
+
+ if (ro_scale == 0)
+ return 0;
+
+ temp = (unsigned long long)quot_adjust * 1000000;
+ do_div(temp, ro_scale);
+
+ volt_adjust = temp;
+ volt_adjust *= sign;
+
+ return volt_adjust;
+}
+
+/**
+ * cpr3_parse_closed_loop_voltage_adjustments() - load per-fuse-corner and
+ * per-corner closed-loop adjustment values from device tree
+ * @vreg: Pointer to the CPR3 regulator
+ * @ro_sel: Array of ring oscillator values selected for each
+ * fuse corner
+ * @volt_adjust: Pointer to array which will be filled with the
+ * per-corner closed-loop adjustment voltages
+ * @volt_adjust_fuse: Pointer to array which will be filled with the
+ * per-fuse-corner closed-loop adjustment voltages
+ * @ro_scale: Pointer to array which will be filled with the
+ * per-fuse-corner RO scaling factor values with units of
+ * QUOT/V
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_parse_closed_loop_voltage_adjustments(
+ struct cpr3_regulator *vreg, u64 *ro_sel,
+ int *volt_adjust, int *volt_adjust_fuse, int *ro_scale)
+{
+ int i, rc;
+ u32 *ro_all_scale;
+
+ if (!of_find_property(vreg->of_node,
+ "qcom,cpr-closed-loop-voltage-adjustment", NULL)
+ && !of_find_property(vreg->of_node,
+ "qcom,cpr-closed-loop-voltage-fuse-adjustment", NULL)
+ && !vreg->aging_allowed) {
+ /* No adjustment required. */
+ return 0;
+ } else if (!of_find_property(vreg->of_node,
+ "qcom,cpr-ro-scaling-factor", NULL)) {
+ cpr3_err(vreg, "qcom,cpr-ro-scaling-factor is required for closed-loop voltage adjustment, but is missing\n");
+ return -EINVAL;
+ }
+
+ ro_all_scale = kcalloc(vreg->fuse_corner_count * CPR3_RO_COUNT,
+ sizeof(*ro_all_scale), GFP_KERNEL);
+ if (!ro_all_scale)
+ return -ENOMEM;
+
+ rc = cpr3_parse_array_property(vreg, "qcom,cpr-ro-scaling-factor",
+ vreg->fuse_corner_count * CPR3_RO_COUNT, ro_all_scale);
+ if (rc) {
+ cpr3_err(vreg, "could not load RO scaling factors, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ for (i = 0; i < vreg->fuse_corner_count; i++)
+ ro_scale[i] = ro_all_scale[i * CPR3_RO_COUNT + ro_sel[i]];
+
+ for (i = 0; i < vreg->corner_count; i++)
+ memcpy(vreg->corner[i].ro_scale,
+ &ro_all_scale[vreg->corner[i].cpr_fuse_corner * CPR3_RO_COUNT],
+ sizeof(*ro_all_scale) * CPR3_RO_COUNT);
+
+ if (of_find_property(vreg->of_node,
+ "qcom,cpr-closed-loop-voltage-fuse-adjustment", NULL)) {
+ rc = cpr3_parse_array_property(vreg,
+ "qcom,cpr-closed-loop-voltage-fuse-adjustment",
+ vreg->fuse_corner_count, volt_adjust_fuse);
+ if (rc) {
+ cpr3_err(vreg, "could not load closed-loop fused voltage adjustments, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+ if (of_find_property(vreg->of_node,
+ "qcom,cpr-closed-loop-voltage-adjustment", NULL)) {
+ rc = cpr3_parse_corner_array_property(vreg,
+ "qcom,cpr-closed-loop-voltage-adjustment",
+ 1, volt_adjust);
+ if (rc) {
+ cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n",
+ rc);
+ goto done;
+ }
+ }
+
+done:
+ kfree(ro_all_scale);
+ return rc;
+}
+
+/**
+ * cpr3_apm_init() - initialize APM data for a CPR3 controller
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * This function loads memory array power mux (APM) data from device tree
+ * if it is present and requests a handle to the appropriate APM controller
+ * device.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_apm_init(struct cpr3_controller *ctrl)
+{
+ struct device_node *node = ctrl->dev->of_node;
+ int rc;
+
+ if (!of_find_property(node, "qcom,apm-ctrl", NULL)) {
+ /* No APM used */
+ return 0;
+ }
+
+ ctrl->apm = msm_apm_ctrl_dev_get(ctrl->dev);
+ if (IS_ERR(ctrl->apm)) {
+ rc = PTR_ERR(ctrl->apm);
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "APM get failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(node, "qcom,apm-threshold-voltage",
+ &ctrl->apm_threshold_volt);
+ if (rc) {
+ cpr3_err(ctrl, "error reading qcom,apm-threshold-voltage, rc=%d\n",
+ rc);
+ return rc;
+ }
+ ctrl->apm_threshold_volt
+ = CPR3_ROUND(ctrl->apm_threshold_volt, ctrl->step_volt);
+
+ /* No error check since this is an optional property. */
+ of_property_read_u32(node, "qcom,apm-hysteresis-voltage",
+ &ctrl->apm_adj_volt);
+ ctrl->apm_adj_volt = CPR3_ROUND(ctrl->apm_adj_volt, ctrl->step_volt);
+
+ ctrl->apm_high_supply = MSM_APM_SUPPLY_APCC;
+ ctrl->apm_low_supply = MSM_APM_SUPPLY_MX;
+
+ return 0;
+}
+
+/**
+ * cpr3_mem_acc_init() - initialize mem-acc regulator data for
+ * a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+int cpr3_mem_acc_init(struct cpr3_regulator *vreg)
+{
+ struct cpr3_controller *ctrl = vreg->thread->ctrl;
+ u32 *temp;
+ int i, rc;
+
+ if (!ctrl->mem_acc_regulator) {
+ cpr3_info(ctrl, "not using memory accelerator regulator\n");
+ return 0;
+ }
+
+ temp = kcalloc(vreg->corner_count, sizeof(*temp), GFP_KERNEL);
+ if (!temp)
+ return -ENOMEM;
+
+ rc = cpr3_parse_corner_array_property(vreg, "qcom,mem-acc-voltage",
+ 1, temp);
+ if (rc) {
+ cpr3_err(ctrl, "could not load mem-acc corners, rc=%d\n", rc);
+ } else {
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].mem_acc_volt = temp[i];
+ }
+
+ kfree(temp);
+ return rc;
+}
diff --git a/drivers/regulator/cpr4-apss-regulator.c b/drivers/regulator/cpr4-apss-regulator.c
new file mode 100644
index 000000000000..8e43ac763c4a
--- /dev/null
+++ b/drivers/regulator/cpr4-apss-regulator.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include "cpr3-regulator.h"
+
+#define MSMTITANIUM_APSS_FUSE_CORNERS 4
+
+/**
+ * struct cpr4_msmtitanium_apss_fuses - APSS specific fuse data for MSMTITANIUM
+ * @ro_sel: Ring oscillator select fuse parameter value for each
+ * fuse corner
+ * @init_voltage: Initial (i.e. open-loop) voltage fuse parameter value
+ * for each fuse corner (raw, not converted to a voltage)
+ * @target_quot: CPR target quotient fuse parameter value for each fuse
+ * corner
+ * @quot_offset: CPR target quotient offset fuse parameter value for each
+ * fuse corner (raw, not unpacked) used for target quotient
+ * interpolation
+ * @speed_bin: Application processor speed bin fuse parameter value for
+ * the given chip
+ * @cpr_fusing_rev: CPR fusing revision fuse parameter value
+ *
+ * This struct holds the values for all of the fuses read from memory.
+ */
+struct cpr4_msmtitanium_apss_fuses {
+ u64 ro_sel[MSMTITANIUM_APSS_FUSE_CORNERS];
+ u64 init_voltage[MSMTITANIUM_APSS_FUSE_CORNERS];
+ u64 target_quot[MSMTITANIUM_APSS_FUSE_CORNERS];
+ u64 quot_offset[MSMTITANIUM_APSS_FUSE_CORNERS];
+ u64 speed_bin;
+ u64 cpr_fusing_rev;
+};
+
+/*
+ * fuse combo = fusing revision + 8 * (speed bin)
+ * where: fusing revision = 0 - 7 and speed bin = 0 - 7
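+ *
+ * For example, a hypothetical part with fusing revision 3 and speed bin 2
+ * selects fuse combo 3 + 8 * 2 = 19.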
+ */
+#define CPR4_MSMTITANIUM_APSS_FUSE_COMBO_COUNT 64
+
+/*
+ * Constants which define the name of each fuse corner.
+ */
+enum cpr4_msmtitanium_apss_fuse_corner {
+ CPR4_MSMTITANIUM_APSS_FUSE_CORNER_LOWSVS = 0,
+ CPR4_MSMTITANIUM_APSS_FUSE_CORNER_SVS = 1,
+ CPR4_MSMTITANIUM_APSS_FUSE_CORNER_NOM = 2,
+ CPR4_MSMTITANIUM_APSS_FUSE_CORNER_TURBO_L1 = 3,
+};
+
+static const char * const cpr4_msmtitanium_apss_fuse_corner_name[] = {
+ [CPR4_MSMTITANIUM_APSS_FUSE_CORNER_LOWSVS] = "LowSVS",
+ [CPR4_MSMTITANIUM_APSS_FUSE_CORNER_SVS] = "SVS",
+ [CPR4_MSMTITANIUM_APSS_FUSE_CORNER_NOM] = "NOM",
+ [CPR4_MSMTITANIUM_APSS_FUSE_CORNER_TURBO_L1] = "TURBO_L1",
+};
+
+/*
+ * MSMTITANIUM APSS fuse parameter locations:
+ *
+ * Structs are organized with the following dimensions:
+ * Outer: 0 to 3 for fuse corners from lowest to highest corner
+ * Inner: large enough to hold the longest set of parameter segments which
+ * fully defines a fuse parameter, +1 (for NULL termination).
+ * Each segment corresponds to a contiguous group of bits from a
+ * single fuse row. These segments are concatenated together in
+ * order to form the full fuse parameter value. The segments for
+ * a given parameter may correspond to different fuse rows.
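+ *
+ * For example, msmtitanium_apss_ro_sel_param[0] below indicates that the
+ * LowSVS RO select value occupies bits 15:12 of fuse row 73.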
+ */
+static const struct cpr3_fuse_param
+msmtitanium_apss_ro_sel_param[MSMTITANIUM_APSS_FUSE_CORNERS][2] = {
+ {{73, 12, 15}, {} },
+ {{73, 8, 11}, {} },
+ {{73, 4, 7}, {} },
+ {{73, 0, 3}, {} },
+};
+
+static const struct cpr3_fuse_param
+msmtitanium_apss_init_voltage_param[MSMTITANIUM_APSS_FUSE_CORNERS][2] = {
+ {{71, 24, 29}, {} },
+ {{71, 18, 23}, {} },
+ {{71, 12, 17}, {} },
+ {{71, 6, 11}, {} },
+};
+
+static const struct cpr3_fuse_param
+msmtitanium_apss_target_quot_param[MSMTITANIUM_APSS_FUSE_CORNERS][2] = {
+ {{72, 44, 55}, {} },
+ {{72, 32, 43}, {} },
+ {{72, 20, 31}, {} },
+ {{72, 8, 19}, {} },
+};
+
+static const struct cpr3_fuse_param
+msmtitanium_apss_quot_offset_param[MSMTITANIUM_APSS_FUSE_CORNERS][2] = {
+ {{} },
+ {{71, 46, 52}, {} },
+ {{71, 39, 45}, {} },
+ {{71, 32, 38}, {} },
+};
+
+static const struct cpr3_fuse_param msmtitanium_cpr_fusing_rev_param[] = {
+ {71, 53, 55},
+ {},
+};
+
+static const struct cpr3_fuse_param msmtitanium_apss_speed_bin_param[] = {
+ {36, 40, 42},
+ {},
+};
+
+/*
+ * Open loop voltage fuse reference voltages in microvolts for MSMTITANIUM
+ */
+static const int msmtitanium_apss_fuse_ref_volt
+ [MSMTITANIUM_APSS_FUSE_CORNERS] = {
+ 645000,
+ 720000,
+ 865000,
+ 1065000,
+};
+
+#define MSMTITANIUM_APSS_FUSE_STEP_VOLT 10000
+#define MSMTITANIUM_APSS_VOLTAGE_FUSE_SIZE 6
+#define MSMTITANIUM_APSS_QUOT_OFFSET_SCALE 5
+
+#define MSMTITANIUM_APSS_CPR_SENSOR_COUNT 13
+
+#define MSMTITANIUM_APSS_CPR_CLOCK_RATE 19200000
+
+/**
+ * cpr4_msmtitanium_apss_read_fuse_data() - load APSS specific fuse parameter values
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * This function allocates a cpr4_msmtitanium_apss_fuses struct, fills it with
+ * values read out of hardware fuses, and finally copies common fuse values
+ * into the CPR3 regulator struct.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_msmtitanium_apss_read_fuse_data(struct cpr3_regulator *vreg)
+{
+ void __iomem *base = vreg->thread->ctrl->fuse_base;
+ struct cpr4_msmtitanium_apss_fuses *fuse;
+ int i, rc;
+
+ fuse = devm_kzalloc(vreg->thread->ctrl->dev, sizeof(*fuse), GFP_KERNEL);
+ if (!fuse)
+ return -ENOMEM;
+
+ rc = cpr3_read_fuse_param(base, msmtitanium_apss_speed_bin_param,
+ &fuse->speed_bin);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read speed bin fuse, rc=%d\n", rc);
+ return rc;
+ }
+ cpr3_info(vreg, "speed bin = %llu\n", fuse->speed_bin);
+
+ rc = cpr3_read_fuse_param(base, msmtitanium_cpr_fusing_rev_param,
+ &fuse->cpr_fusing_rev);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read CPR fusing revision fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+ cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
+
+ for (i = 0; i < MSMTITANIUM_APSS_FUSE_CORNERS; i++) {
+ rc = cpr3_read_fuse_param(base,
+ msmtitanium_apss_init_voltage_param[i],
+ &fuse->init_voltage[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d initial voltage fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+
+ rc = cpr3_read_fuse_param(base,
+ msmtitanium_apss_target_quot_param[i],
+ &fuse->target_quot[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d target quotient fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+
+ rc = cpr3_read_fuse_param(base,
+ msmtitanium_apss_ro_sel_param[i],
+ &fuse->ro_sel[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d RO select fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+
+ rc = cpr3_read_fuse_param(base,
+ msmtitanium_apss_quot_offset_param[i],
+ &fuse->quot_offset[i]);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read fuse-corner %d quotient offset fuse, rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ vreg->fuse_combo = fuse->cpr_fusing_rev + 8 * fuse->speed_bin;
+ if (vreg->fuse_combo >= CPR4_MSMTITANIUM_APSS_FUSE_COMBO_COUNT) {
+ cpr3_err(vreg, "invalid CPR fuse combo = %d found\n",
+ vreg->fuse_combo);
+ return -EINVAL;
+ }
+
+ vreg->speed_bin_fuse = fuse->speed_bin;
+ vreg->cpr_rev_fuse = fuse->cpr_fusing_rev;
+ vreg->fuse_corner_count = MSMTITANIUM_APSS_FUSE_CORNERS;
+ vreg->platform_fuses = fuse;
+
+ return 0;
+}
+
+/**
+ * cpr4_apss_parse_corner_data() - parse APSS corner data from device tree
+ * properties of the CPR3 regulator's device node
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_apss_parse_corner_data(struct cpr3_regulator *vreg)
+{
+ int rc;
+
+ rc = cpr3_parse_common_corner_data(vreg);
+ if (rc) {
+ cpr3_err(vreg, "error reading corner data, rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * cpr4_msmtitanium_apss_calculate_open_loop_voltages() - calculate the open-loop
+ * voltage for each corner of a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * If open-loop voltage interpolation is allowed in device tree, then
+ * this function calculates the open-loop voltage for a given corner using
+ * linear interpolation. This interpolation is performed using the processor
+ * frequencies of the lower and higher Fmax corners along with their fused
+ * open-loop voltages.
+ *
+ * If open-loop voltage interpolation is not allowed, then this function uses
+ * the Fmax fused open-loop voltage for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_msmtitanium_apss_calculate_open_loop_voltages(
+ struct cpr3_regulator *vreg)
+{
+ struct device_node *node = vreg->of_node;
+ struct cpr4_msmtitanium_apss_fuses *fuse = vreg->platform_fuses;
+ int i, j, rc = 0;
+ bool allow_interpolation;
+ u64 freq_low, volt_low, freq_high, volt_high;
+ int *fuse_volt;
+ int *fmax_corner;
+
+ fuse_volt = kcalloc(vreg->fuse_corner_count, sizeof(*fuse_volt),
+ GFP_KERNEL);
+ fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+ GFP_KERNEL);
+ if (!fuse_volt || !fmax_corner) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < vreg->fuse_corner_count; i++) {
+ fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse(
+ msmtitanium_apss_fuse_ref_volt[i],
+ MSMTITANIUM_APSS_FUSE_STEP_VOLT, fuse->init_voltage[i],
+ MSMTITANIUM_APSS_VOLTAGE_FUSE_SIZE);
+
+ /* Log fused open-loop voltage values for debugging purposes. */
+ cpr3_info(vreg, "fused %8s: open-loop=%7d uV\n",
+ cpr4_msmtitanium_apss_fuse_corner_name[i],
+ fuse_volt[i]);
+ }
+
+ rc = cpr3_adjust_fused_open_loop_voltages(vreg, fuse_volt);
+ if (rc) {
+ cpr3_err(vreg, "fused open-loop voltage adjustment failed, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+ allow_interpolation = of_property_read_bool(node,
+ "qcom,allow-voltage-interpolation");
+
+ for (i = 1; i < vreg->fuse_corner_count; i++) {
+ if (fuse_volt[i] < fuse_volt[i - 1]) {
+ cpr3_info(vreg, "fuse corner %d voltage=%d uV < fuse corner %d voltage=%d uV; overriding: fuse corner %d voltage=%d\n",
+ i, fuse_volt[i], i - 1, fuse_volt[i - 1],
+ i, fuse_volt[i - 1]);
+ fuse_volt[i] = fuse_volt[i - 1];
+ }
+ }
+
+ if (!allow_interpolation) {
+ /* Use fused open-loop voltage for lower frequencies. */
+ for (i = 0; i < vreg->corner_count; i++)
+ vreg->corner[i].open_loop_volt
+ = fuse_volt[vreg->corner[i].cpr_fuse_corner];
+ goto done;
+ }
+
+ /* Determine highest corner mapped to each fuse corner */
+ j = vreg->fuse_corner_count - 1;
+ for (i = vreg->corner_count - 1; i >= 0; i--) {
+ if (vreg->corner[i].cpr_fuse_corner == j) {
+ fmax_corner[j] = i;
+ j--;
+ }
+ }
+ if (j >= 0) {
+ cpr3_err(vreg, "invalid fuse corner mapping\n");
+ rc = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Interpolation is not possible for corners mapped to the lowest fuse
+ * corner so use the fuse corner value directly.
+ */
+ for (i = 0; i <= fmax_corner[0]; i++)
+ vreg->corner[i].open_loop_volt = fuse_volt[0];
+
+ /* Interpolate voltages for the higher fuse corners. */
+ for (i = 1; i < vreg->fuse_corner_count; i++) {
+ freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+ volt_low = fuse_volt[i - 1];
+ freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+ volt_high = fuse_volt[i];
+
+ for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+ vreg->corner[j].open_loop_volt = cpr3_interpolate(
+ freq_low, volt_low, freq_high, volt_high,
+ vreg->corner[j].proc_freq);
+ }
+
+done:
+ if (rc == 0) {
+ cpr3_debug(vreg, "unadjusted per-corner open-loop voltages:\n");
+ for (i = 0; i < vreg->corner_count; i++)
+ cpr3_debug(vreg, "open-loop[%2d] = %d uV\n", i,
+ vreg->corner[i].open_loop_volt);
+
+ rc = cpr3_adjust_open_loop_voltages(vreg);
+ if (rc)
+ cpr3_err(vreg, "open-loop voltage adjustment failed, rc=%d\n",
+ rc);
+ }
+
+ kfree(fuse_volt);
+ kfree(fmax_corner);
+ return rc;
+}
+
+/**
+ * cpr4_msmtitanium_apss_set_no_interpolation_quotients() - use the fused target
+ * quotient values for lower frequencies.
+ * @vreg: Pointer to the CPR3 regulator
+ * @volt_adjust: Pointer to array of per-corner closed-loop adjustment
+ * voltages
+ * @volt_adjust_fuse: Pointer to array of per-fuse-corner closed-loop
+ * adjustment voltages
+ * @ro_scale: Pointer to array of per-fuse-corner RO scaling factor
+ * values with units of QUOT/V
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_msmtitanium_apss_set_no_interpolation_quotients(
+ struct cpr3_regulator *vreg, int *volt_adjust,
+ int *volt_adjust_fuse, int *ro_scale)
+{
+ struct cpr4_msmtitanium_apss_fuses *fuse = vreg->platform_fuses;
+ u32 quot, ro;
+ int quot_adjust;
+ int i, fuse_corner;
+
+ for (i = 0; i < vreg->corner_count; i++) {
+ fuse_corner = vreg->corner[i].cpr_fuse_corner;
+ quot = fuse->target_quot[fuse_corner];
+ quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner],
+ volt_adjust_fuse[fuse_corner] +
+ volt_adjust[i]);
+ ro = fuse->ro_sel[fuse_corner];
+ vreg->corner[i].target_quot[ro] = quot + quot_adjust;
+ cpr3_debug(vreg, "corner=%d RO=%u target quot=%u\n",
+ i, ro, quot);
+
+ if (quot_adjust)
+ cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %u --> %u (%d uV)\n",
+ i, ro, quot, vreg->corner[i].target_quot[ro],
+ volt_adjust_fuse[fuse_corner] +
+ volt_adjust[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * cpr4_msmtitanium_apss_calculate_target_quotients() - calculate the CPR target
+ * quotient for each corner of a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * If target quotient interpolation is allowed in device tree, then this
+ * function calculates the target quotient for a given corner using linear
+ * interpolation. This interpolation is performed using the processor
+ * frequencies of the lower and higher Fmax corners along with the fused
+ * target quotient and quotient offset of the higher Fmax corner.
+ *
+ * If target quotient interpolation is not allowed, then this function uses
+ * the Fmax fused target quotient for all of the corners associated with a
+ * given fuse corner.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_msmtitanium_apss_calculate_target_quotients(
+ struct cpr3_regulator *vreg)
+{
+ struct cpr4_msmtitanium_apss_fuses *fuse = vreg->platform_fuses;
+ int rc;
+ bool allow_interpolation;
+ u64 freq_low, freq_high, prev_quot;
+ u64 *quot_low;
+ u64 *quot_high;
+ u32 quot, ro;
+ int i, j, fuse_corner, quot_adjust;
+ int *fmax_corner;
+ int *volt_adjust, *volt_adjust_fuse, *ro_scale;
+
+ /* Log fused quotient values for debugging purposes. */
+ cpr3_info(vreg, "fused LowSVS: quot[%2llu]=%4llu\n",
+ fuse->ro_sel[CPR4_MSMTITANIUM_APSS_FUSE_CORNER_LOWSVS],
+ fuse->target_quot[CPR4_MSMTITANIUM_APSS_FUSE_CORNER_LOWSVS]);
+ for (i = CPR4_MSMTITANIUM_APSS_FUSE_CORNER_SVS;
+ i <= CPR4_MSMTITANIUM_APSS_FUSE_CORNER_TURBO_L1; i++)
+ cpr3_info(vreg, "fused %8s: quot[%2llu]=%4llu, quot_offset[%2llu]=%4llu\n",
+ cpr4_msmtitanium_apss_fuse_corner_name[i],
+ fuse->ro_sel[i], fuse->target_quot[i],
+ fuse->ro_sel[i], fuse->quot_offset[i] *
+ MSMTITANIUM_APSS_QUOT_OFFSET_SCALE);
+
+ allow_interpolation = of_property_read_bool(vreg->of_node,
+ "qcom,allow-quotient-interpolation");
+
+ volt_adjust = kcalloc(vreg->corner_count, sizeof(*volt_adjust),
+ GFP_KERNEL);
+ volt_adjust_fuse = kcalloc(vreg->fuse_corner_count,
+ sizeof(*volt_adjust_fuse), GFP_KERNEL);
+ ro_scale = kcalloc(vreg->fuse_corner_count, sizeof(*ro_scale),
+ GFP_KERNEL);
+ fmax_corner = kcalloc(vreg->fuse_corner_count, sizeof(*fmax_corner),
+ GFP_KERNEL);
+ quot_low = kcalloc(vreg->fuse_corner_count, sizeof(*quot_low),
+ GFP_KERNEL);
+ quot_high = kcalloc(vreg->fuse_corner_count, sizeof(*quot_high),
+ GFP_KERNEL);
+ if (!volt_adjust || !volt_adjust_fuse || !ro_scale ||
+ !fmax_corner || !quot_low || !quot_high) {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ rc = cpr3_parse_closed_loop_voltage_adjustments(vreg, &fuse->ro_sel[0],
+ volt_adjust, volt_adjust_fuse, ro_scale);
+ if (rc) {
+ cpr3_err(vreg, "could not load closed-loop voltage adjustments, rc=%d\n",
+ rc);
+ goto done;
+ }
+
+	if (!allow_interpolation) {
+		/* Use fused target quotients for lower frequencies. */
+		rc = cpr4_msmtitanium_apss_set_no_interpolation_quotients(
+			vreg, volt_adjust, volt_adjust_fuse, ro_scale);
+		goto done;
+	}
+
+ /* Determine highest corner mapped to each fuse corner */
+ j = vreg->fuse_corner_count - 1;
+ for (i = vreg->corner_count - 1; i >= 0; i--) {
+ if (vreg->corner[i].cpr_fuse_corner == j) {
+ fmax_corner[j] = i;
+ j--;
+ }
+ }
+ if (j >= 0) {
+ cpr3_err(vreg, "invalid fuse corner mapping\n");
+ rc = -EINVAL;
+ goto done;
+ }
+
+ /*
+ * Interpolation is not possible for corners mapped to the lowest fuse
+ * corner so use the fuse corner value directly.
+ */
+ i = CPR4_MSMTITANIUM_APSS_FUSE_CORNER_LOWSVS;
+ quot_adjust = cpr3_quot_adjustment(ro_scale[i], volt_adjust_fuse[i]);
+ quot = fuse->target_quot[i] + quot_adjust;
+ quot_high[i] = quot_low[i] = quot;
+ ro = fuse->ro_sel[i];
+ if (quot_adjust)
+ cpr3_debug(vreg, "adjusted fuse corner %d RO%u target quot: %llu --> %u (%d uV)\n",
+ i, ro, fuse->target_quot[i], quot, volt_adjust_fuse[i]);
+
+ for (i = 0; i <= fmax_corner[CPR4_MSMTITANIUM_APSS_FUSE_CORNER_LOWSVS];
+ i++)
+ vreg->corner[i].target_quot[ro] = quot;
+
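+	/*
+	 * Derive initial low and high quotient bounds for each remaining fuse
+	 * corner.  When a fuse corner uses the same ring oscillator as the
+	 * previous one, its low bound is the previous corner's high quotient;
+	 * otherwise the low bound is derived from the fused quotient offset.
+	 */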
+ for (i = CPR4_MSMTITANIUM_APSS_FUSE_CORNER_SVS;
+ i < vreg->fuse_corner_count; i++) {
+ quot_high[i] = fuse->target_quot[i];
+ if (fuse->ro_sel[i] == fuse->ro_sel[i - 1])
+ quot_low[i] = quot_high[i - 1];
+ else
+ quot_low[i] = quot_high[i]
+ - fuse->quot_offset[i]
+ * MSMTITANIUM_APSS_QUOT_OFFSET_SCALE;
+ if (quot_high[i] < quot_low[i]) {
+ cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu; overriding: quot_high[%d]=%llu\n",
+ i, quot_high[i], i, quot_low[i],
+ i, quot_low[i]);
+ quot_high[i] = quot_low[i];
+ }
+ }
+
+ /* Perform per-fuse-corner target quotient adjustment */
+ for (i = 1; i < vreg->fuse_corner_count; i++) {
+ quot_adjust = cpr3_quot_adjustment(ro_scale[i],
+ volt_adjust_fuse[i]);
+ if (quot_adjust) {
+ prev_quot = quot_high[i];
+ quot_high[i] += quot_adjust;
+ cpr3_debug(vreg, "adjusted fuse corner %d RO%llu target quot: %llu --> %llu (%d uV)\n",
+ i, fuse->ro_sel[i], prev_quot, quot_high[i],
+ volt_adjust_fuse[i]);
+ }
+
+ if (fuse->ro_sel[i] == fuse->ro_sel[i - 1])
+ quot_low[i] = quot_high[i - 1];
+ else
+ quot_low[i] += cpr3_quot_adjustment(ro_scale[i],
+ volt_adjust_fuse[i - 1]);
+
+ if (quot_high[i] < quot_low[i]) {
+ cpr3_debug(vreg, "quot_high[%d]=%llu < quot_low[%d]=%llu after adjustment; overriding: quot_high[%d]=%llu\n",
+ i, quot_high[i], i, quot_low[i],
+ i, quot_low[i]);
+ quot_high[i] = quot_low[i];
+ }
+ }
+
+ /* Interpolate voltages for the higher fuse corners. */
+ for (i = 1; i < vreg->fuse_corner_count; i++) {
+ freq_low = vreg->corner[fmax_corner[i - 1]].proc_freq;
+ freq_high = vreg->corner[fmax_corner[i]].proc_freq;
+
+ ro = fuse->ro_sel[i];
+ for (j = fmax_corner[i - 1] + 1; j <= fmax_corner[i]; j++)
+ vreg->corner[j].target_quot[ro] = cpr3_interpolate(
+ freq_low, quot_low[i], freq_high, quot_high[i],
+ vreg->corner[j].proc_freq);
+ }
+
+ /* Perform per-corner target quotient adjustment */
+ for (i = 0; i < vreg->corner_count; i++) {
+ fuse_corner = vreg->corner[i].cpr_fuse_corner;
+ ro = fuse->ro_sel[fuse_corner];
+ quot_adjust = cpr3_quot_adjustment(ro_scale[fuse_corner],
+ volt_adjust[i]);
+ if (quot_adjust) {
+ prev_quot = vreg->corner[i].target_quot[ro];
+ vreg->corner[i].target_quot[ro] += quot_adjust;
+ cpr3_debug(vreg, "adjusted corner %d RO%u target quot: %llu --> %u (%d uV)\n",
+ i, ro, prev_quot,
+ vreg->corner[i].target_quot[ro],
+ volt_adjust[i]);
+ }
+ }
+
+ /* Ensure that target quotients increase monotonically */
+ for (i = 1; i < vreg->corner_count; i++) {
+ ro = fuse->ro_sel[vreg->corner[i].cpr_fuse_corner];
+ if (fuse->ro_sel[vreg->corner[i - 1].cpr_fuse_corner] == ro
+ && vreg->corner[i].target_quot[ro]
+ < vreg->corner[i - 1].target_quot[ro]) {
+ cpr3_debug(vreg, "adjusted corner %d RO%u target quot=%u < adjusted corner %d RO%u target quot=%u; overriding: corner %d RO%u target quot=%u\n",
+ i, ro, vreg->corner[i].target_quot[ro],
+ i - 1, ro, vreg->corner[i - 1].target_quot[ro],
+ i, ro, vreg->corner[i - 1].target_quot[ro]);
+ vreg->corner[i].target_quot[ro]
+ = vreg->corner[i - 1].target_quot[ro];
+ }
+ }
+
+done:
+ kfree(volt_adjust);
+ kfree(volt_adjust_fuse);
+ kfree(ro_scale);
+ kfree(fmax_corner);
+ kfree(quot_low);
+ kfree(quot_high);
+ return rc;
+}
+
+/**
+ * cpr4_apss_print_settings() - print out APSS CPR configuration settings into
+ * the kernel log for debugging purposes
+ * @vreg: Pointer to the CPR3 regulator
+ */
+static void cpr4_apss_print_settings(struct cpr3_regulator *vreg)
+{
+ struct cpr3_corner *corner;
+ int i;
+
+ cpr3_debug(vreg, "Corner: Frequency (Hz), Fuse Corner, Floor (uV), Open-Loop (uV), Ceiling (uV)\n");
+ for (i = 0; i < vreg->corner_count; i++) {
+ corner = &vreg->corner[i];
+ cpr3_debug(vreg, "%3d: %10u, %2d, %7d, %7d, %7d\n",
+ i, corner->proc_freq, corner->cpr_fuse_corner,
+ corner->floor_volt, corner->open_loop_volt,
+ corner->ceiling_volt);
+ }
+
+ if (vreg->thread->ctrl->apm)
+ cpr3_debug(vreg, "APM threshold = %d uV, APM adjust = %d uV\n",
+ vreg->thread->ctrl->apm_threshold_volt,
+ vreg->thread->ctrl->apm_adj_volt);
+}
+
+/**
+ * cpr4_apss_init_thread() - perform steps necessary to initialize the
+ * configuration data for a CPR3 thread
+ * @thread: Pointer to the CPR3 thread
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_apss_init_thread(struct cpr3_thread *thread)
+{
+ int rc;
+
+ rc = cpr3_parse_common_thread_data(thread);
+ if (rc) {
+ cpr3_err(thread->ctrl, "thread %u unable to read CPR thread data from device tree, rc=%d\n",
+ thread->thread_id, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * cpr4_apss_init_regulator() - perform all steps necessary to initialize the
+ * configuration data for a CPR3 regulator
+ * @vreg: Pointer to the CPR3 regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_apss_init_regulator(struct cpr3_regulator *vreg)
+{
+ struct cpr4_msmtitanium_apss_fuses *fuse;
+ int rc;
+
+ rc = cpr4_msmtitanium_apss_read_fuse_data(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to read CPR fuse data, rc=%d\n", rc);
+ return rc;
+ }
+
+ fuse = vreg->platform_fuses;
+
+ rc = cpr4_apss_parse_corner_data(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to read CPR corner data from device tree, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_mem_acc_init(vreg);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(vreg, "unable to initialize mem-acc regulator settings, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr4_msmtitanium_apss_calculate_open_loop_voltages(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to calculate open-loop voltages, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_limit_open_loop_voltages(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to limit open-loop voltages, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr3_open_loop_voltage_as_ceiling(vreg);
+
+ rc = cpr3_limit_floor_voltages(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to limit floor voltages, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = cpr4_msmtitanium_apss_calculate_target_quotients(vreg);
+ if (rc) {
+ cpr3_err(vreg, "unable to calculate target quotients, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ cpr4_apss_print_settings(vreg);
+
+ return 0;
+}
+
+/**
+ * cpr4_apss_init_controller() - perform APSS CPR4 controller specific
+ * initializations
+ * @ctrl: Pointer to the CPR3 controller
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int cpr4_apss_init_controller(struct cpr3_controller *ctrl)
+{
+ int rc;
+
+ rc = cpr3_parse_common_ctrl_data(ctrl);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "unable to parse common controller data, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(ctrl->dev->of_node,
+ "qcom,cpr-down-error-step-limit",
+ &ctrl->down_error_step_limit);
+ if (rc) {
+ cpr3_err(ctrl, "error reading qcom,cpr-down-error-step-limit, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(ctrl->dev->of_node,
+ "qcom,cpr-up-error-step-limit",
+ &ctrl->up_error_step_limit);
+ if (rc) {
+ cpr3_err(ctrl, "error reading qcom,cpr-up-error-step-limit, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ ctrl->saw_use_unit_mV = of_property_read_bool(ctrl->dev->of_node,
+ "qcom,cpr-saw-use-unit-mV");
+
+ ctrl->vdd_limit_regulator = devm_regulator_get(ctrl->dev, "vdd-limit");
+ if (IS_ERR(ctrl->vdd_limit_regulator)) {
+ rc = PTR_ERR(ctrl->vdd_limit_regulator);
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "unable to request vdd-limit regulator, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_apm_init(ctrl);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "unable to initialize APM settings, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ ctrl->sensor_count = MSMTITANIUM_APSS_CPR_SENSOR_COUNT;
+
+ /*
+ * APSS only has one thread (0) per controller so the zeroed
+ * array does not need further modification.
+ */
+ ctrl->sensor_owner = devm_kcalloc(ctrl->dev, ctrl->sensor_count,
+ sizeof(*ctrl->sensor_owner), GFP_KERNEL);
+ if (!ctrl->sensor_owner)
+ return -ENOMEM;
+
+ ctrl->cpr_clock_rate = MSMTITANIUM_APSS_CPR_CLOCK_RATE;
+ ctrl->ctrl_type = CPR_CTRL_TYPE_CPR4;
+ ctrl->supports_hw_closed_loop = true;
+ ctrl->use_hw_closed_loop = of_property_read_bool(ctrl->dev->of_node,
+ "qcom,cpr-hw-closed-loop");
+ return 0;
+}
+
+static int cpr4_apss_regulator_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+ return cpr3_regulator_suspend(ctrl);
+}
+
+static int cpr4_apss_regulator_resume(struct platform_device *pdev)
+{
+ struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+ return cpr3_regulator_resume(ctrl);
+}
+
+static int cpr4_apss_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cpr3_controller *ctrl;
+ int i, rc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "Device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = dev;
+ /* Set to false later if anything precludes CPR operation. */
+ ctrl->cpr_allowed_hw = true;
+
+ rc = of_property_read_string(dev->of_node, "qcom,cpr-ctrl-name",
+ &ctrl->name);
+ if (rc) {
+ cpr3_err(ctrl, "unable to read qcom,cpr-ctrl-name, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr3_map_fuse_base(ctrl, pdev);
+ if (rc) {
+ cpr3_err(ctrl, "could not map fuse base address\n");
+ return rc;
+ }
+
+ rc = cpr3_allocate_threads(ctrl, 0, 0);
+ if (rc) {
+ cpr3_err(ctrl, "failed to allocate CPR thread array, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (ctrl->thread_count != 1) {
+ cpr3_err(ctrl, "expected 1 thread but found %d\n",
+ ctrl->thread_count);
+ return -EINVAL;
+ }
+
+ rc = cpr4_apss_init_controller(ctrl);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ cpr3_err(ctrl, "failed to initialize CPR controller parameters, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = cpr4_apss_init_thread(&ctrl->thread[0]);
+ if (rc) {
+ cpr3_err(ctrl, "thread initialization failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < ctrl->thread[0].vreg_count; i++) {
+ rc = cpr4_apss_init_regulator(&ctrl->thread[0].vreg[i]);
+ if (rc) {
+ cpr3_err(&ctrl->thread[0].vreg[i], "regulator initialization failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ platform_set_drvdata(pdev, ctrl);
+
+ return cpr3_regulator_register(pdev, ctrl);
+}
+
+static int cpr4_apss_regulator_remove(struct platform_device *pdev)
+{
+ struct cpr3_controller *ctrl = platform_get_drvdata(pdev);
+
+ return cpr3_regulator_unregister(ctrl);
+}
+
+static struct of_device_id cpr4_regulator_match_table[] = {
+ { .compatible = "qcom,cpr4-msmtitanium-apss-regulator", },
+ {}
+};
+
+static struct platform_driver cpr4_apss_regulator_driver = {
+ .driver = {
+ .name = "qcom,cpr4-apss-regulator",
+ .of_match_table = cpr4_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = cpr4_apss_regulator_probe,
+ .remove = cpr4_apss_regulator_remove,
+ .suspend = cpr4_apss_regulator_suspend,
+ .resume = cpr4_apss_regulator_resume,
+};
+
+static int cpr4_regulator_init(void)
+{
+ return platform_driver_register(&cpr4_apss_regulator_driver);
+}
+
+static void cpr4_regulator_exit(void)
+{
+ platform_driver_unregister(&cpr4_apss_regulator_driver);
+}
+
+MODULE_DESCRIPTION("CPR4 APSS regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(cpr4_regulator_init);
+module_exit(cpr4_regulator_exit);
diff --git a/drivers/regulator/kryo-regulator.c b/drivers/regulator/kryo-regulator.c
new file mode 100644
index 000000000000..c50219348256
--- /dev/null
+++ b/drivers/regulator/kryo-regulator.c
@@ -0,0 +1,1106 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/cpu_pm.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/kryo-regulator.h>
+
+#include <soc/qcom/spm.h>
+
+#define KRYO_REGULATOR_DRIVER_NAME "kryo-regulator"
+
+#define kvreg_err(kvreg, message, ...) \
+ pr_err("%s: " message, (kvreg)->name, ##__VA_ARGS__)
+#define kvreg_info(kvreg, message, ...) \
+ pr_info("%s: " message, (kvreg)->name, ##__VA_ARGS__)
+#define kvreg_debug(kvreg, message, ...) \
+ pr_debug("%s: " message, (kvreg)->name, ##__VA_ARGS__)
+
+/* CPUSS power domain register offsets */
+#define APCC_PWR_CTL_OVERRIDE 0x38
+#define APCC_PGS_RET_STATUS 0xe0
+
+/* APCS CSR register offsets */
+#define APCS_VERSION 0xfd0
+
+/* Cluster power domain register offsets */
+#define APC_LDO_VREF_SET 0x08
+#define APC_RET_VREF_SET 0x10
+#define APC_PWR_GATE_MODE 0x18
+#define APC_PWR_GATE_DLY 0x28
+#define APC_LDO_CFG 0x40
+#define APC_APM_CFG 0x50
+#define APC_PGSCTL_STS 0x60
+
+/* Register bit mask definitions */
+#define PWR_GATE_SWITCH_MODE_MASK GENMASK(0, 0)
+#define VREF_MASK GENMASK(6, 0)
+#define APM_CFG_MASK GENMASK(7, 0)
+#define FSM_CUR_STATE_MASK GENMASK(5, 4)
+#define APC_PWR_GATE_DLY_MASK GENMASK(11, 0)
+#define APCC_PGS_MASK(cluster) (0x7 << (0x3 * (cluster)))
+
+/* Register bit definitions */
+#define VREF_BIT_POS 0
+
+/* Maximum delay to wait before declaring a Power Gate Switch timed out */
+#define PWR_GATE_SWITCH_TIMEOUT_US 5
+
+#define PWR_GATE_SWITCH_MODE_LDO 0
+#define PWR_GATE_SWITCH_MODE_BHS 1
+#define MSM8996_CPUSS_VER_1P1 0x10010000
+
+#define LDO_N_VOLTAGES 0x80
+#define AFFINITY_LEVEL_M3 2
+#define SHARED_CPU_REG_NUM 0
+#define VDD_SUPPLY_STEP_UV 5000
+#define VDD_SUPPLY_MIN_UV 80000
+
+struct kryo_regulator {
+ struct list_head link;
+ spinlock_t slock;
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ struct regulator_dev *retention_rdev;
+ struct regulator_desc retention_desc;
+ const char *name;
+ enum kryo_supply_mode mode;
+ enum kryo_supply_mode retention_mode;
+ enum kryo_supply_mode pre_lpm_state_mode;
+ void __iomem *reg_base;
+ void __iomem *pm_apcc_base;
+ struct dentry *debugfs;
+ struct notifier_block cpu_pm_notifier;
+ unsigned long lpm_enter_count;
+ unsigned long lpm_exit_count;
+ int volt;
+ int retention_volt;
+ int headroom_volt;
+ int pre_lpm_state_volt;
+ int vref_func_step_volt;
+ int vref_func_min_volt;
+ int vref_func_max_volt;
+ int vref_ret_step_volt;
+ int vref_ret_min_volt;
+ int vref_ret_max_volt;
+ int cluster_num;
+ u32 ldo_config_init;
+ u32 apm_config_init;
+ u32 version;
+ bool vreg_en;
+};
+
+static struct dentry *kryo_debugfs_base;
+static DEFINE_MUTEX(kryo_regulator_list_mutex);
+static LIST_HEAD(kryo_regulator_list);
+
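+/*
+ * is_between() - return true if value lies within the inclusive range
+ * bounded by left and right, regardless of which bound is larger.
+ */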
+static bool is_between(int left, int right, int value)
+{
+ if (left >= right && left >= value && value >= right)
+ return true;
+ if (left <= right && left <= value && value <= right)
+ return true;
+
+ return false;
+}
+
+static void kryo_masked_write(struct kryo_regulator *kvreg,
+ int reg, u32 mask, u32 val)
+{
+ u32 reg_val;
+
+ reg_val = readl_relaxed(kvreg->reg_base + reg);
+ reg_val &= ~mask;
+ reg_val |= (val & mask);
+
+ writel_relaxed(reg_val, kvreg->reg_base + reg);
+
+ /* Ensure write above completes */
+ mb();
+}
+
+static inline void kryo_pm_apcc_masked_write(struct kryo_regulator *kvreg,
+ int reg, u32 mask, u32 val)
+{
+ u32 reg_val, orig_val;
+
+ reg_val = orig_val = readl_relaxed(kvreg->pm_apcc_base + reg);
+ reg_val &= ~mask;
+ reg_val |= (val & mask);
+
+ if (reg_val != orig_val) {
+ writel_relaxed(reg_val, kvreg->pm_apcc_base + reg);
+
+ /* Ensure write above completes */
+ mb();
+ }
+}
+
+static inline int kryo_decode_retention_volt(struct kryo_regulator *kvreg,
+ int reg)
+{
+ return kvreg->vref_ret_min_volt + reg * kvreg->vref_ret_step_volt;
+}
+
+static inline int kryo_encode_retention_volt(struct kryo_regulator *kvreg,
+ int volt)
+{
+ int encoded_volt = DIV_ROUND_UP(volt - kvreg->vref_ret_min_volt,
+ kvreg->vref_ret_step_volt);
+
+ if (encoded_volt >= LDO_N_VOLTAGES || encoded_volt < 0)
+ return -EINVAL;
+ else
+ return encoded_volt;
+}
+
+static inline int kryo_decode_functional_volt(struct kryo_regulator *kvreg,
+ int reg)
+{
+ return kvreg->vref_func_min_volt + reg * kvreg->vref_func_step_volt;
+}
+
+static inline int kryo_encode_functional_volt(struct kryo_regulator *kvreg,
+ int volt)
+{
+ int encoded_volt = DIV_ROUND_UP(volt - kvreg->vref_func_min_volt,
+ kvreg->vref_func_step_volt);
+
+ if (encoded_volt >= LDO_N_VOLTAGES || encoded_volt < 0)
+ return -EINVAL;
+ else
+ return encoded_volt;
+}
+
+/* Locks must be held by the caller */
+static int kryo_set_retention_volt(struct kryo_regulator *kvreg, int volt)
+{
+ int reg_val;
+
+ reg_val = kryo_encode_retention_volt(kvreg, volt);
+ if (reg_val < 0) {
+ kvreg_err(kvreg, "unsupported LDO retention voltage, rc=%d\n",
+ reg_val);
+ return reg_val;
+ }
+
+ kryo_masked_write(kvreg, APC_RET_VREF_SET, VREF_MASK,
+ reg_val << VREF_BIT_POS);
+
+ kvreg->retention_volt = kryo_decode_retention_volt(kvreg, reg_val);
+ kvreg_debug(kvreg, "Set LDO retention voltage=%d uV (0x%x)\n",
+ kvreg->retention_volt, reg_val);
+
+ return 0;
+}
+
+/* Locks must be held by the caller */
+static int kryo_set_ldo_volt(struct kryo_regulator *kvreg, int volt)
+{
+ int reg_val;
+
+ /*
+ * Assume the consumer ensures the requested voltage satisfies the
+ * headroom and adjustment voltage requirements. The value may be
+ * rounded up if necessary, to match the LDO resolution. Configure it.
+ */
+ reg_val = kryo_encode_functional_volt(kvreg, volt);
+ if (reg_val < 0) {
+ kvreg_err(kvreg, "unsupported LDO functional voltage, rc=%d\n",
+ reg_val);
+ return reg_val;
+ }
+
+ kryo_masked_write(kvreg, APC_LDO_VREF_SET, VREF_MASK,
+ reg_val << VREF_BIT_POS);
+
+ kvreg->volt = kryo_decode_functional_volt(kvreg, reg_val);
+ kvreg_debug(kvreg, "Set LDO voltage=%d uV (0x%x)\n",
+ kvreg->volt, reg_val);
+
+ return 0;
+}
+
+/* Locks must be held by the caller */
+static int kryo_configure_mode(struct kryo_regulator *kvreg,
+ enum kryo_supply_mode mode)
+{
+ u32 reg;
+ int timeout = PWR_GATE_SWITCH_TIMEOUT_US;
+
+ /* Configure LDO or BHS mode */
+ kryo_masked_write(kvreg, APC_PWR_GATE_MODE, PWR_GATE_SWITCH_MODE_MASK,
+ mode == LDO_MODE ? PWR_GATE_SWITCH_MODE_LDO
+ : PWR_GATE_SWITCH_MODE_BHS);
+
+ /* Complete register write before reading HW status register */
+ mb();
+
+ /* Delay to allow Power Gate Switch FSM to reach idle state */
+ while (timeout > 0) {
+ reg = readl_relaxed(kvreg->reg_base + APC_PGSCTL_STS);
+ if (!(reg & FSM_CUR_STATE_MASK))
+ break;
+
+ udelay(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ kvreg_err(kvreg, "PGS switch to %s failed. APC_PGSCTL_STS=0x%x\n",
+ mode == LDO_MODE ? "LDO" : "BHS", reg);
+ return -ETIMEDOUT;
+ }
+
+ kvreg->mode = mode;
+ kvreg_debug(kvreg, "using %s mode\n", mode == LDO_MODE ? "LDO" : "BHS");
+
+ return 0;
+}
+
+static int kryo_regulator_enable(struct regulator_dev *rdev)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+ int rc;
+ unsigned long flags;
+
+ if (kvreg->vreg_en == true)
+ return 0;
+
+ spin_lock_irqsave(&kvreg->slock, flags);
+ rc = kryo_set_ldo_volt(kvreg, kvreg->volt);
+ if (rc) {
+ kvreg_err(kvreg, "set voltage failed, rc=%d\n", rc);
+ goto done;
+ }
+
+ kvreg->vreg_en = true;
+ kvreg_debug(kvreg, "enabled\n");
+
+done:
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+
+ return rc;
+}
+
+static int kryo_regulator_disable(struct regulator_dev *rdev)
+{
+	struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+	unsigned long flags;
+
+	if (kvreg->vreg_en == false)
+		return 0;
+
+	spin_lock_irqsave(&kvreg->slock, flags);
+	kvreg->vreg_en = false;
+	kvreg_debug(kvreg, "disabled\n");
+	spin_unlock_irqrestore(&kvreg->slock, flags);
+
+	return 0;
+}
+
+static int kryo_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+
+ return kvreg->vreg_en;
+}
+
+static int kryo_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_volt, int max_volt, unsigned *selector)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kvreg->slock, flags);
+
+ if (!kvreg->vreg_en) {
+ kvreg->volt = min_volt;
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+ return 0;
+ }
+
+ rc = kryo_set_ldo_volt(kvreg, min_volt);
+ if (rc)
+ kvreg_err(kvreg, "set voltage failed, rc=%d\n", rc);
+
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+
+ return rc;
+}
+
+static int kryo_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+
+ return kvreg->volt;
+}
+
+static int kryo_regulator_set_bypass(struct regulator_dev *rdev,
+ bool enable)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kvreg->slock, flags);
+
+ /*
+ * LDO Vref voltage must be programmed before switching
+ * modes to ensure stable operation.
+ */
+ rc = kryo_set_ldo_volt(kvreg, kvreg->volt);
+ if (rc)
+ kvreg_err(kvreg, "set voltage failed, rc=%d\n", rc);
+
+ rc = kryo_configure_mode(kvreg, enable);
+ if (rc)
+ kvreg_err(kvreg, "could not configure to %s mode\n",
+ enable == LDO_MODE ? "LDO" : "BHS");
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+
+ return rc;
+}
+
+static int kryo_regulator_get_bypass(struct regulator_dev *rdev,
+ bool *enable)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+
+ *enable = kvreg->mode;
+
+ return 0;
+}
+
+static int kryo_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+
+ if (selector < kvreg->desc.n_voltages)
+ return kryo_decode_functional_volt(kvreg, selector);
+ else
+ return 0;
+}
+
+static int kryo_regulator_retention_set_voltage(struct regulator_dev *rdev,
+ int min_volt, int max_volt, unsigned *selector)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kvreg->slock, flags);
+ rc = kryo_set_retention_volt(kvreg, min_volt);
+ if (rc)
+ kvreg_err(kvreg, "set voltage failed, rc=%d\n", rc);
+
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+
+ return rc;
+}
+
+static int kryo_regulator_retention_get_voltage(struct regulator_dev *rdev)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+
+ return kvreg->retention_volt;
+}
+
+static int kryo_regulator_retention_set_bypass(struct regulator_dev *rdev,
+ bool enable)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+ int timeout = PWR_GATE_SWITCH_TIMEOUT_US;
+ int rc = 0;
+ u32 reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kvreg->slock, flags);
+
+ kryo_pm_apcc_masked_write(kvreg,
+ APCC_PWR_CTL_OVERRIDE,
+ APCC_PGS_MASK(kvreg->cluster_num),
+ enable ?
+ 0 : APCC_PGS_MASK(kvreg->cluster_num));
+
+ /* Ensure write above completes before proceeding */
+ mb();
+
+ if (kvreg->version < MSM8996_CPUSS_VER_1P1) {
+ /* No status register, delay worst case */
+ udelay(PWR_GATE_SWITCH_TIMEOUT_US);
+ } else {
+ while (timeout > 0) {
+ reg_val = readl_relaxed(kvreg->pm_apcc_base
+ + APCC_PGS_RET_STATUS);
+ if (!(reg_val & APCC_PGS_MASK(kvreg->cluster_num)))
+ break;
+
+ udelay(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ kvreg_err(kvreg, "PGS switch timed out. APCC_PGS_RET_STATUS=0x%x\n",
+ reg_val);
+ rc = -ETIMEDOUT;
+ goto done;
+ }
+ }
+
+ /* Bypassed LDO retention operation == disallow LDO retention */
+ kvreg_debug(kvreg, "%s LDO retention\n",
+ enable ? "enabled" : "disabled");
+ kvreg->retention_mode = enable == LDO_MODE ? LDO_MODE
+ : BHS_MODE;
+
+done:
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+
+ return rc;
+}
+
+static int kryo_regulator_retention_get_bypass(struct regulator_dev *rdev,
+ bool *enable)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+
+ *enable = kvreg->retention_mode;
+
+ return 0;
+}
+
+static int kryo_regulator_retention_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct kryo_regulator *kvreg = rdev_get_drvdata(rdev);
+
+ if (selector < kvreg->retention_desc.n_voltages)
+ return kryo_decode_retention_volt(kvreg, selector);
+ else
+ return 0;
+}
+
+static struct regulator_ops kryo_regulator_ops = {
+ .enable = kryo_regulator_enable,
+ .disable = kryo_regulator_disable,
+ .is_enabled = kryo_regulator_is_enabled,
+ .set_voltage = kryo_regulator_set_voltage,
+ .get_voltage = kryo_regulator_get_voltage,
+ .set_bypass = kryo_regulator_set_bypass,
+ .get_bypass = kryo_regulator_get_bypass,
+ .list_voltage = kryo_regulator_list_voltage,
+};
+
+static struct regulator_ops kryo_regulator_retention_ops = {
+ .set_voltage = kryo_regulator_retention_set_voltage,
+ .get_voltage = kryo_regulator_retention_get_voltage,
+ .set_bypass = kryo_regulator_retention_set_bypass,
+ .get_bypass = kryo_regulator_retention_get_bypass,
+ .list_voltage = kryo_regulator_retention_list_voltage,
+};
+
+static void kryo_ldo_voltage_init(struct kryo_regulator *kvreg)
+{
+ kryo_set_retention_volt(kvreg, kvreg->retention_volt);
+ kryo_set_ldo_volt(kvreg, kvreg->volt);
+}
+
+#define APC_PWR_GATE_DLY_INIT 0x00000101
+static int kryo_hw_init(struct kryo_regulator *kvreg)
+{
+ /* Set up VREF_LDO and VREF_RET */
+ kryo_ldo_voltage_init(kvreg);
+
+ /* Program LDO and APM configuration registers */
+ writel_relaxed(kvreg->ldo_config_init, kvreg->reg_base + APC_LDO_CFG);
+
+ kryo_masked_write(kvreg, APC_APM_CFG, APM_CFG_MASK,
+ kvreg->apm_config_init);
+
+ /* Configure power gate sequencer delay */
+ kryo_masked_write(kvreg, APC_PWR_GATE_DLY, APC_PWR_GATE_DLY_MASK,
+ APC_PWR_GATE_DLY_INIT);
+
+ /* Allow LDO retention mode only when it's safe to do so */
+ kryo_pm_apcc_masked_write(kvreg,
+ APCC_PWR_CTL_OVERRIDE,
+ APCC_PGS_MASK(kvreg->cluster_num),
+ APCC_PGS_MASK(kvreg->cluster_num));
+
+ /* Complete the above writes before other accesses */
+ mb();
+
+ return 0;
+}
+
+static ssize_t kryo_dbg_mode_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ struct kryo_regulator *kvreg = file->private_data;
+ char buf[10];
+ int len = 0;
+ u32 reg_val;
+ unsigned long flags;
+
+ if (!kvreg)
+ return -ENODEV;
+
+ /* Confirm HW state matches Kryo regulator device state */
+ spin_lock_irqsave(&kvreg->slock, flags);
+ reg_val = readl_relaxed(kvreg->reg_base + APC_PWR_GATE_MODE);
+ if (((reg_val & PWR_GATE_SWITCH_MODE_MASK) == PWR_GATE_SWITCH_MODE_LDO
+ && kvreg->mode != LDO_MODE) ||
+ ((reg_val & PWR_GATE_SWITCH_MODE_MASK) == PWR_GATE_SWITCH_MODE_BHS
+ && kvreg->mode != BHS_MODE)) {
+ kvreg_err(kvreg, "HW state disagrees on PWR gate mode! reg=0x%x\n",
+ reg_val);
+ len = snprintf(buf, sizeof(buf), "ERR\n");
+ } else {
+ len = snprintf(buf, sizeof(buf), "%s\n",
+ kvreg->mode == LDO_MODE ?
+ "LDO" : "BHS");
+ }
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
+}
+
+static int kryo_dbg_base_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations kryo_dbg_mode_fops = {
+ .open = kryo_dbg_base_open,
+ .read = kryo_dbg_mode_read,
+};
+
+static void kryo_debugfs_init(struct kryo_regulator *kvreg)
+{
+ struct dentry *temp;
+
+ if (IS_ERR_OR_NULL(kryo_debugfs_base)) {
+ if (PTR_ERR(kryo_debugfs_base) != -ENODEV)
+ kvreg_err(kvreg, "Base directory missing, cannot create debugfs nodes rc=%ld\n",
+ PTR_ERR(kryo_debugfs_base));
+ return;
+ }
+
+ kvreg->debugfs = debugfs_create_dir(kvreg->name, kryo_debugfs_base);
+
+ if (IS_ERR_OR_NULL(kvreg->debugfs)) {
+ kvreg_err(kvreg, "debugfs directory creation failed rc=%ld\n",
+ PTR_ERR(kvreg->debugfs));
+ return;
+ }
+
+ temp = debugfs_create_file("mode", S_IRUGO, kvreg->debugfs,
+ kvreg, &kryo_dbg_mode_fops);
+
+ if (IS_ERR_OR_NULL(temp)) {
+ kvreg_err(kvreg, "mode node creation failed rc=%ld\n",
+ PTR_ERR(temp));
+ return;
+ }
+}
+
+static void kryo_debugfs_deinit(struct kryo_regulator *kvreg)
+{
+ debugfs_remove_recursive(kvreg->debugfs);
+}
+
+static void kryo_debugfs_base_init(void)
+{
+ kryo_debugfs_base = debugfs_create_dir(KRYO_REGULATOR_DRIVER_NAME,
+ NULL);
+ if (IS_ERR_OR_NULL(kryo_debugfs_base)) {
+ if (PTR_ERR(kryo_debugfs_base) != -ENODEV)
+ pr_err("%s debugfs base directory creation failed rc=%ld\n",
+ KRYO_REGULATOR_DRIVER_NAME,
+ PTR_ERR(kryo_debugfs_base));
+ }
+}
+
+static void kryo_debugfs_base_remove(void)
+{
+ debugfs_remove_recursive(kryo_debugfs_base);
+}
+
+static int kryo_regulator_init_data(struct platform_device *pdev,
+ struct kryo_regulator *kvreg)
+{
+ int rc = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *temp;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apc");
+ if (!res) {
+ dev_err(dev, "PM APC register address missing\n");
+ return -EINVAL;
+ }
+
+ kvreg->reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!kvreg->reg_base) {
+ dev_err(dev, "failed to map PM APC registers\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc");
+ if (!res) {
+ dev_err(dev, "PM APCC register address missing\n");
+ return -EINVAL;
+ }
+
+ kvreg->pm_apcc_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!kvreg->pm_apcc_base) {
+ dev_err(dev, "failed to map PM APCC registers\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs-csr");
+ if (!res) {
+ dev_err(dev, "missing APCS CSR physical base address");
+ return -EINVAL;
+ }
+
+ temp = ioremap(res->start, resource_size(res));
+ if (!temp) {
+ dev_err(dev, "failed to map APCS CSR registers\n");
+ return -ENOMEM;
+ }
+
+ kvreg->version = readl_relaxed(temp + APCS_VERSION);
+ iounmap(temp);
+
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,vref-functional-step-voltage",
+ &kvreg->vref_func_step_volt);
+ if (rc < 0) {
+ dev_err(dev, "qcom,vref-functional-step-voltage missing rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,vref-functional-min-voltage",
+ &kvreg->vref_func_min_volt);
+ if (rc < 0) {
+ dev_err(dev, "qcom,vref-functional-min-voltage missing rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ kvreg->vref_func_max_volt = kryo_decode_functional_volt(kvreg,
+ LDO_N_VOLTAGES - 1);
+
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,vref-retention-step-voltage",
+ &kvreg->vref_ret_step_volt);
+ if (rc < 0) {
+ dev_err(dev, "qcom,vref-retention-step-voltage missing rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,vref-retention-min-voltage",
+ &kvreg->vref_ret_min_volt);
+ if (rc < 0) {
+ dev_err(dev, "qcom,vref-retention-min-voltage missing rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ kvreg->vref_ret_max_volt = kryo_decode_retention_volt(kvreg,
+ LDO_N_VOLTAGES - 1);
+
+ rc = of_property_read_u32(dev->of_node, "qcom,ldo-default-voltage",
+ &kvreg->volt);
+ if (rc < 0) {
+ dev_err(dev, "qcom,ldo-default-voltage missing rc=%d\n", rc);
+ return rc;
+ }
+ if (!is_between(kvreg->vref_func_min_volt,
+ kvreg->vref_func_max_volt,
+ kvreg->volt)) {
+ dev_err(dev, "qcom,ldo-default-voltage=%d uV outside allowed range\n",
+ kvreg->volt);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(dev->of_node, "qcom,retention-voltage",
+ &kvreg->retention_volt);
+ if (rc < 0) {
+ dev_err(dev, "qcom,retention-voltage missing rc=%d\n", rc);
+ return rc;
+ }
+ if (!is_between(kvreg->vref_ret_min_volt,
+ kvreg->vref_ret_max_volt,
+ kvreg->retention_volt)) {
+ dev_err(dev, "qcom,retention-voltage=%d uV outside allowed range\n",
+ kvreg->retention_volt);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(dev->of_node, "qcom,ldo-headroom-voltage",
+ &kvreg->headroom_volt);
+ if (rc < 0) {
+ dev_err(dev, "qcom,ldo-headroom-voltage missing rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(dev->of_node, "qcom,ldo-config-init",
+ &kvreg->ldo_config_init);
+ if (rc < 0) {
+ dev_err(dev, "qcom,ldo-config-init missing rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(dev->of_node, "qcom,apm-config-init",
+ &kvreg->apm_config_init);
+ if (rc < 0) {
+ dev_err(dev, "qcom,apm-config-init missing rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(dev->of_node, "qcom,cluster-num",
+ &kvreg->cluster_num);
+ if (rc < 0) {
+ dev_err(dev, "qcom,cluster-num missing rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
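+
+/*
+ * Illustrative device tree fragment for the properties parsed by
+ * kryo_regulator_init_data(); the values below are hypothetical and only
+ * show the expected shape of the node:
+ *
+ *	reg-names = "pm-apc", "pm-apcc", "apcs-csr";
+ *	qcom,vref-functional-step-voltage = <4800>;
+ *	qcom,vref-functional-min-voltage = <465000>;
+ *	qcom,vref-retention-step-voltage = <4800>;
+ *	qcom,vref-retention-min-voltage = <465000>;
+ *	qcom,ldo-default-voltage = <750000>;
+ *	qcom,retention-voltage = <520000>;
+ *	qcom,ldo-headroom-voltage = <150000>;
+ *	qcom,ldo-config-init = <0x30>;
+ *	qcom,apm-config-init = <0x0>;
+ *	qcom,cluster-num = <0>;
+ */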
+
+static int kryo_regulator_retention_init(struct kryo_regulator *kvreg,
+ struct platform_device *pdev,
+ struct device_node *ret_node)
+{
+ struct device *dev = &pdev->dev;
+ struct regulator_init_data *init_data;
+ struct regulator_config reg_config = {};
+ int rc;
+
+ init_data = of_get_regulator_init_data(dev, ret_node);
+ if (!init_data) {
+ kvreg_err(kvreg, "regulator init data is missing\n");
+ return -EINVAL;
+ }
+
+ if (!init_data->constraints.name) {
+ kvreg_err(kvreg, "regulator name is missing from constraints\n");
+ return -EINVAL;
+ }
+
+ init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_BYPASS
+ | REGULATOR_CHANGE_VOLTAGE;
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+
+ kvreg->retention_desc.name = init_data->constraints.name;
+ kvreg->retention_desc.n_voltages = LDO_N_VOLTAGES;
+ kvreg->retention_desc.ops = &kryo_regulator_retention_ops;
+ kvreg->retention_desc.type = REGULATOR_VOLTAGE;
+ kvreg->retention_desc.owner = THIS_MODULE;
+
+ reg_config.dev = dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = kvreg;
+ reg_config.of_node = ret_node;
+ kvreg->retention_rdev = regulator_register(&kvreg->retention_desc,
+ &reg_config);
+ if (IS_ERR(kvreg->retention_rdev)) {
+ rc = PTR_ERR(kvreg->retention_rdev);
+ kvreg_err(kvreg, "regulator_register failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static int kryo_regulator_lpm_prepare(struct kryo_regulator *kvreg)
+{
+ int vdd_volt_uv, bhs_volt, vdd_vlvl = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kvreg->slock, flags);
+
+ kvreg->pre_lpm_state_mode = kvreg->mode;
+ kvreg->pre_lpm_state_volt = kvreg->volt;
+
+ if (kvreg->mode == LDO_MODE) {
+ if (!vdd_vlvl) {
+ vdd_vlvl = msm_spm_get_vdd(SHARED_CPU_REG_NUM);
+ if (vdd_vlvl < 0) {
+ kvreg_err(kvreg, "could not get vdd supply voltage level, rc=%d\n",
+ vdd_vlvl);
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+ return NOTIFY_BAD;
+ }
+
+ vdd_volt_uv = vdd_vlvl * VDD_SUPPLY_STEP_UV
+ + VDD_SUPPLY_MIN_UV;
+ }
+ kvreg_debug(kvreg, "switching to BHS mode, vdd_apcc=%d uV, current LDO Vref=%d, LPM enter count=%lx\n",
+ vdd_volt_uv, kvreg->volt, kvreg->lpm_enter_count);
+
+ /*
+ * Program vdd supply minus LDO headroom as voltage.
+ * Cap this value to the maximum physically supported
+ * LDO voltage, if necessary.
+ */
+ bhs_volt = vdd_volt_uv - kvreg->headroom_volt;
+ if (bhs_volt > kvreg->vref_func_max_volt) {
+ kvreg_debug(kvreg, "limited to LDO output of %d uV when switching to BHS mode\n",
+ kvreg->vref_func_max_volt);
+ bhs_volt = kvreg->vref_func_max_volt;
+ }
+
+ kryo_set_ldo_volt(kvreg, bhs_volt);
+
+ /* Switch Power Gate Mode */
+ kryo_configure_mode(kvreg, BHS_MODE);
+ }
+
+ kvreg->lpm_enter_count++;
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+
+ return NOTIFY_OK;
+}
+
+static int kryo_regulator_lpm_resume(struct kryo_regulator *kvreg)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kvreg->slock, flags);
+
+ if (kvreg->mode == BHS_MODE &&
+ kvreg->pre_lpm_state_mode == LDO_MODE) {
+ kvreg_debug(kvreg, "switching to LDO mode, cached LDO Vref=%d, LPM exit count=%lx\n",
+ kvreg->pre_lpm_state_volt, kvreg->lpm_exit_count);
+
+ /*
+ * Cached voltage value corresponds to vdd supply minus
+ * LDO headroom, reprogram it.
+ */
+ kryo_set_ldo_volt(kvreg, kvreg->volt);
+
+ /* Switch Power Gate Mode */
+ kryo_configure_mode(kvreg, LDO_MODE);
+
+ /* Request final LDO output voltage */
+ kryo_set_ldo_volt(kvreg, kvreg->pre_lpm_state_volt);
+ }
+
+ kvreg->lpm_exit_count++;
+ spin_unlock_irqrestore(&kvreg->slock, flags);
+
+ if (kvreg->lpm_exit_count != kvreg->lpm_enter_count) {
+ kvreg_err(kvreg, "LPM entry/exit counter mismatch, this is not expected: enter=%lx exit=%lx\n",
+ kvreg->lpm_enter_count, kvreg->lpm_exit_count);
+ BUG_ON(1);
+ }
+
+ return NOTIFY_OK;
+}
+
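+/*
+ * CPU PM notifier callback: when the cluster enters its deepest low power
+ * mode (affinity level M3), kryo_regulator_lpm_prepare() switches the rail
+ * from LDO to BHS mode; on exit, kryo_regulator_lpm_resume() restores the
+ * pre-LPM mode and LDO output voltage.
+ */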
+static int kryo_regulator_cpu_pm_callback(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ struct kryo_regulator *kvreg = container_of(self, struct kryo_regulator,
+ cpu_pm_notifier);
+ unsigned long aff_level = (unsigned long) v;
+ int rc = NOTIFY_OK;
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ if (aff_level == AFFINITY_LEVEL_M3)
+ rc = kryo_regulator_lpm_prepare(kvreg);
+ break;
+ case CPU_CLUSTER_PM_EXIT:
+ if (aff_level == AFFINITY_LEVEL_M3)
+ rc = kryo_regulator_lpm_resume(kvreg);
+ break;
+ }
+
+ return rc;
+}
+
+static int kryo_regulator_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct kryo_regulator *kvreg;
+ struct regulator_config reg_config = {};
+ struct regulator_init_data *init_data = pdev->dev.platform_data;
+ struct device_node *child;
+ int rc = 0;
+
+ if (!dev->of_node) {
+ dev_err(dev, "Device tree node is missing\n");
+ return -ENODEV;
+ }
+
+ init_data = of_get_regulator_init_data(dev, dev->of_node);
+
+ if (!init_data) {
+ dev_err(dev, "regulator init data is missing\n");
+ return -EINVAL;
+ }
+
+ if (!init_data->constraints.name) {
+ dev_err(dev, "regulator name is missing from constraints\n");
+ return -EINVAL;
+ }
+
+ init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE
+ | REGULATOR_CHANGE_BYPASS | REGULATOR_CHANGE_STATUS;
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+
+ kvreg = devm_kzalloc(dev, sizeof(*kvreg), GFP_KERNEL);
+ if (!kvreg) {
+ dev_err(dev, "memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rc = kryo_regulator_init_data(pdev, kvreg);
+ if (rc) {
+ dev_err(dev, "could not parse and ioremap all device tree properties\n");
+ return rc;
+ }
+
+ spin_lock_init(&kvreg->slock);
+ kvreg->name = init_data->constraints.name;
+ kvreg->desc.name = kvreg->name;
+ kvreg->desc.n_voltages = LDO_N_VOLTAGES;
+ kvreg->desc.ops = &kryo_regulator_ops;
+ kvreg->desc.type = REGULATOR_VOLTAGE;
+ kvreg->desc.owner = THIS_MODULE;
+ kvreg->mode = BHS_MODE;
+
+ for_each_available_child_of_node(dev->of_node, child) {
+		rc = kryo_regulator_retention_init(kvreg, pdev, child);
+ if (rc) {
+ dev_err(dev, "could not initialize retention regulator, rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ }
+
+ /* CPUSS PM Register Initialization */
+ rc = kryo_hw_init(kvreg);
+ if (rc) {
+ dev_err(dev, "unable to perform CPUSS PM initialization sequence\n");
+ return rc;
+ }
+
+ reg_config.dev = dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = kvreg;
+ reg_config.of_node = dev->of_node;
+ kvreg->rdev = regulator_register(&kvreg->desc, &reg_config);
+ if (IS_ERR(kvreg->rdev)) {
+ rc = PTR_ERR(kvreg->rdev);
+ kvreg_err(kvreg, "regulator_register failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, kvreg);
+ kryo_debugfs_init(kvreg);
+
+ mutex_lock(&kryo_regulator_list_mutex);
+ list_add_tail(&kvreg->link, &kryo_regulator_list);
+ mutex_unlock(&kryo_regulator_list_mutex);
+
+ kvreg->cpu_pm_notifier.notifier_call = kryo_regulator_cpu_pm_callback;
+ cpu_pm_register_notifier(&kvreg->cpu_pm_notifier);
+ kvreg_debug(kvreg, "registered cpu pm notifier\n");
+
+ kvreg_info(kvreg, "default LDO functional volt=%d uV, LDO retention volt=%d uV, Vref func=%d + %d*(val), cluster-num=%d\n",
+ kvreg->volt, kvreg->retention_volt,
+ kvreg->vref_func_min_volt,
+ kvreg->vref_func_step_volt,
+ kvreg->cluster_num);
+
+ return rc;
+}
+
+static int kryo_regulator_remove(struct platform_device *pdev)
+{
+ struct kryo_regulator *kvreg = platform_get_drvdata(pdev);
+
+ mutex_lock(&kryo_regulator_list_mutex);
+ list_del(&kvreg->link);
+ mutex_unlock(&kryo_regulator_list_mutex);
+
+ cpu_pm_unregister_notifier(&kvreg->cpu_pm_notifier);
+ regulator_unregister(kvreg->rdev);
+ platform_set_drvdata(pdev, NULL);
+ kryo_debugfs_deinit(kvreg);
+
+ return 0;
+}
+
+static struct of_device_id kryo_regulator_match_table[] = {
+ { .compatible = "qcom,kryo-regulator", },
+ {}
+};
+
+static struct platform_driver kryo_regulator_driver = {
+ .probe = kryo_regulator_probe,
+ .remove = kryo_regulator_remove,
+ .driver = {
+ .name = KRYO_REGULATOR_DRIVER_NAME,
+ .of_match_table = kryo_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init kryo_regulator_init(void)
+{
+ kryo_debugfs_base_init();
+ return platform_driver_register(&kryo_regulator_driver);
+}
+
+static void __exit kryo_regulator_exit(void)
+{
+ platform_driver_unregister(&kryo_regulator_driver);
+ kryo_debugfs_base_remove();
+}
+
+MODULE_DESCRIPTION("Kryo regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(kryo_regulator_init);
+module_exit(kryo_regulator_exit);
diff --git a/drivers/regulator/mem-acc-regulator.c b/drivers/regulator/mem-acc-regulator.c
new file mode 100644
index 000000000000..4cecc0c40cd7
--- /dev/null
+++ b/drivers/regulator/mem-acc-regulator.c
@@ -0,0 +1,1390 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "ACC: %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/string.h>
+#include <soc/qcom/scm.h>
+
+#define MEM_ACC_DEFAULT_SEL_SIZE 2
+
+#define BYTES_PER_FUSE_ROW 8
+
+/* mem-acc config flags */
+
+enum {
+ MEM_ACC_USE_CORNER_ACC_MAP = BIT(0),
+ MEM_ACC_USE_ADDR_VAL_MAP = BIT(1),
+};
+
+#define FUSE_MAP_NO_MATCH (-1)
+#define FUSE_PARAM_MATCH_ANY (-1)
+#define PARAM_MATCH_ANY (-1)
+
+enum {
+ MEMORY_L1,
+ MEMORY_L2,
+ MEMORY_MAX,
+};
+
+#define MEM_ACC_TYPE_MAX 6
+
+/**
+ * struct acc_reg_value - Acc register configuration structure
+ * @addr_index: An index in to phys_reg_addr_list and remap_reg_addr_list
+ * to get the ACC register physical address and remapped address.
+ * @reg_val: Value to program in to the register mapped by addr_index.
+ */
+struct acc_reg_value {
+ u32 addr_index;
+ u32 reg_val;
+};
+
+struct corner_acc_reg_config {
+ struct acc_reg_value *reg_config_list;
+ int max_reg_config_len;
+};
+
+struct mem_acc_regulator {
+ struct device *dev;
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+
+ int corner;
+ bool mem_acc_supported[MEMORY_MAX];
+ bool mem_acc_custom_supported[MEMORY_MAX];
+
+ u32 *acc_sel_mask[MEMORY_MAX];
+ u32 *acc_sel_bit_pos[MEMORY_MAX];
+ u32 acc_sel_bit_size[MEMORY_MAX];
+ u32 num_acc_sel[MEMORY_MAX];
+ u32 *acc_en_bit_pos;
+ u32 num_acc_en;
+ u32 *corner_acc_map;
+ u32 num_corners;
+ u32 override_fuse_value;
+ int override_map_match;
+ int override_map_count;
+
+ void __iomem *acc_sel_base[MEMORY_MAX];
+ void __iomem *acc_en_base;
+ phys_addr_t acc_sel_addr[MEMORY_MAX];
+ phys_addr_t acc_en_addr;
+ u32 flags;
+
+ void __iomem *acc_custom_addr[MEMORY_MAX];
+ u32 *acc_custom_data[MEMORY_MAX];
+
+ phys_addr_t mem_acc_type_addr[MEM_ACC_TYPE_MAX];
+ u32 *mem_acc_type_data;
+
+ /* eFuse parameters */
+ phys_addr_t efuse_addr;
+ void __iomem *efuse_base;
+
+ u32 num_acc_reg;
+ u32 *phys_reg_addr_list;
+ void __iomem **remap_reg_addr_list;
+ struct corner_acc_reg_config *corner_acc_reg_config;
+};
+
+static DEFINE_MUTEX(mem_acc_memory_mutex);
+
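+/*
+ * Read one 64-bit eFuse row, either directly from the ioremapped fuse base
+ * or, when use_tz_api is set, through an SCM call into the secure world.
+ */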
+static u64 mem_acc_read_efuse_row(struct mem_acc_regulator *mem_acc_vreg,
+ u32 row_num, bool use_tz_api)
+{
+ int rc;
+ u64 efuse_bits;
+ struct scm_desc desc = {0};
+ struct mem_acc_read_req {
+ u32 row_address;
+ int addr_type;
+ } req;
+
+ struct mem_acc_read_rsp {
+ u32 row_data[2];
+ u32 status;
+ } rsp;
+
+ if (!use_tz_api) {
+ efuse_bits = readq_relaxed(mem_acc_vreg->efuse_base
+ + row_num * BYTES_PER_FUSE_ROW);
+ return efuse_bits;
+ }
+
+ desc.args[0] = req.row_address = mem_acc_vreg->efuse_addr +
+ row_num * BYTES_PER_FUSE_ROW;
+ desc.args[1] = req.addr_type = 0;
+ desc.arginfo = SCM_ARGS(2);
+ efuse_bits = 0;
+
+ if (!is_scm_armv8()) {
+ rc = scm_call(SCM_SVC_FUSE, SCM_FUSE_READ,
+ &req, sizeof(req), &rsp, sizeof(rsp));
+ } else {
+ rc = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE, SCM_FUSE_READ),
+ &desc);
+ rsp.row_data[0] = desc.ret[0];
+ rsp.row_data[1] = desc.ret[1];
+ rsp.status = desc.ret[2];
+ }
+
+ if (rc) {
+ pr_err("read row %d failed, err code = %d", row_num, rc);
+ } else {
+ efuse_bits = ((u64)(rsp.row_data[1]) << 32) +
+ (u64)rsp.row_data[0];
+ }
+
+ return efuse_bits;
+}
+
+static inline u32 apc_to_acc_corner(struct mem_acc_regulator *mem_acc_vreg,
+ int corner)
+{
+ /*
+ * corner_acc_map maps the corner from index 0 and APC corner value
+ * starts from the value 1
+ */
+ return mem_acc_vreg->corner_acc_map[corner - 1];
+}
+
+static void __update_acc_sel(struct mem_acc_regulator *mem_acc_vreg,
+ int corner, int mem_type)
+{
+ u32 acc_data, acc_data_old, i, bit, acc_corner;
+
+ acc_data = readl_relaxed(mem_acc_vreg->acc_sel_base[mem_type]);
+ acc_data_old = acc_data;
+ for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+ bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+ acc_data &= ~mem_acc_vreg->acc_sel_mask[mem_type][i];
+ acc_corner = apc_to_acc_corner(mem_acc_vreg, corner);
+ acc_data |= (acc_corner << bit) &
+ mem_acc_vreg->acc_sel_mask[mem_type][i];
+ }
+ pr_debug("corner=%d old_acc_sel=0x%02x new_acc_sel=0x%02x mem_type=%d\n",
+ corner, acc_data_old, acc_data, mem_type);
+ writel_relaxed(acc_data, mem_acc_vreg->acc_sel_base[mem_type]);
+}
+
+static void __update_acc_type(struct mem_acc_regulator *mem_acc_vreg,
+ int corner)
+{
+ int i, rc;
+
+ for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+ if (mem_acc_vreg->mem_acc_type_addr[i]) {
+ rc = scm_io_write(mem_acc_vreg->mem_acc_type_addr[i],
+ mem_acc_vreg->mem_acc_type_data[corner - 1 + i *
+ mem_acc_vreg->num_corners]);
+ if (rc)
+ pr_err("scm_io_write: %pa failure rc:%d\n",
+ &(mem_acc_vreg->mem_acc_type_addr[i]),
+ rc);
+ }
+ }
+}
+
+static void __update_acc_custom(struct mem_acc_regulator *mem_acc_vreg,
+ int corner, int mem_type)
+{
+ writel_relaxed(
+ mem_acc_vreg->acc_custom_data[mem_type][corner-1],
+ mem_acc_vreg->acc_custom_addr[mem_type]);
+ pr_debug("corner=%d mem_type=%d custom_data=0x%2x\n", corner,
+ mem_type, mem_acc_vreg->acc_custom_data[mem_type][corner-1]);
+}
+
+static void update_acc_sel(struct mem_acc_regulator *mem_acc_vreg, int corner)
+{
+ int i;
+
+ for (i = 0; i < MEMORY_MAX; i++) {
+ if (mem_acc_vreg->mem_acc_supported[i])
+ __update_acc_sel(mem_acc_vreg, corner, i);
+ if (mem_acc_vreg->mem_acc_custom_supported[i])
+ __update_acc_custom(mem_acc_vreg, corner, i);
+ }
+
+ if (mem_acc_vreg->mem_acc_type_data)
+ __update_acc_type(mem_acc_vreg, corner);
+}
+
+static void update_acc_reg(struct mem_acc_regulator *mem_acc_vreg, int corner)
+{
+ struct corner_acc_reg_config *corner_acc_reg_config;
+ struct acc_reg_value *reg_config_list;
+ int i, index;
+ u32 addr_index, reg_val;
+
+ corner_acc_reg_config =
+ &mem_acc_vreg->corner_acc_reg_config[mem_acc_vreg->corner];
+ reg_config_list = corner_acc_reg_config->reg_config_list;
+ for (i = 0; i < corner_acc_reg_config->max_reg_config_len; i++) {
+ /*
+ * Use (corner - 1) in the below equation as
+ * the reg_config_list[] stores the values starting from
+ * index '0' where as the minimum corner value allowed
+ * in regulator framework is '1'.
+ */
+ index = (corner - 1) * corner_acc_reg_config->max_reg_config_len
+ + i;
+ addr_index = reg_config_list[index].addr_index;
+ reg_val = reg_config_list[index].reg_val;
+
+ if (addr_index == PARAM_MATCH_ANY)
+ break;
+
+ writel_relaxed(reg_val,
+ mem_acc_vreg->remap_reg_addr_list[addr_index]);
+ /* make sure write complete */
+ mb();
+
+ pr_debug("corner=%d register:0x%x value:0x%x\n", corner,
+ mem_acc_vreg->phys_reg_addr_list[addr_index], reg_val);
+ }
+}
+
+static int mem_acc_regulator_set_voltage(struct regulator_dev *rdev,
+ int corner, int corner_max, unsigned *selector)
+{
+ struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+ int i;
+
+ if (corner > mem_acc_vreg->num_corners) {
+ pr_err("Invalid corner=%d requested\n", corner);
+ return -EINVAL;
+ }
+
+ pr_debug("old corner=%d, new corner=%d\n",
+ mem_acc_vreg->corner, corner);
+
+ if (corner == mem_acc_vreg->corner)
+ return 0;
+
+ /* go up or down one level at a time */
+ mutex_lock(&mem_acc_memory_mutex);
+
+ if (mem_acc_vreg->flags & MEM_ACC_USE_ADDR_VAL_MAP) {
+ update_acc_reg(mem_acc_vreg, corner);
+ } else if (mem_acc_vreg->flags & MEM_ACC_USE_CORNER_ACC_MAP) {
+ if (corner > mem_acc_vreg->corner) {
+ for (i = mem_acc_vreg->corner + 1; i <= corner; i++) {
+ pr_debug("UP: to corner %d\n", i);
+ update_acc_sel(mem_acc_vreg, i);
+ }
+ } else {
+ for (i = mem_acc_vreg->corner - 1; i >= corner; i--) {
+ pr_debug("DOWN: to corner %d\n", i);
+ update_acc_sel(mem_acc_vreg, i);
+ }
+ }
+ }
+
+ mutex_unlock(&mem_acc_memory_mutex);
+
+ pr_debug("new voltage corner set %d\n", corner);
+
+ mem_acc_vreg->corner = corner;
+
+ return 0;
+}
+
+static int mem_acc_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct mem_acc_regulator *mem_acc_vreg = rdev_get_drvdata(rdev);
+
+ return mem_acc_vreg->corner;
+}
+
+static struct regulator_ops mem_acc_corner_ops = {
+ .set_voltage = mem_acc_regulator_set_voltage,
+ .get_voltage = mem_acc_regulator_get_voltage,
+};
+
+static int __mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg,
+ int mem_type)
+{
+ int i;
+ u32 bit, mask;
+
+ mem_acc_vreg->acc_sel_mask[mem_type] = devm_kzalloc(mem_acc_vreg->dev,
+ mem_acc_vreg->num_acc_sel[mem_type] * sizeof(u32), GFP_KERNEL);
+ if (!mem_acc_vreg->acc_sel_mask[mem_type]) {
+ pr_err("Unable to allocate memory for mem_type=%d\n", mem_type);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < mem_acc_vreg->num_acc_sel[mem_type]; i++) {
+ bit = mem_acc_vreg->acc_sel_bit_pos[mem_type][i];
+ mask = BIT(mem_acc_vreg->acc_sel_bit_size[mem_type]) - 1;
+ mem_acc_vreg->acc_sel_mask[mem_type][i] = mask << bit;
+ }
+
+ return 0;
+}
+
+static int mem_acc_sel_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+ int i, rc;
+
+ for (i = 0; i < MEMORY_MAX; i++) {
+ if (mem_acc_vreg->mem_acc_supported[i]) {
+ rc = __mem_acc_sel_init(mem_acc_vreg, i);
+ if (rc) {
+ pr_err("Unable to intialize mem_type=%d rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void mem_acc_en_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+ int i, bit;
+ u32 acc_data;
+
+ acc_data = readl_relaxed(mem_acc_vreg->acc_en_base);
+ pr_debug("init: acc_en_register=%x\n", acc_data);
+ for (i = 0; i < mem_acc_vreg->num_acc_en; i++) {
+ bit = mem_acc_vreg->acc_en_bit_pos[i];
+ acc_data |= BIT(bit);
+ }
+ pr_debug("final: acc_en_register=%x\n", acc_data);
+ writel_relaxed(acc_data, mem_acc_vreg->acc_en_base);
+}
+
+static int populate_acc_data(struct mem_acc_regulator *mem_acc_vreg,
+ const char *prop_name, u32 **value, u32 *len)
+{
+ int rc;
+
+ if (!of_get_property(mem_acc_vreg->dev->of_node, prop_name, len)) {
+ pr_err("Unable to find %s property\n", prop_name);
+ return -EINVAL;
+ }
+ *len /= sizeof(u32);
+ if (!(*len)) {
+ pr_err("Incorrect entries in %s\n", prop_name);
+ return -EINVAL;
+ }
+
+ *value = devm_kzalloc(mem_acc_vreg->dev, (*len) * sizeof(u32),
+ GFP_KERNEL);
+ if (!(*value)) {
+ pr_err("Unable to allocate memory for %s\n", prop_name);
+ return -ENOMEM;
+ }
+
+ pr_debug("Found %s, data-length = %d\n", prop_name, *len);
+
+ rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
+ prop_name, *value, *len);
+ if (rc) {
+ pr_err("Unable to populate %s rc=%d\n", prop_name, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int mem_acc_sel_setup(struct mem_acc_regulator *mem_acc_vreg,
+ struct resource *res, int mem_type)
+{
+ int len, rc;
+ char *mem_select_str;
+ char *mem_select_size_str;
+
+ mem_acc_vreg->acc_sel_addr[mem_type] = res->start;
+ len = res->end - res->start + 1;
+ pr_debug("'acc_sel_addr' = %pa mem_type=%d (len=%d)\n",
+ &res->start, mem_type, len);
+
+ mem_acc_vreg->acc_sel_base[mem_type] = devm_ioremap(mem_acc_vreg->dev,
+ mem_acc_vreg->acc_sel_addr[mem_type], len);
+ if (!mem_acc_vreg->acc_sel_base[mem_type]) {
+ pr_err("Unable to map 'acc_sel_addr' %pa for mem_type=%d\n",
+ &mem_acc_vreg->acc_sel_addr[mem_type], mem_type);
+ return -EINVAL;
+ }
+
+ switch (mem_type) {
+ case MEMORY_L1:
+ mem_select_str = "qcom,acc-sel-l1-bit-pos";
+ mem_select_size_str = "qcom,acc-sel-l1-bit-size";
+ break;
+ case MEMORY_L2:
+ mem_select_str = "qcom,acc-sel-l2-bit-pos";
+ mem_select_size_str = "qcom,acc-sel-l2-bit-size";
+ break;
+ default:
+ pr_err("Invalid memory type: %d\n", mem_type);
+ return -EINVAL;
+ }
+
+ mem_acc_vreg->acc_sel_bit_size[mem_type] = MEM_ACC_DEFAULT_SEL_SIZE;
+ of_property_read_u32(mem_acc_vreg->dev->of_node, mem_select_size_str,
+ &mem_acc_vreg->acc_sel_bit_size[mem_type]);
+
+ rc = populate_acc_data(mem_acc_vreg, mem_select_str,
+ &mem_acc_vreg->acc_sel_bit_pos[mem_type],
+ &mem_acc_vreg->num_acc_sel[mem_type]);
+ if (rc)
+ pr_err("Unable to populate '%s' rc=%d\n", mem_select_str, rc);
+
+ return rc;
+}
+
+static int mem_acc_efuse_init(struct platform_device *pdev,
+ struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct resource *res;
+ int len;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse_addr");
+ if (!res || !res->start) {
+ mem_acc_vreg->efuse_base = NULL;
+ pr_debug("'efuse_addr' resource missing or not used.\n");
+ return 0;
+ }
+
+ mem_acc_vreg->efuse_addr = res->start;
+ len = res->end - res->start + 1;
+
+ pr_info("efuse_addr = %pa (len=0x%x)\n", &res->start, len);
+
+ mem_acc_vreg->efuse_base = devm_ioremap(&pdev->dev,
+ mem_acc_vreg->efuse_addr, len);
+ if (!mem_acc_vreg->efuse_base) {
+ pr_err("Unable to map efuse_addr %pa\n",
+ &mem_acc_vreg->efuse_addr);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mem_acc_custom_data_init(struct platform_device *pdev,
+ struct mem_acc_regulator *mem_acc_vreg,
+ int mem_type)
+{
+ struct resource *res;
+ char *custom_apc_addr_str, *custom_apc_data_str;
+ int len, rc = 0;
+
+ switch (mem_type) {
+ case MEMORY_L1:
+ custom_apc_addr_str = "acc-l1-custom";
+ custom_apc_data_str = "qcom,l1-acc-custom-data";
+ break;
+ case MEMORY_L2:
+ custom_apc_addr_str = "acc-l2-custom";
+ custom_apc_data_str = "qcom,l2-acc-custom-data";
+ break;
+ default:
+ pr_err("Invalid memory type: %d\n", mem_type);
+ return -EINVAL;
+ }
+
+ if (!of_find_property(mem_acc_vreg->dev->of_node,
+ custom_apc_data_str, NULL)) {
+ pr_debug("%s custom_data not specified\n", custom_apc_data_str);
+ return 0;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ custom_apc_addr_str);
+ if (!res || !res->start) {
+ pr_debug("%s resource missing\n", custom_apc_addr_str);
+ return -EINVAL;
+ } else {
+ len = res->end - res->start + 1;
+ mem_acc_vreg->acc_custom_addr[mem_type] =
+ devm_ioremap(mem_acc_vreg->dev, res->start, len);
+ if (!mem_acc_vreg->acc_custom_addr[mem_type]) {
+ pr_err("Unable to map %s %pa\n", custom_apc_addr_str,
+ &res->start);
+ return -EINVAL;
+ }
+ }
+
+ rc = populate_acc_data(mem_acc_vreg, custom_apc_data_str,
+ &mem_acc_vreg->acc_custom_data[mem_type], &len);
+ if (rc) {
+ pr_err("Unable to find %s rc=%d\n", custom_apc_data_str, rc);
+ return rc;
+ }
+
+ if (mem_acc_vreg->num_corners != len) {
+ pr_err("Custom data is not present for all the corners\n");
+ return -EINVAL;
+ }
+
+ mem_acc_vreg->mem_acc_custom_supported[mem_type] = true;
+
+ return 0;
+}
+
+static int override_mem_acc_custom_data(struct platform_device *pdev,
+ struct mem_acc_regulator *mem_acc_vreg,
+ int mem_type)
+{
+ char *custom_apc_data_str;
+ int len, rc = 0, i;
+ int tuple_count, tuple_match;
+ u32 index = 0, value = 0;
+
+ switch (mem_type) {
+ case MEMORY_L1:
+ custom_apc_data_str = "qcom,override-l1-acc-custom-data";
+ break;
+ case MEMORY_L2:
+ custom_apc_data_str = "qcom,override-l2-acc-custom-data";
+ break;
+ default:
+ pr_err("Invalid memory type: %d\n", mem_type);
+ return -EINVAL;
+ }
+
+ if (!of_find_property(mem_acc_vreg->dev->of_node,
+ custom_apc_data_str, &len)) {
+ pr_debug("%s not specified\n", custom_apc_data_str);
+ return 0;
+ }
+
+ if (mem_acc_vreg->override_map_count) {
+ if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+ return 0;
+ tuple_count = mem_acc_vreg->override_map_count;
+ tuple_match = mem_acc_vreg->override_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != mem_acc_vreg->num_corners * tuple_count * sizeof(u32)) {
+ pr_err("%s length=%d is invalid\n", custom_apc_data_str, len);
+ return -EINVAL;
+ }
+
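+	/*
+	 * The override property holds one block of num_corners values per
+	 * fuse-version tuple; tuple_match selects which block is read.
+	 */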
+ for (i = 0; i < mem_acc_vreg->num_corners; i++) {
+ index = (tuple_match * mem_acc_vreg->num_corners) + i;
+ rc = of_property_read_u32_index(mem_acc_vreg->dev->of_node,
+ custom_apc_data_str, index, &value);
+ if (rc) {
+			pr_err("Unable to read %s index %u, rc=%d\n",
+ custom_apc_data_str, index, rc);
+ return rc;
+ }
+ mem_acc_vreg->acc_custom_data[mem_type][i] = value;
+ }
+
+ return 0;
+}
+
+static int mem_acc_override_corner_map(struct mem_acc_regulator *mem_acc_vreg)
+{
+ int len = 0, i, rc;
+ int tuple_count, tuple_match;
+ u32 index = 0, value = 0;
+ char *prop_str = "qcom,override-corner-acc-map";
+
+ if (!of_find_property(mem_acc_vreg->dev->of_node, prop_str, &len))
+ return 0;
+
+ if (mem_acc_vreg->override_map_count) {
+ if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+ return 0;
+ tuple_count = mem_acc_vreg->override_map_count;
+ tuple_match = mem_acc_vreg->override_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ if (len != mem_acc_vreg->num_corners * tuple_count * sizeof(u32)) {
+ pr_err("%s length=%d is invalid\n", prop_str, len);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mem_acc_vreg->num_corners; i++) {
+ index = (tuple_match * mem_acc_vreg->num_corners) + i;
+ rc = of_property_read_u32_index(mem_acc_vreg->dev->of_node,
+ prop_str, index, &value);
+ if (rc) {
+			pr_err("Unable to read %s index %u, rc=%d\n",
+ prop_str, index, rc);
+ return rc;
+ }
+ mem_acc_vreg->corner_acc_map[i] = value;
+ }
+
+ return 0;
+}
+
+static int mem_acc_find_override_map_match(struct platform_device *pdev,
+ struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int i, rc, tuple_size;
+ int len = 0;
+ u32 *tmp;
+ char *prop_str = "qcom,override-fuse-version-map";
+
+ /* Specify default no match case. */
+ mem_acc_vreg->override_map_match = FUSE_MAP_NO_MATCH;
+ mem_acc_vreg->override_map_count = 0;
+
+ if (!of_find_property(of_node, prop_str, &len)) {
+ /* No mapping present. */
+ return 0;
+ }
+
+ tuple_size = 1;
+ mem_acc_vreg->override_map_count = len / (sizeof(u32) * tuple_size);
+
+ if (len == 0 || len % (sizeof(u32) * tuple_size)) {
+ pr_err("%s length=%d is invalid\n", prop_str, len);
+ return -EINVAL;
+ }
+
+ tmp = kzalloc(len, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(of_node, prop_str, tmp,
+ mem_acc_vreg->override_map_count * tuple_size);
+ if (rc) {
+ pr_err("could not read %s rc=%d\n", prop_str, rc);
+ goto done;
+ }
+
+ for (i = 0; i < mem_acc_vreg->override_map_count; i++) {
+ if (tmp[i * tuple_size] != mem_acc_vreg->override_fuse_value
+ && tmp[i * tuple_size] != FUSE_PARAM_MATCH_ANY) {
+ continue;
+ } else {
+ mem_acc_vreg->override_map_match = i;
+ break;
+ }
+ }
+
+ if (mem_acc_vreg->override_map_match != FUSE_MAP_NO_MATCH)
+ pr_debug("%s tuple match found: %d\n", prop_str,
+ mem_acc_vreg->override_map_match);
+ else
+ pr_err("%s tuple match not found\n", prop_str);
+
+done:
+ kfree(tmp);
+ return rc;
+}
+
+#define MAX_CHARS_PER_INT 20
+
+static int mem_acc_reg_addr_val_dump(struct mem_acc_regulator *mem_acc_vreg,
+ struct corner_acc_reg_config *corner_acc_reg_config,
+ u32 corner)
+{
+ int i, k, index, pos = 0;
+ u32 addr_index;
+ size_t buflen;
+ char *buf;
+ struct acc_reg_value *reg_config_list =
+ corner_acc_reg_config->reg_config_list;
+ int max_reg_config_len = corner_acc_reg_config->max_reg_config_len;
+ int num_corners = mem_acc_vreg->num_corners;
+
+ /*
+ * Log register and value mapping since they are useful for
+ * baseline MEM ACC logging.
+ */
+ buflen = max_reg_config_len * (MAX_CHARS_PER_INT + 6) * sizeof(*buf);
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (buf == NULL) {
+ pr_err("Could not allocate memory for acc register and value logging\n");
+ return -ENOMEM;
+ }
+
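+	/*
+	 * For each corner other than the requested one, log its
+	 * <register, value> pairs; a PARAM_MATCH_ANY address index marks the
+	 * end of that corner's list.
+	 */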
+ for (i = 0; i < num_corners; i++) {
+ if (corner == i + 1)
+ continue;
+
+ pr_debug("Corner: %d --> %d:\n", corner, i + 1);
+ pos = 0;
+ for (k = 0; k < max_reg_config_len; k++) {
+ index = i * max_reg_config_len + k;
+ addr_index = reg_config_list[index].addr_index;
+ if (addr_index == PARAM_MATCH_ANY)
+ break;
+
+ pos += scnprintf(buf + pos, buflen - pos,
+ "<0x%x 0x%x> ",
+ mem_acc_vreg->phys_reg_addr_list[addr_index],
+ reg_config_list[index].reg_val);
+ }
+ buf[pos] = '\0';
+ pr_debug("%s\n", buf);
+ }
+
+	kfree(buf);
+	return 0;
+}
+
+static int mem_acc_get_reg_addr_val(struct device_node *of_node,
+ const char *prop_str, struct acc_reg_value *reg_config_list,
+ int list_offset, int list_size, u32 max_reg_index)
+{
+ int i, index, rc = 0;
+
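+	/*
+	 * Each tuple in the property is a <register-index, value> pair;
+	 * list_offset selects which list_size-sized block (for example a
+	 * fuse-version specific override) is parsed.
+	 */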
+ for (i = 0; i < list_size / 2; i++) {
+ index = (list_offset * list_size) + i * 2;
+ rc = of_property_read_u32_index(of_node, prop_str, index,
+ &reg_config_list[i].addr_index);
+ rc |= of_property_read_u32_index(of_node, prop_str, index + 1,
+ &reg_config_list[i].reg_val);
+ if (rc) {
+ pr_err("could not read %s at tuple %u: rc=%d\n",
+ prop_str, index, rc);
+ return rc;
+ }
+
+ if (reg_config_list[i].addr_index == PARAM_MATCH_ANY)
+ continue;
+
+ if ((!reg_config_list[i].addr_index) ||
+ reg_config_list[i].addr_index > max_reg_index) {
+ pr_err("Invalid register index %u in %s at tuple %u\n",
+ reg_config_list[i].addr_index, prop_str, index);
+ return -EINVAL;
+ }
+ }
+
+ return rc;
+}
+
+static int mem_acc_init_reg_config(struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = mem_acc_vreg->dev->of_node;
+ int i, size, len = 0, rc = 0;
+ u32 addr_index, reg_val, index;
+ char *prop_str = "qcom,acc-init-reg-config";
+
+ if (!of_find_property(of_node, prop_str, &len)) {
+ /* Initial acc register configuration not specified */
+ return rc;
+ }
+
+ size = len / sizeof(u32);
+ if ((!size) || (size % 2)) {
+ pr_err("%s specified with invalid length: %d\n",
+ prop_str, size);
+ return -EINVAL;
+ }
+
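+	/*
+	 * qcom,acc-init-reg-config is a flat list of <register-index, value>
+	 * pairs that is written out once here to set the initial ACC state.
+	 */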
+ for (i = 0; i < size / 2; i++) {
+ index = i * 2;
+ rc = of_property_read_u32_index(of_node, prop_str, index,
+ &addr_index);
+ rc |= of_property_read_u32_index(of_node, prop_str, index + 1,
+ &reg_val);
+ if (rc) {
+ pr_err("could not read %s at tuple %u: rc=%d\n",
+ prop_str, index, rc);
+ return rc;
+ }
+
+ if ((!addr_index) || addr_index > mem_acc_vreg->num_acc_reg) {
+ pr_err("Invalid register index %u in %s at tuple %u\n",
+ addr_index, prop_str, index);
+ return -EINVAL;
+ }
+
+ writel_relaxed(reg_val,
+ mem_acc_vreg->remap_reg_addr_list[addr_index]);
+ /* make sure write complete */
+ mb();
+
+ pr_debug("acc initial config: register:0x%x value:0x%x\n",
+ mem_acc_vreg->phys_reg_addr_list[addr_index], reg_val);
+ }
+
+ return rc;
+}
+
+static int mem_acc_get_reg_addr(struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = mem_acc_vreg->dev->of_node;
+ void __iomem **remap_reg_addr_list;
+ u32 *phys_reg_addr_list;
+ int i, num_acc_reg, len = 0, rc = 0;
+
+ if (!of_find_property(of_node, "qcom,acc-reg-addr-list", &len)) {
+ /* acc register address list not specified */
+ return rc;
+ }
+
+ num_acc_reg = len / sizeof(u32);
+ if (!num_acc_reg) {
+ pr_err("qcom,acc-reg-addr-list has invalid len = %d\n", len);
+ return -EINVAL;
+ }
+
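+	/*
+	 * Allocate num_acc_reg + 1 entries and fill them starting at index 1
+	 * so that a register index of 0 can be rejected as invalid when the
+	 * register configuration properties are parsed.
+	 */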
+ phys_reg_addr_list = devm_kcalloc(mem_acc_vreg->dev, num_acc_reg + 1,
+ sizeof(*phys_reg_addr_list), GFP_KERNEL);
+ if (!phys_reg_addr_list)
+ return -ENOMEM;
+
+ remap_reg_addr_list = devm_kcalloc(mem_acc_vreg->dev, num_acc_reg + 1,
+ sizeof(*remap_reg_addr_list), GFP_KERNEL);
+ if (!remap_reg_addr_list)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(of_node, "qcom,acc-reg-addr-list",
+ &phys_reg_addr_list[1], num_acc_reg);
+ if (rc) {
+ pr_err("Read- qcom,acc-reg-addr-list failed: rc=%d\n", rc);
+ return rc;
+ }
+
+ for (i = 1; i <= num_acc_reg; i++) {
+ remap_reg_addr_list[i] = devm_ioremap(mem_acc_vreg->dev,
+ phys_reg_addr_list[i], 0x4);
+ if (!remap_reg_addr_list[i]) {
+ pr_err("Unable to map register address 0x%x\n",
+ phys_reg_addr_list[i]);
+ return -EINVAL;
+ }
+ }
+
+ mem_acc_vreg->num_acc_reg = num_acc_reg;
+ mem_acc_vreg->phys_reg_addr_list = phys_reg_addr_list;
+ mem_acc_vreg->remap_reg_addr_list = remap_reg_addr_list;
+
+ return rc;
+}
+
+static int mem_acc_reg_config_init(struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = mem_acc_vreg->dev->of_node;
+ struct acc_reg_value *reg_config_list;
+ int len, size, rc, i, num_corners;
+ struct property *prop;
+ char prop_str[30];
+ struct corner_acc_reg_config *corner_acc_reg_config;
+
+ rc = of_property_read_u32(of_node, "qcom,num-acc-corners",
+ &num_corners);
+ if (rc) {
+ pr_err("could not read qcom,num-acc-corners: rc=%d\n", rc);
+ return rc;
+ }
+
+ mem_acc_vreg->num_corners = num_corners;
+
+ rc = of_property_read_u32(of_node, "qcom,boot-acc-corner",
+ &mem_acc_vreg->corner);
+ if (rc) {
+ pr_err("could not read qcom,boot-acc-corner: rc=%d\n", rc);
+ return rc;
+ }
+ pr_debug("boot acc corner = %d\n", mem_acc_vreg->corner);
+
+ corner_acc_reg_config = devm_kcalloc(mem_acc_vreg->dev, num_corners + 1,
+ sizeof(*corner_acc_reg_config),
+ GFP_KERNEL);
+ if (!corner_acc_reg_config)
+ return -ENOMEM;
+
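+	/*
+	 * Corners are 1-based; each qcom,cornerN-reg-config property must
+	 * supply at least one <register-index, value> pair for every target
+	 * corner, hence the num_corners * 2 minimum length check below.
+	 */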
+ for (i = 1; i <= num_corners; i++) {
+ snprintf(prop_str, sizeof(prop_str),
+ "qcom,corner%d-reg-config", i);
+		prop = of_find_property(of_node, prop_str, &len);
+		if (!prop) {
+			pr_err("%s property is missing\n", prop_str);
+			return -EINVAL;
+		}
+
+		size = len / sizeof(u32);
+		if (!size || size < (num_corners * 2)) {
+			pr_err("%s property has invalid length: len=%d\n",
+				prop_str, len);
+			return -EINVAL;
+		}
+
+ reg_config_list = devm_kcalloc(mem_acc_vreg->dev, size / 2,
+ sizeof(*reg_config_list), GFP_KERNEL);
+ if (!reg_config_list)
+ return -ENOMEM;
+
+ rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+ reg_config_list, 0, size,
+ mem_acc_vreg->num_acc_reg);
+ if (rc) {
+ pr_err("Failed to read %s property: rc=%d\n",
+ prop_str, rc);
+ return rc;
+ }
+
+ corner_acc_reg_config[i].max_reg_config_len =
+ size / (num_corners * 2);
+ corner_acc_reg_config[i].reg_config_list = reg_config_list;
+
+ rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+ &corner_acc_reg_config[i], i);
+ if (rc) {
+ pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ mem_acc_vreg->corner_acc_reg_config = corner_acc_reg_config;
+ mem_acc_vreg->flags |= MEM_ACC_USE_ADDR_VAL_MAP;
+ return rc;
+}
+
+static int mem_acc_override_reg_addr_val_init(
+ struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = mem_acc_vreg->dev->of_node;
+ struct corner_acc_reg_config *corner_acc_reg_config;
+ struct acc_reg_value *override_reg_config_list;
+ int i, tuple_count, tuple_match, len = 0, rc = 0;
+ u32 list_size, override_max_reg_config_len;
+ char prop_str[40];
+ struct property *prop;
+ int num_corners = mem_acc_vreg->num_corners;
+
+ if (!mem_acc_vreg->corner_acc_reg_config)
+ return 0;
+
+ if (mem_acc_vreg->override_map_count) {
+ if (mem_acc_vreg->override_map_match == FUSE_MAP_NO_MATCH)
+ return 0;
+ tuple_count = mem_acc_vreg->override_map_count;
+ tuple_match = mem_acc_vreg->override_map_match;
+ } else {
+ tuple_count = 1;
+ tuple_match = 0;
+ }
+
+ corner_acc_reg_config = mem_acc_vreg->corner_acc_reg_config;
+ for (i = 1; i <= num_corners; i++) {
+ snprintf(prop_str, sizeof(prop_str),
+ "qcom,override-corner%d-addr-val-map", i);
+ prop = of_find_property(of_node, prop_str, &len);
+		if (!prop) {
+			pr_debug("%s property not specified\n", prop_str);
+			continue;
+		}
+
+		list_size = len / (tuple_count * sizeof(u32));
+		if (!list_size || list_size < (num_corners * 2)) {
+			pr_err("qcom,override-corner%d-addr-val-map property has invalid length: len=%d\n",
+				i, len);
+			return -EINVAL;
+		}
+
+ override_max_reg_config_len = list_size / (num_corners * 2);
+ override_reg_config_list =
+ corner_acc_reg_config[i].reg_config_list;
+
+ if (corner_acc_reg_config[i].max_reg_config_len
+ != override_max_reg_config_len) {
+			/* Free the previously allocated memory */
+ devm_kfree(mem_acc_vreg->dev, override_reg_config_list);
+
+			/* Allocate memory for the new size requirement */
+ override_reg_config_list =
+ devm_kcalloc(mem_acc_vreg->dev,
+ override_max_reg_config_len * num_corners,
+ sizeof(*override_reg_config_list), GFP_KERNEL);
+ if (!override_reg_config_list)
+ return -ENOMEM;
+
+ corner_acc_reg_config[i].max_reg_config_len =
+ override_max_reg_config_len;
+ corner_acc_reg_config[i].reg_config_list =
+ override_reg_config_list;
+ }
+
+ rc = mem_acc_get_reg_addr_val(of_node, prop_str,
+ override_reg_config_list, tuple_match,
+ list_size, mem_acc_vreg->num_acc_reg);
+ if (rc) {
+ pr_err("Failed to read %s property: rc=%d\n",
+ prop_str, rc);
+ return rc;
+ }
+
+ rc = mem_acc_reg_addr_val_dump(mem_acc_vreg,
+ &corner_acc_reg_config[i], i);
+ if (rc) {
+ pr_err("could not dump acc address-value dump for corner=%d: rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+#define MEM_TYPE_STRING_LEN 20
+static int mem_acc_init(struct platform_device *pdev,
+ struct mem_acc_regulator *mem_acc_vreg)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ struct resource *res;
+ int len, rc, i, j;
+ u32 fuse_sel[4];
+ u64 fuse_bits;
+ bool acc_type_present = false;
+ char tmps[MEM_TYPE_STRING_LEN];
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-en");
+ if (!res || !res->start) {
+ pr_debug("'acc-en' resource missing or not used.\n");
+ } else {
+ mem_acc_vreg->acc_en_addr = res->start;
+ len = res->end - res->start + 1;
+ pr_debug("'acc_en_addr' = %pa (len=0x%x)\n", &res->start, len);
+
+ mem_acc_vreg->acc_en_base = devm_ioremap(mem_acc_vreg->dev,
+ mem_acc_vreg->acc_en_addr, len);
+ if (!mem_acc_vreg->acc_en_base) {
+ pr_err("Unable to map 'acc_en_addr' %pa\n",
+ &mem_acc_vreg->acc_en_addr);
+ return -EINVAL;
+ }
+
+ rc = populate_acc_data(mem_acc_vreg, "qcom,acc-en-bit-pos",
+ &mem_acc_vreg->acc_en_bit_pos,
+ &mem_acc_vreg->num_acc_en);
+ if (rc) {
+ pr_err("Unable to populate 'qcom,acc-en-bit-pos' rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = mem_acc_efuse_init(pdev, mem_acc_vreg);
+ if (rc) {
+ pr_err("Wrong eFuse address specified: rc=%d\n", rc);
+ return rc;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l1");
+ if (!res || !res->start) {
+ pr_debug("'acc-sel-l1' resource missing or not used.\n");
+ } else {
+ rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L1);
+ if (rc) {
+ pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+ MEMORY_L1, rc);
+ return rc;
+ }
+ mem_acc_vreg->mem_acc_supported[MEMORY_L1] = true;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "acc-sel-l2");
+ if (!res || !res->start) {
+ pr_debug("'acc-sel-l2' resource missing or not used.\n");
+ } else {
+ rc = mem_acc_sel_setup(mem_acc_vreg, res, MEMORY_L2);
+ if (rc) {
+ pr_err("Unable to setup mem-acc for mem_type=%d rc=%d\n",
+ MEMORY_L2, rc);
+ return rc;
+ }
+ mem_acc_vreg->mem_acc_supported[MEMORY_L2] = true;
+ }
+
+ for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+ snprintf(tmps, MEM_TYPE_STRING_LEN, "mem-acc-type%d", i + 1);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, tmps);
+
+ if (!res || !res->start) {
+ pr_debug("'%s' resource missing or not used.\n", tmps);
+ } else {
+ mem_acc_vreg->mem_acc_type_addr[i] = res->start;
+ acc_type_present = true;
+ }
+ }
+
+ rc = mem_acc_get_reg_addr(mem_acc_vreg);
+ if (rc) {
+ pr_err("Unable to get acc register addresses: rc=%d\n", rc);
+ return rc;
+ }
+
+ if (mem_acc_vreg->phys_reg_addr_list) {
+ rc = mem_acc_reg_config_init(mem_acc_vreg);
+ if (rc) {
+ pr_err("acc register address-value map failed: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (of_find_property(of_node, "qcom,corner-acc-map", NULL)) {
+ rc = populate_acc_data(mem_acc_vreg, "qcom,corner-acc-map",
+ &mem_acc_vreg->corner_acc_map,
+ &mem_acc_vreg->num_corners);
+
+ /* Check if at least one valid mem-acc config. is specified */
+ for (i = 0; i < MEMORY_MAX; i++) {
+ if (mem_acc_vreg->mem_acc_supported[i])
+ break;
+ }
+ if (i == MEMORY_MAX && !acc_type_present) {
+ pr_err("No mem-acc configuration specified\n");
+ return -EINVAL;
+ }
+
+ mem_acc_vreg->flags |= MEM_ACC_USE_CORNER_ACC_MAP;
+ }
+
+ if ((mem_acc_vreg->flags & MEM_ACC_USE_CORNER_ACC_MAP) &&
+ (mem_acc_vreg->flags & MEM_ACC_USE_ADDR_VAL_MAP)) {
+ pr_err("Invalid configuration, both qcom,corner-acc-map and qcom,cornerX-addr-val-map specified\n");
+ return -EINVAL;
+ }
+
+ pr_debug("num_corners = %d\n", mem_acc_vreg->num_corners);
+
+ if (mem_acc_vreg->num_acc_en)
+ mem_acc_en_init(mem_acc_vreg);
+
+ if (mem_acc_vreg->phys_reg_addr_list) {
+ rc = mem_acc_init_reg_config(mem_acc_vreg);
+ if (rc) {
+ pr_err("acc initial register configuration failed: rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = mem_acc_sel_init(mem_acc_vreg);
+ if (rc) {
+		pr_err("Unable to initialize mem_acc_sel reg rc=%d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < MEMORY_MAX; i++) {
+ rc = mem_acc_custom_data_init(pdev, mem_acc_vreg, i);
+ if (rc) {
+ pr_err("Unable to initialize custom data for mem_type=%d rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+
+ if (of_find_property(mem_acc_vreg->dev->of_node,
+ "qcom,override-acc-fuse-sel", NULL)) {
+ rc = of_property_read_u32_array(mem_acc_vreg->dev->of_node,
+ "qcom,override-acc-fuse-sel", fuse_sel, 4);
+ if (rc < 0) {
+ pr_err("Read failed - qcom,override-acc-fuse-sel rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ fuse_bits = mem_acc_read_efuse_row(mem_acc_vreg, fuse_sel[0],
+ fuse_sel[3]);
+ /*
+ * fuse_sel[1] = LSB position in row (shift)
+ * fuse_sel[2] = num of bits (mask)
+ */
+ mem_acc_vreg->override_fuse_value = (fuse_bits >> fuse_sel[1]) &
+ ((1 << fuse_sel[2]) - 1);
+
+ rc = mem_acc_find_override_map_match(pdev, mem_acc_vreg);
+ if (rc) {
+ pr_err("Unable to find fuse map match rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_debug("override_fuse_val=%d override_map_match=%d\n",
+ mem_acc_vreg->override_fuse_value,
+ mem_acc_vreg->override_map_match);
+
+ rc = mem_acc_override_corner_map(mem_acc_vreg);
+ if (rc) {
+ pr_err("Unable to override corner map rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = mem_acc_override_reg_addr_val_init(mem_acc_vreg);
+ if (rc) {
+ pr_err("Unable to override reg_config_list init rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < MEMORY_MAX; i++) {
+ rc = override_mem_acc_custom_data(pdev,
+ mem_acc_vreg, i);
+ if (rc) {
+ pr_err("Unable to override custom data for mem_type=%d rc=%d\n",
+ i, rc);
+ return rc;
+ }
+ }
+ }
+
+ if (acc_type_present) {
+ mem_acc_vreg->mem_acc_type_data = devm_kzalloc(
+ mem_acc_vreg->dev, mem_acc_vreg->num_corners *
+ MEM_ACC_TYPE_MAX * sizeof(u32), GFP_KERNEL);
+
+ if (!mem_acc_vreg->mem_acc_type_data) {
+ pr_err("Unable to allocate memory for mem_acc_type\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MEM_ACC_TYPE_MAX; i++) {
+ if (mem_acc_vreg->mem_acc_type_addr[i]) {
+ snprintf(tmps, MEM_TYPE_STRING_LEN,
+ "qcom,mem-acc-type%d", i + 1);
+
+ j = i * mem_acc_vreg->num_corners;
+ rc = of_property_read_u32_array(
+ mem_acc_vreg->dev->of_node,
+ tmps,
+ &mem_acc_vreg->mem_acc_type_data[j],
+ mem_acc_vreg->num_corners);
+ if (rc) {
+ pr_err("Unable to get property %s rc=%d\n",
+ tmps, rc);
+ return rc;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mem_acc_regulator_probe(struct platform_device *pdev)
+{
+ struct regulator_config reg_config = {};
+ struct mem_acc_regulator *mem_acc_vreg;
+ struct regulator_desc *rdesc;
+ struct regulator_init_data *init_data;
+ int rc;
+
+ if (!pdev->dev.of_node) {
+ pr_err("Device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node);
+ if (!init_data) {
+ pr_err("regulator init data is missing\n");
+ return -EINVAL;
+ } else {
+ init_data->constraints.input_uV
+ = init_data->constraints.max_uV;
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE;
+ }
+
+ mem_acc_vreg = devm_kzalloc(&pdev->dev, sizeof(*mem_acc_vreg),
+ GFP_KERNEL);
+ if (!mem_acc_vreg) {
+ pr_err("Can't allocate mem_acc_vreg memory\n");
+ return -ENOMEM;
+ }
+ mem_acc_vreg->dev = &pdev->dev;
+
+ rc = mem_acc_init(pdev, mem_acc_vreg);
+ if (rc) {
+ pr_err("Unable to initialize mem_acc configuration rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rdesc = &mem_acc_vreg->rdesc;
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &mem_acc_corner_ops;
+ rdesc->name = init_data->constraints.name;
+
+ reg_config.dev = &pdev->dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = mem_acc_vreg;
+ reg_config.of_node = pdev->dev.of_node;
+ mem_acc_vreg->rdev = regulator_register(rdesc, &reg_config);
+ if (IS_ERR(mem_acc_vreg->rdev)) {
+ rc = PTR_ERR(mem_acc_vreg->rdev);
+ if (rc != -EPROBE_DEFER)
+ pr_err("regulator_register failed: rc=%d\n", rc);
+ return rc;
+ }
+
+ platform_set_drvdata(pdev, mem_acc_vreg);
+
+ return 0;
+}
+
+static int mem_acc_regulator_remove(struct platform_device *pdev)
+{
+ struct mem_acc_regulator *mem_acc_vreg = platform_get_drvdata(pdev);
+
+ regulator_unregister(mem_acc_vreg->rdev);
+
+ return 0;
+}
+
+static struct of_device_id mem_acc_regulator_match_table[] = {
+ { .compatible = "qcom,mem-acc-regulator", },
+ {}
+};
+
+static struct platform_driver mem_acc_regulator_driver = {
+ .probe = mem_acc_regulator_probe,
+ .remove = mem_acc_regulator_remove,
+ .driver = {
+ .name = "qcom,mem-acc-regulator",
+ .of_match_table = mem_acc_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init mem_acc_regulator_init(void)
+{
+ return platform_driver_register(&mem_acc_regulator_driver);
+}
+postcore_initcall(mem_acc_regulator_init);
+
+static void __exit mem_acc_regulator_exit(void)
+{
+ platform_driver_unregister(&mem_acc_regulator_driver);
+}
+module_exit(mem_acc_regulator_exit);
+
+MODULE_DESCRIPTION("MEM-ACC-SEL regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/proxy-consumer.c b/drivers/regulator/proxy-consumer.c
new file mode 100644
index 000000000000..b833c4e1fa69
--- /dev/null
+++ b/drivers/regulator/proxy-consumer.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/proxy-consumer.h>
+
+struct proxy_consumer {
+ struct list_head list;
+ struct regulator *reg;
+ bool enable;
+ int min_uV;
+ int max_uV;
+ u32 current_uA;
+};
+
+static DEFINE_MUTEX(proxy_consumer_list_mutex);
+static LIST_HEAD(proxy_consumer_list);
+static bool proxy_consumers_removed;
+
+/**
+ * regulator_proxy_consumer_register() - conditionally register a proxy consumer
+ * for the specified regulator and set its boot time parameters
+ * @reg_dev: Device pointer of the regulator
+ * @reg_node: Device node pointer of the regulator
+ *
+ * Returns a struct proxy_consumer pointer corresponding to the regulator on
+ * success, ERR_PTR() if an error occurred, or NULL if no proxy consumer is
+ * needed for the regulator. This function calls
+ * regulator_get(reg_dev, "proxy") after first checking if any proxy consumer
+ * properties are present in the reg_node device node. After that, the voltage,
+ * minimum current, and/or the enable state will be set based upon the device
+ * node property values.
+ */
+struct proxy_consumer *regulator_proxy_consumer_register(struct device *reg_dev,
+ struct device_node *reg_node)
+{
+ struct proxy_consumer *consumer = NULL;
+ const char *reg_name = "";
+ u32 voltage[2] = {0};
+ int rc;
+
+ /* Return immediately if no proxy consumer properties are specified. */
+ if (!of_find_property(reg_node, "qcom,proxy-consumer-enable", NULL)
+ && !of_find_property(reg_node, "qcom,proxy-consumer-voltage", NULL)
+ && !of_find_property(reg_node, "qcom,proxy-consumer-current", NULL))
+ return NULL;
+
+ mutex_lock(&proxy_consumer_list_mutex);
+
+ /* Do not register new consumers if they cannot be removed later. */
+ if (proxy_consumers_removed) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+ if (dev_name(reg_dev))
+ reg_name = dev_name(reg_dev);
+
+ consumer = kzalloc(sizeof(*consumer), GFP_KERNEL);
+ if (!consumer) {
+ pr_err("kzalloc failed\n");
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ consumer->enable
+ = of_property_read_bool(reg_node, "qcom,proxy-consumer-enable");
+ of_property_read_u32(reg_node, "qcom,proxy-consumer-current",
+ &consumer->current_uA);
+ rc = of_property_read_u32_array(reg_node, "qcom,proxy-consumer-voltage",
+ voltage, 2);
+ if (!rc) {
+ consumer->min_uV = voltage[0];
+ consumer->max_uV = voltage[1];
+ }
+
+ dev_dbg(reg_dev, "proxy consumer request: enable=%d, voltage_range=[%d, %d] uV, min_current=%d uA\n",
+ consumer->enable, consumer->min_uV, consumer->max_uV,
+ consumer->current_uA);
+
+ consumer->reg = regulator_get(reg_dev, "proxy");
+ if (IS_ERR_OR_NULL(consumer->reg)) {
+ rc = PTR_ERR(consumer->reg);
+ pr_err("regulator_get() failed for %s, rc=%d\n", reg_name, rc);
+ goto unlock;
+ }
+
+ if (consumer->max_uV > 0 && consumer->min_uV <= consumer->max_uV) {
+ rc = regulator_set_voltage(consumer->reg, consumer->min_uV,
+ consumer->max_uV);
+ if (rc) {
+ pr_err("regulator_set_voltage %s failed, rc=%d\n",
+ reg_name, rc);
+ goto free_regulator;
+ }
+ }
+
+ if (consumer->current_uA > 0) {
+ rc = regulator_set_optimum_mode(consumer->reg,
+ consumer->current_uA);
+ if (rc < 0) {
+ pr_err("regulator_set_optimum_mode %s failed, rc=%d\n",
+ reg_name, rc);
+ goto remove_voltage;
+ }
+ }
+
+ if (consumer->enable) {
+ rc = regulator_enable(consumer->reg);
+ if (rc) {
+ pr_err("regulator_enable %s failed, rc=%d\n", reg_name,
+ rc);
+ goto remove_current;
+ }
+ }
+
+ list_add(&consumer->list, &proxy_consumer_list);
+ mutex_unlock(&proxy_consumer_list_mutex);
+
+ return consumer;
+
+remove_current:
+ regulator_set_optimum_mode(consumer->reg, 0);
+remove_voltage:
+ regulator_set_voltage(consumer->reg, 0, INT_MAX);
+free_regulator:
+ regulator_put(consumer->reg);
+unlock:
+ kfree(consumer);
+ mutex_unlock(&proxy_consumer_list_mutex);
+ return ERR_PTR(rc);
+}
+
+/* proxy_consumer_list_mutex must be held by caller. */
+static int regulator_proxy_consumer_remove(struct proxy_consumer *consumer)
+{
+ int rc = 0;
+
+ if (consumer->enable) {
+ rc = regulator_disable(consumer->reg);
+ if (rc)
+ pr_err("regulator_disable failed, rc=%d\n", rc);
+ }
+
+ if (consumer->current_uA > 0) {
+ rc = regulator_set_optimum_mode(consumer->reg, 0);
+ if (rc < 0)
+ pr_err("regulator_set_optimum_mode failed, rc=%d\n",
+ rc);
+ }
+
+ if (consumer->max_uV > 0 && consumer->min_uV <= consumer->max_uV) {
+ rc = regulator_set_voltage(consumer->reg, 0, INT_MAX);
+ if (rc)
+ pr_err("regulator_set_voltage failed, rc=%d\n", rc);
+ }
+
+ regulator_put(consumer->reg);
+ list_del(&consumer->list);
+ kfree(consumer);
+
+ return rc;
+}
+
+/**
+ * regulator_proxy_consumer_unregister() - unregister a proxy consumer and
+ * remove its boot time requests
+ * @consumer: Pointer to proxy_consumer to be removed
+ *
+ * Returns 0 on success or errno on failure. This function removes all requests
+ * made by the proxy consumer in regulator_proxy_consumer_register() and then
+ * frees the consumer's resources.
+ */
+int regulator_proxy_consumer_unregister(struct proxy_consumer *consumer)
+{
+ int rc = 0;
+
+ if (IS_ERR_OR_NULL(consumer))
+ return 0;
+
+ mutex_lock(&proxy_consumer_list_mutex);
+ if (!proxy_consumers_removed)
+ rc = regulator_proxy_consumer_remove(consumer);
+ mutex_unlock(&proxy_consumer_list_mutex);
+
+ return rc;
+}
+
+/*
+ * Remove all proxy requests at late_initcall_sync. The assumption is that all
+ * devices have probed at this point and made their own regulator requests.
+ */
+static int __init regulator_proxy_consumer_remove_all(void)
+{
+ struct proxy_consumer *consumer;
+ struct proxy_consumer *temp;
+
+ mutex_lock(&proxy_consumer_list_mutex);
+ proxy_consumers_removed = true;
+
+ if (!list_empty(&proxy_consumer_list))
+ pr_info("removing regulator proxy consumer requests\n");
+
+ list_for_each_entry_safe(consumer, temp, &proxy_consumer_list, list) {
+ regulator_proxy_consumer_remove(consumer);
+ }
+ mutex_unlock(&proxy_consumer_list_mutex);
+
+ return 0;
+}
+late_initcall_sync(regulator_proxy_consumer_remove_all);
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
new file mode 100644
index 000000000000..dabcae1e78b0
--- /dev/null
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -0,0 +1,2494 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define QPNP_LABIBB_REGULATOR_DRIVER_NAME "qcom,qpnp-labibb-regulator"
+
+#define REG_PERPH_TYPE 0x04
+
+#define QPNP_LAB_TYPE 0x24
+#define QPNP_IBB_TYPE 0x20
+
+/* Common register value for LAB/IBB */
+#define REG_LAB_IBB_LCD_MODE 0x0
+#define REG_LAB_IBB_AMOLED_MODE BIT(7)
+#define REG_LAB_IBB_SEC_ACCESS 0xD0
+#define REG_LAB_IBB_SEC_UNLOCK_CODE 0xA5
+
+/* LAB register offset definitions */
+#define REG_LAB_STATUS1 0x08
+#define REG_LAB_VOLTAGE 0x41
+#define REG_LAB_RING_SUPPRESSION_CTL 0x42
+#define REG_LAB_LCD_AMOLED_SEL 0x44
+#define REG_LAB_MODULE_RDY 0x45
+#define REG_LAB_ENABLE_CTL 0x46
+#define REG_LAB_PD_CTL 0x47
+#define REG_LAB_CLK_DIV 0x48
+#define REG_LAB_IBB_EN_RDY 0x49
+#define REG_LAB_CURRENT_LIMIT 0x4B
+#define REG_LAB_CURRENT_SENSE 0x4C
+#define REG_LAB_PS_CTL 0x50
+#define REG_LAB_RDSON_MNGMNT 0x53
+#define REG_LAB_PRECHARGE_CTL 0x5E
+#define REG_LAB_SOFT_START_CTL 0x5F
+#define REG_LAB_SPARE_CTL 0x60
+
+/* LAB register bits definitions */
+
+/* REG_LAB_STATUS1 */
+#define LAB_STATUS1_VREG_OK_MASK BIT(7)
+#define LAB_STATUS1_VREG_OK BIT(7)
+
+/* REG_LAB_VOLTAGE */
+#define LAB_VOLTAGE_OVERRIDE_EN BIT(7)
+#define LAB_VOLTAGE_SET_BITS 4
+#define LAB_VOLTAGE_SET_MASK ((1 << LAB_VOLTAGE_SET_BITS) - 1)
+
+/* REG_LAB_RING_SUPPRESSION_CTL */
+#define LAB_RING_SUPPRESSION_CTL_EN BIT(7)
+
+/* REG_LAB_MODULE_RDY */
+#define LAB_MODULE_RDY_EN BIT(7)
+
+/* REG_LAB_ENABLE_CTL */
+#define LAB_ENABLE_CTL_EN BIT(7)
+
+/* REG_LAB_PD_CTL */
+#define LAB_PD_CTL_STRONG_PULL BIT(0)
+#define LAB_PD_CTL_STRENGTH_MASK BIT(0)
+#define LAB_PD_CTL_DISABLE_PD BIT(1)
+#define LAB_PD_CTL_EN_MASK BIT(1)
+
+/* REG_LAB_IBB_EN_RDY */
+#define LAB_IBB_EN_RDY_EN BIT(7)
+
+/* REG_LAB_CURRENT_LIMIT */
+#define LAB_CURRENT_LIMIT_BITS 3
+#define LAB_CURRENT_LIMIT_MASK ((1 << LAB_CURRENT_LIMIT_BITS) - 1)
+#define LAB_CURRENT_LIMIT_EN BIT(7)
+
+/* REG_LAB_CURRENT_SENSE */
+#define LAB_CURRENT_SENSE_GAIN_BITS 2
+#define LAB_CURRENT_SENSE_GAIN_MASK ((1 << LAB_CURRENT_SENSE_GAIN_BITS) \
+ - 1)
+
+/* REG_LAB_PS_CTL */
+#define LAB_PS_CTL_BITS 2
+#define LAB_PS_CTL_MASK ((1 << LAB_PS_CTL_BITS) - 1)
+#define LAB_PS_CTL_EN BIT(7)
+
+/* REG_LAB_RDSON_MNGMNT */
+#define LAB_RDSON_MNGMNT_NFET_SLEW_EN BIT(5)
+#define LAB_RDSON_MNGMNT_PFET_SLEW_EN BIT(4)
+#define LAB_RDSON_MNGMNT_NFET_BITS 2
+#define LAB_RDSON_MNGMNT_NFET_MASK ((1 << LAB_RDSON_MNGMNT_NFET_BITS) - 1)
+#define LAB_RDSON_MNGMNT_NFET_SHIFT 2
+#define LAB_RDSON_MNGMNT_PFET_BITS 2
+#define LAB_RDSON_MNGMNT_PFET_MASK ((1 << LAB_RDSON_MNGMNT_PFET_BITS) - 1)
+
+/* REG_LAB_PRECHARGE_CTL */
+#define LAB_PRECHARGE_CTL_EN BIT(2)
+#define LAB_PRECHARGE_CTL_EN_BITS 2
+#define LAB_PRECHARGE_CTL_EN_MASK ((1 << LAB_PRECHARGE_CTL_EN_BITS) - 1)
+
+/* REG_LAB_SOFT_START_CTL */
+#define LAB_SOFT_START_CTL_BITS 2
+#define LAB_SOFT_START_CTL_MASK ((1 << LAB_SOFT_START_CTL_BITS) - 1)
+
+/* REG_LAB_SPARE_CTL */
+#define LAB_SPARE_TOUCH_WAKE_BIT BIT(3)
+#define LAB_SPARE_DISABLE_SCP_BIT BIT(0)
+
+/* IBB register offset definitions */
+#define REG_IBB_REVISION4 0x03
+#define REG_IBB_STATUS1 0x08
+#define REG_IBB_VOLTAGE 0x41
+#define REG_IBB_RING_SUPPRESSION_CTL 0x42
+#define REG_IBB_LCD_AMOLED_SEL 0x44
+#define REG_IBB_MODULE_RDY 0x45
+#define REG_IBB_ENABLE_CTL 0x46
+#define REG_IBB_PD_CTL 0x47
+#define REG_IBB_CLK_DIV 0x48
+#define REG_IBB_CURRENT_LIMIT 0x4B
+#define REG_IBB_PS_CTL 0x50
+#define REG_IBB_RDSON_MNGMNT 0x53
+#define REG_IBB_PWRUP_PWRDN_CTL_1 0x58
+#define REG_IBB_PWRUP_PWRDN_CTL_2 0x59
+#define REG_IBB_SOFT_START_CTL 0x5F
+#define REG_IBB_SWIRE_CTL 0x5A
+#define REG_IBB_SPARE_CTL 0x60
+#define REG_IBB_NLIMIT_DAC 0x61
+
+/* IBB register bits definition */
+
+/* REG_IBB_STATUS1 */
+#define IBB_STATUS1_VREG_OK_MASK BIT(7)
+#define IBB_STATUS1_VREG_OK BIT(7)
+
+/* REG_IBB_VOLTAGE */
+#define IBB_VOLTAGE_OVERRIDE_EN BIT(7)
+#define IBB_VOLTAGE_SET_BITS 6
+#define IBB_VOLTAGE_SET_MASK ((1 << IBB_VOLTAGE_SET_BITS) - 1)
+
+/* REG_IBB_RING_SUPPRESSION_CTL */
+#define IBB_RING_SUPPRESSION_CTL_EN BIT(7)
+
+/* REG_IBB_MODULE_RDY */
+#define IBB_MODULE_RDY_EN BIT(7)
+
+/* REG_IBB_ENABLE_CTL */
+#define IBB_ENABLE_CTL_SWIRE_RDY BIT(6)
+#define IBB_ENABLE_CTL_MODULE_EN BIT(7)
+
+/* REG_IBB_PD_CTL */
+#define IBB_PD_CTL_HALF_STRENGTH BIT(0)
+#define IBB_PD_CTL_STRENGTH_MASK BIT(0)
+#define IBB_PD_CTL_EN BIT(7)
+#define IBB_PD_CTL_EN_MASK BIT(7)
+
+/* REG_IBB_CURRENT_LIMIT */
+#define IBB_CURRENT_LIMIT_BITS 5
+#define IBB_CURRENT_LIMIT_MASK ((1 << IBB_CURRENT_LIMIT_BITS) - 1)
+#define IBB_CURRENT_LIMIT_DEBOUNCE_SHIFT 5
+#define IBB_CURRENT_LIMIT_EN BIT(7)
+#define IBB_ILIMIT_COUNT_CYC8 0
+#define IBB_CURRENT_MAX_500MA 0xA
+
+/* REG_IBB_PS_CTL */
+#define IBB_PS_CTL_EN 0x85
+#define IBB_PS_CTL_DISABLE 0x5
+
+/* REG_IBB_RDSON_MNGMNT */
+#define IBB_NFET_SLEW_EN BIT(7)
+#define IBB_PFET_SLEW_EN BIT(6)
+#define IBB_OVERRIDE_NFET_SW_SIZE BIT(5)
+#define IBB_OVERRIDE_PFET_SW_SIZE BIT(2)
+#define IBB_NFET_SW_SIZE_BITS 2
+#define IBB_PFET_SW_SIZE_BITS 2
+#define IBB_NFET_SW_SIZE_MASK		((1 << IBB_NFET_SW_SIZE_BITS) - 1)
+#define IBB_PFET_SW_SIZE_MASK		((1 << IBB_PFET_SW_SIZE_BITS) - 1)
+#define IBB_NFET_SW_SIZE_SHIFT 3
+
+/* REG_IBB_SPARE_CTL */
+#define IBB_BYPASS_PWRDN_DLY2_BIT BIT(5)
+#define IBB_FAST_STARTUP BIT(3)
+
+/* REG_IBB_SWIRE_CTL */
+#define IBB_OUTPUT_VOLTAGE_AT_ONE_PULSE_BITS 6
+#define IBB_OUTPUT_VOLTAGE_AT_ONE_PULSE_MASK \
+ ((1 << IBB_OUTPUT_VOLTAGE_AT_ONE_PULSE_BITS) - 1)
+#define MAX_OUTPUT_PULSE_VOLTAGE_MV 7700
+#define MIN_OUTPUT_PULSE_VOLTAGE_MV 1400
+#define OUTPUT_VOLTAGE_STEP_MV 100
+
+/* REG_IBB_NLIMIT_DAC */
+#define IBB_NLIMIT_DAC_EN 0x0
+#define IBB_NLIMIT_DAC_DISABLE 0x5
+
+/* REG_IBB_PWRUP_PWRDN_CTL_1 */
+#define IBB_PWRUP_PWRDN_CTL_1_DLY1_BITS 2
+#define IBB_PWRUP_PWRDN_CTL_1_DLY1_MASK \
+ ((1 << IBB_PWRUP_PWRDN_CTL_1_DLY1_BITS) - 1)
+#define IBB_PWRUP_PWRDN_CTL_1_DLY1_SHIFT 4
+#define IBB_PWRUP_PWRDN_CTL_1_DLY2_BITS 2
+#define IBB_PWRUP_PWRDN_CTL_1_DLY2_MASK \
+ ((1 << IBB_PWRUP_PWRDN_CTL_1_DLY2_BITS) - 1)
+#define IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK BIT(7)
+#define IBB_PWRUP_PWRDN_CTL_1_EN_DLY1 BIT(6)
+#define PWRUP_PWRDN_CTL_1_DISCHARGE_EN BIT(2)
+
+/* REG_IBB_PWRUP_PWRDN_CTL_2 */
+#define IBB_DIS_DLY_BITS 2
+#define IBB_DIS_DLY_MASK ((1 << IBB_DIS_DLY_BITS) - 1)
+#define IBB_WAIT_MBG_OK BIT(2)
+
+/**
+ * enum qpnp_labibb_mode - working mode of LAB/IBB regulators
+ * %QPNP_LABIBB_STANDALONE_MODE: configure LAB/IBB regulator as a
+ * standalone regulator
+ * %QPNP_LABIBB_LCD_MODE: configure LAB and IBB regulators
+ * together to provide power supply for LCD
+ * %QPNP_LABIBB_AMOLED_MODE: configure LAB and IBB regulators
+ * together to provide power supply for AMOLED
+ * %QPNP_LABIBB_MAX_MODE:		max number of configurable modes
+ * supported by qpnp_labibb_regulator
+ */
+enum qpnp_labibb_mode {
+ QPNP_LABIBB_STANDALONE_MODE = 1,
+ QPNP_LABIBB_LCD_MODE,
+ QPNP_LABIBB_AMOLED_MODE,
+ QPNP_LABIBB_MAX_MODE,
+};
+
+static const int ibb_discharge_resistor_plan[] = {
+ 300,
+ 64,
+ 32,
+ 16,
+};
+
+static const int ibb_pwrup_dly_plan[] = {
+ 1000,
+ 2000,
+ 4000,
+ 8000,
+};
+
+static const int ibb_pwrdn_dly_plan[] = {
+ 1000,
+ 2000,
+ 4000,
+ 8000,
+};
+
+static const int lab_clk_div_plan[] = {
+ 3200,
+ 2740,
+ 2400,
+ 2130,
+ 1920,
+ 1750,
+ 1600,
+ 1480,
+ 1370,
+ 1280,
+ 1200,
+ 1130,
+ 1070,
+ 1010,
+ 960,
+ 910,
+};
+
+static const int ibb_clk_div_plan[] = {
+ 3200,
+ 2740,
+ 2400,
+ 2130,
+ 1920,
+ 1750,
+ 1600,
+ 1480,
+ 1370,
+ 1280,
+ 1200,
+ 1130,
+ 1070,
+ 1010,
+ 960,
+ 910,
+};
+
+static const int lab_current_limit_plan[] = {
+ 200,
+ 400,
+ 600,
+ 800,
+};
+
+static const char * const lab_current_sense_plan[] = {
+ "0.5x",
+ "1x",
+ "1.5x",
+ "2x"
+};
+
+static const int ibb_current_limit_plan[] = {
+ 0,
+ 50,
+ 100,
+ 150,
+ 200,
+ 250,
+ 300,
+ 350,
+ 400,
+ 450,
+ 500,
+ 550,
+ 600,
+ 650,
+ 700,
+ 750,
+ 800,
+ 850,
+ 900,
+ 950,
+ 1000,
+ 1050,
+ 1100,
+ 1150,
+ 1200,
+ 1250,
+ 1300,
+ 1350,
+ 1400,
+ 1450,
+ 1500,
+ 1550,
+};
+
+static const int ibb_debounce_plan[] = {
+ 8,
+ 16,
+ 32,
+ 64,
+};
+
+static const int lab_ps_threshold_plan[] = {
+ 20,
+ 30,
+ 40,
+ 50,
+};
+
+static const int lab_soft_start_plan[] = {
+ 200,
+ 400,
+ 600,
+ 800,
+};
+
+static const int lab_rdson_nfet_plan[] = {
+ 25,
+ 50,
+ 75,
+ 100,
+};
+
+static const int lab_rdson_pfet_plan[] = {
+ 25,
+ 50,
+ 75,
+ 100,
+};
+
+static const int lab_max_precharge_plan[] = {
+ 200,
+ 300,
+ 400,
+ 500,
+};
+
+struct lab_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct mutex lab_mutex;
+
+ int curr_volt;
+ int min_volt;
+
+ int step_size;
+ int slew_rate;
+ int soft_start;
+
+ int vreg_enabled;
+};
+
+struct ibb_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct mutex ibb_mutex;
+
+ int curr_volt;
+ int min_volt;
+
+ int step_size;
+ int slew_rate;
+ int soft_start;
+
+ u32 pwrup_dly;
+ u32 pwrdn_dly;
+
+ int vreg_enabled;
+};
+
+struct qpnp_labibb {
+ struct device *dev;
+ struct spmi_device *spmi;
+ u16 lab_base;
+ u16 ibb_base;
+ struct lab_regulator lab_vreg;
+ struct ibb_regulator ibb_vreg;
+ int mode;
+ bool ttw_en;
+ bool in_ttw_mode;
+ bool ibb_settings_saved;
+ bool swire_control;
+};
+
+enum ibb_settings_index {
+ IBB_PD_CTL = 0,
+ IBB_CURRENT_LIMIT,
+ IBB_RDSON_MNGMNT,
+ IBB_PWRUP_PWRDN_CTL_1,
+ IBB_PWRUP_PWRDN_CTL_2,
+ IBB_SETTINGS_MAX,
+};
+
+enum lab_settings_index {
+ LAB_SOFT_START_CTL = 0,
+ LAB_SETTINGS_MAX,
+};
+
+struct settings {
+ u16 address;
+ u8 value;
+ bool sec_access;
+};
+
+#define SETTING(_id, _sec_access) \
+ [_id] = { \
+ .address = REG_##_id, \
+ .sec_access = _sec_access, \
+ }
+
+static struct settings ibb_settings[IBB_SETTINGS_MAX] = {
+ SETTING(IBB_PD_CTL, false),
+ SETTING(IBB_CURRENT_LIMIT, true),
+ SETTING(IBB_RDSON_MNGMNT, false),
+ SETTING(IBB_PWRUP_PWRDN_CTL_1, true),
+ SETTING(IBB_PWRUP_PWRDN_CTL_2, true),
+};
+
+static struct settings lab_settings[LAB_SETTINGS_MAX] = {
+ SETTING(LAB_SOFT_START_CTL, false),
+};
+
+static int
+qpnp_labibb_read(struct qpnp_labibb *labibb, u8 *val,
+ u16 base, int count)
+{
+ int rc = 0;
+ struct spmi_device *spmi = labibb->spmi;
+
+ if (base == 0) {
+ pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ base, spmi->sid, rc);
+ return -EINVAL;
+ }
+
+ rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, base, val, count);
+ if (rc) {
+ pr_err("SPMI read failed base=0x%02x sid=0x%02x rc=%d\n", base,
+ spmi->sid, rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int
+qpnp_labibb_write(struct qpnp_labibb *labibb, u16 base,
+ u8 *val, int count)
+{
+ int rc = 0;
+ struct spmi_device *spmi = labibb->spmi;
+
+ if (base == 0) {
+ pr_err("base cannot be zero base=0x%02x sid=0x%02x rc=%d\n",
+ base, spmi->sid, rc);
+ return -EINVAL;
+ }
+
+ rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, base, val, count);
+ if (rc) {
+ pr_err("write failed base=0x%02x sid=0x%02x rc=%d\n",
+ base, spmi->sid, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+qpnp_labibb_masked_write(struct qpnp_labibb *labibb, u16 base,
+ u8 mask, u8 val)
+{
+ int rc;
+ u8 reg;
+
+ rc = qpnp_labibb_read(labibb, &reg, base, 1);
+ if (rc) {
+ pr_err("spmi read failed: addr=%03X, rc=%d\n", base, rc);
+ return rc;
+ }
+ pr_debug("addr = 0x%x read 0x%x\n", base, reg);
+
+ reg &= ~mask;
+ reg |= val & mask;
+
+ pr_debug("Writing 0x%x\n", reg);
+
+ rc = qpnp_labibb_write(labibb, base, &reg, 1);
+ if (rc) {
+ pr_err("spmi write failed: addr=%03X, rc=%d\n", base, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
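+/*
+ * Secured LAB/IBB registers must be unlocked by writing the unlock code to
+ * the SEC_ACCESS register immediately before each write; the helpers below
+ * perform that sequence.
+ */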
+static int qpnp_labibb_sec_write(struct qpnp_labibb *labibb, u16 base,
+ u8 offset, u8 *val, int count)
+{
+ int rc;
+ u8 sec_val = REG_LAB_IBB_SEC_UNLOCK_CODE;
+
+ rc = qpnp_labibb_write(labibb, base + REG_LAB_IBB_SEC_ACCESS, &sec_val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_lab_write register %x failed rc = %d\n",
+ base + REG_LAB_IBB_SEC_ACCESS, rc);
+ return rc;
+ }
+
+ rc = qpnp_labibb_write(labibb, base + offset, val, count);
+ if (rc)
+ pr_err("qpnp_labibb_write failed: addr=%03X, rc=%d\n",
+ base + offset, rc);
+
+ return rc;
+}
+
+static int qpnp_labibb_sec_masked_write(struct qpnp_labibb *labibb, u16 base,
+ u8 offset, u8 mask, u8 val)
+{
+ int rc;
+ u8 sec_val = REG_LAB_IBB_SEC_UNLOCK_CODE;
+
+ rc = qpnp_labibb_write(labibb, base + REG_LAB_IBB_SEC_ACCESS, &sec_val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_lab_write register %x failed rc = %d\n",
+ base + REG_LAB_IBB_SEC_ACCESS, rc);
+ return rc;
+ }
+
+ rc = qpnp_labibb_masked_write(labibb, base + offset, mask, val);
+ if (rc)
+ pr_err("qpnp_lab_write register %x failed rc = %d\n",
+ base + offset, rc);
+
+ return rc;
+}
+
+static int qpnp_labibb_get_matching_idx(const char *val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lab_current_sense_plan); i++)
+ if (!strcmp(lab_current_sense_plan[i], val))
+ return i;
+
+ return -EINVAL;
+}
+
+static int qpnp_lab_dt_init(struct qpnp_labibb *labibb,
+ struct device_node *of_node)
+{
+ int rc = 0;
+ u8 i, val;
+ u32 tmp;
+
+ if (labibb->mode != QPNP_LABIBB_STANDALONE_MODE) {
+ if (labibb->mode == QPNP_LABIBB_LCD_MODE)
+ val = REG_LAB_IBB_LCD_MODE;
+ else
+ val = REG_LAB_IBB_AMOLED_MODE;
+
+ rc = qpnp_labibb_sec_write(labibb, labibb->lab_base,
+ REG_LAB_LCD_AMOLED_SEL, &val, 1);
+
+ if (rc) {
+ pr_err("qpnp_lab_sec_write register %x failed rc = %d\n",
+ REG_LAB_LCD_AMOLED_SEL, rc);
+ return rc;
+ }
+
+ val = LAB_IBB_EN_RDY_EN;
+ rc = qpnp_labibb_sec_write(labibb, labibb->lab_base,
+ REG_LAB_IBB_EN_RDY, &val, 1);
+
+ if (rc) {
+ pr_err("qpnp_lab_sec_write register %x failed rc = %d\n",
+ REG_LAB_IBB_EN_RDY, rc);
+ return rc;
+ }
+ }
+
+ val = 0;
+
+ if (of_property_read_bool(of_node, "qcom,qpnp-lab-full-pull-down"))
+ val |= LAB_PD_CTL_STRONG_PULL;
+
+ if (!of_property_read_bool(of_node, "qcom,qpnp-lab-pull-down-enable"))
+ val |= LAB_PD_CTL_DISABLE_PD;
+
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + REG_LAB_PD_CTL,
+ &val, 1);
+
+ if (rc) {
+ pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_LAB_PD_CTL, rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node,
+ "qcom,qpnp-lab-switching-clock-frequency", &tmp);
+ if (rc) {
+ pr_err("get qcom,qpnp-lab-switching-clock-frequency failed rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ for (val = 0; val < ARRAY_SIZE(lab_clk_div_plan); val++)
+ if (lab_clk_div_plan[val] == tmp)
+ break;
+
+ if (val == ARRAY_SIZE(lab_clk_div_plan)) {
+		pr_err("Invalid property in qcom,qpnp-lab-switching-clock-frequency\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + REG_LAB_CLK_DIV,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_LAB_CLK_DIV, rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node,
+ "qcom,qpnp-lab-limit-maximum-current", &tmp);
+
+ if (rc) {
+ pr_err("get qcom,qpnp-lab-limit-maximum-current failed rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ for (val = 0; val < ARRAY_SIZE(lab_current_limit_plan); val++)
+ if (lab_current_limit_plan[val] == tmp)
+ break;
+
+ if (val == ARRAY_SIZE(lab_current_limit_plan)) {
+ pr_err("Invalid property in qcom,qpnp-lab-limit-maximum-current\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node,
+ "qcom,qpnp-lab-limit-max-current-enable"))
+ val |= LAB_CURRENT_LIMIT_EN;
+
+ rc = qpnp_labibb_write(labibb, labibb->lab_base +
+ REG_LAB_CURRENT_LIMIT, &val, 1);
+ if (rc) {
+ pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_LAB_CURRENT_LIMIT, rc);
+ return rc;
+ }
+
+ if (of_property_read_bool(of_node,
+ "qcom,qpnp-lab-ring-suppression-enable")) {
+ val = LAB_RING_SUPPRESSION_CTL_EN;
+ rc = qpnp_labibb_write(labibb, labibb->lab_base +
+ REG_LAB_RING_SUPPRESSION_CTL,
+ &val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_LAB_RING_SUPPRESSION_CTL, rc);
+ return rc;
+ }
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-lab-ps-threshold", &tmp);
+
+ if (rc) {
+ pr_err("get qcom,qpnp-lab-ps-threshold failed rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ for (val = 0; val < ARRAY_SIZE(lab_ps_threshold_plan); val++)
+ if (lab_ps_threshold_plan[val] == tmp)
+ break;
+
+ if (val == ARRAY_SIZE(lab_ps_threshold_plan)) {
+ pr_err("Invalid property in qcom,qpnp-lab-ps-threshold\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node, "qcom,qpnp-lab-ps-enable"))
+ val |= LAB_PS_CTL_EN;
+
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + REG_LAB_PS_CTL,
+ &val, 1);
+
+ if (rc) {
+ pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_LAB_PS_CTL, rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-lab-pfet-size", &tmp);
+
+ if (rc) {
+ pr_err("get qcom,qpnp-lab-pfet-size, rc = %d\n", rc);
+ return rc;
+ }
+
+ for (val = 0; val < ARRAY_SIZE(lab_rdson_pfet_plan); val++)
+ if (tmp == lab_rdson_pfet_plan[val])
+ break;
+
+ if (val == ARRAY_SIZE(lab_rdson_pfet_plan)) {
+ pr_err("Invalid property in qcom,qpnp-lab-pfet-size\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-lab-nfet-size", &tmp);
+
+ if (rc) {
+ pr_err("get qcom,qpnp-lab-nfet-size, rc = %d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lab_rdson_nfet_plan); i++)
+ if (tmp == lab_rdson_nfet_plan[i])
+ break;
+
+ if (i == ARRAY_SIZE(lab_rdson_nfet_plan)) {
+		pr_err("Invalid property in qcom,qpnp-lab-nfet-size\n");
+ return -EINVAL;
+ }
+
+ val |= i << LAB_RDSON_MNGMNT_NFET_SHIFT;
+ val |= (LAB_RDSON_MNGMNT_NFET_SLEW_EN | LAB_RDSON_MNGMNT_PFET_SLEW_EN);
+
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + REG_LAB_RDSON_MNGMNT,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_LAB_RDSON_MNGMNT, rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-lab-init-voltage",
+ &(labibb->lab_vreg.curr_volt));
+ if (rc) {
+ pr_err("get qcom,qpnp-lab-init-voltage failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ if (!of_property_read_bool(of_node,
+ "qcom,qpnp-lab-use-default-voltage")) {
+ if (labibb->lab_vreg.curr_volt < labibb->lab_vreg.min_volt) {
+			pr_err("Invalid qcom,qpnp-lab-init-voltage property, qcom,qpnp-lab-init-voltage %d is less than the minimum voltage %d\n",
+ labibb->lab_vreg.curr_volt,
+ labibb->lab_vreg.min_volt);
+ return -EINVAL;
+ }
+
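+		/*
+		 * Convert the requested voltage into a register step count,
+		 * rounding up, relative to the minimum programmable voltage.
+		 */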
+ val = DIV_ROUND_UP(labibb->lab_vreg.curr_volt -
+ labibb->lab_vreg.min_volt,
+ labibb->lab_vreg.step_size);
+
+ if (val > LAB_VOLTAGE_SET_MASK) {
+			pr_err("Invalid qcom,qpnp-lab-init-voltage property, qcom,qpnp-lab-init-voltage %d is larger than the max supported voltage %d\n",
+ labibb->lab_vreg.curr_volt,
+ labibb->lab_vreg.min_volt +
+ labibb->lab_vreg.step_size *
+ LAB_VOLTAGE_SET_MASK);
+ return -EINVAL;
+ }
+
+ labibb->lab_vreg.curr_volt = val * labibb->lab_vreg.step_size +
+ labibb->lab_vreg.min_volt;
+ val |= LAB_VOLTAGE_OVERRIDE_EN;
+ } else {
+ val = 0;
+ }
+
+ rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+ REG_LAB_VOLTAGE,
+ LAB_VOLTAGE_SET_MASK |
+ LAB_VOLTAGE_OVERRIDE_EN,
+ val);
+
+ if (rc) {
+		pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_LAB_VOLTAGE, rc);
+
+ return rc;
+ }
+
+ if (labibb->swire_control) {
+ val = IBB_ENABLE_CTL_SWIRE_RDY;
+ rc = qpnp_labibb_write(labibb,
+ labibb->ibb_base + REG_IBB_ENABLE_CTL, &val, 1);
+ if (rc)
+ pr_err("Unable to set SWIRE_RDY rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+static int qpnp_labibb_restore_settings(struct qpnp_labibb *labibb)
+{
+ int rc, i;
+
+ for (i = 0; i < ARRAY_SIZE(ibb_settings); i++) {
+ if (ibb_settings[i].sec_access)
+ rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+ ibb_settings[i].address,
+ &ibb_settings[i].value, 1);
+ else
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+ ibb_settings[i].address,
+ &ibb_settings[i].value, 1);
+
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ ibb_settings[i].address, rc);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lab_settings); i++) {
+ if (lab_settings[i].sec_access)
+ rc = qpnp_labibb_sec_write(labibb, labibb->lab_base,
+ lab_settings[i].address,
+ &lab_settings[i].value, 1);
+ else
+ rc = qpnp_labibb_write(labibb, labibb->lab_base +
+ lab_settings[i].address,
+ &lab_settings[i].value, 1);
+
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ lab_settings[i].address, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qpnp_labibb_save_settings(struct qpnp_labibb *labibb)
+{
+ int rc, i;
+
+ for (i = 0; i < ARRAY_SIZE(ibb_settings); i++) {
+ rc = qpnp_labibb_read(labibb, &ibb_settings[i].value,
+ labibb->ibb_base +
+ ibb_settings[i].address, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+ ibb_settings[i].address, rc);
+ return rc;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lab_settings); i++) {
+ rc = qpnp_labibb_read(labibb, &lab_settings[i].value,
+ labibb->lab_base +
+ lab_settings[i].address, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+ lab_settings[i].address, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int qpnp_labibb_regulator_ttw_mode_enter(struct qpnp_labibb *labibb)
+{
+ int rc = 0;
+ u8 val;
+
+ /* Save the IBB settings before they get modified for TTW mode */
+ if (!labibb->ibb_settings_saved) {
+ rc = qpnp_labibb_save_settings(labibb);
+ if (rc) {
+			pr_err("Error in storing IBB settings, rc=%d\n", rc);
+ return rc;
+ }
+ labibb->ibb_settings_saved = true;
+ }
+
+ val = LAB_PD_CTL_DISABLE_PD;
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + REG_LAB_PD_CTL,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_LAB_PD_CTL, rc);
+ return rc;
+ }
+
+ val = LAB_SPARE_TOUCH_WAKE_BIT | LAB_SPARE_DISABLE_SCP_BIT;
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + REG_LAB_SPARE_CTL,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_LAB_SPARE_CTL, rc);
+ return rc;
+ }
+
+ val = 0;
+ rc = qpnp_labibb_write(labibb, labibb->lab_base +
+ REG_LAB_SOFT_START_CTL, &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_LAB_SOFT_START_CTL, rc);
+ return rc;
+ }
+
+ val = 0;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_PD_CTL,
+ &val, 1);
+ if (rc) {
+		pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_IBB_PD_CTL, rc);
+ return rc;
+ }
+
+ val = 0;
+ rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+ REG_IBB_PWRUP_PWRDN_CTL_1, &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_IBB_PWRUP_PWRDN_CTL_1, rc);
+ return rc;
+ }
+
+ val = IBB_WAIT_MBG_OK;
+ rc = qpnp_labibb_sec_masked_write(labibb, labibb->ibb_base,
+ REG_IBB_PWRUP_PWRDN_CTL_2,
+ IBB_DIS_DLY_MASK | IBB_WAIT_MBG_OK, val);
+ if (rc) {
+ pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+ REG_IBB_PWRUP_PWRDN_CTL_2, rc);
+ return rc;
+ }
+
+ val = IBB_NFET_SLEW_EN | IBB_PFET_SLEW_EN | IBB_OVERRIDE_NFET_SW_SIZE |
+ IBB_OVERRIDE_PFET_SW_SIZE;
+ rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+ REG_IBB_RDSON_MNGMNT, 0xFF, val);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_IBB_RDSON_MNGMNT, rc);
+ return rc;
+ }
+
+ val = IBB_CURRENT_LIMIT_EN | IBB_CURRENT_MAX_500MA |
+ (IBB_ILIMIT_COUNT_CYC8 << IBB_CURRENT_LIMIT_DEBOUNCE_SHIFT);
+ rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+ REG_IBB_CURRENT_LIMIT, &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_IBB_CURRENT_LIMIT, rc);
+ return rc;
+ }
+
+ val = IBB_BYPASS_PWRDN_DLY2_BIT | IBB_FAST_STARTUP;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_SPARE_CTL,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_IBB_SPARE_CTL, rc);
+ return rc;
+ }
+
+ val = IBB_ENABLE_CTL_SWIRE_RDY;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_ENABLE_CTL,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_IBB_ENABLE_CTL, rc);
+ return rc;
+ }
+ labibb->in_ttw_mode = true;
+ return 0;
+}
+
+static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
+{
+ int rc = 0;
+ u8 val;
+
+ if (!labibb->ibb_settings_saved) {
+ pr_err("IBB settings are not saved!\n");
+ return -EINVAL;
+ }
+
+ /* Restore the IBB settings back to switch back to normal mode */
+ rc = qpnp_labibb_restore_settings(labibb);
+ if (rc) {
+ pr_err("Error in restoring IBB setttings, rc=%d\n", rc);
+ return rc;
+ }
+
+ val = LAB_PD_CTL_STRONG_PULL;
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + REG_LAB_PD_CTL,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_LAB_PD_CTL, rc);
+ return rc;
+ }
+
+ val = 0;
+ rc = qpnp_labibb_write(labibb, labibb->lab_base + REG_LAB_SPARE_CTL,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_LAB_SPARE_CTL, rc);
+ return rc;
+ }
+
+ val = 0;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_SPARE_CTL,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_IBB_SPARE_CTL, rc);
+ return rc;
+ }
+
+ val = 0;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_ENABLE_CTL,
+ &val, 1);
+
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_IBB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ labibb->in_ttw_mode = false;
+ return rc;
+}
+
+static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
+{
+ int rc;
+ u8 val = IBB_ENABLE_CTL_MODULE_EN;
+ int dly;
+ int retries;
+ bool enabled = false;
+
+ if (labibb->ttw_en && !labibb->ibb_vreg.vreg_enabled &&
+ labibb->in_ttw_mode) {
+ rc = qpnp_labibb_regulator_ttw_mode_exit(labibb);
+ if (rc) {
+ pr_err("Error in exiting TTW mode rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_ENABLE_CTL,
+ &val, 1);
+
+ if (rc) {
+ pr_err("write register %x failed rc = %d\n",
+ REG_IBB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ /* total delay: LAB and IBB soft start plus IBB power-up delay */
+ dly = labibb->lab_vreg.soft_start + labibb->ibb_vreg.soft_start
+ + labibb->ibb_vreg.pwrup_dly;
+ usleep_range(dly, dly + 100);
+
+ /* after this delay, lab should be enabled */
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->lab_base + REG_LAB_STATUS1, 1);
+ if (rc) {
+ pr_err("read register %x failed rc = %d\n",
+ REG_LAB_STATUS1, rc);
+ goto err_out;
+ }
+
+ pr_debug("soft=%d %d up=%d dly=%d\n",
+ labibb->lab_vreg.soft_start, labibb->ibb_vreg.soft_start,
+ labibb->ibb_vreg.pwrup_dly, dly);
+
+ if (!(val & LAB_STATUS1_VREG_OK)) {
+ pr_err("failed for LAB %x\n", val);
+ goto err_out;
+ }
+
+ /* poll IBB_STATUS to make sure ibb had been enabled */
+ dly = labibb->ibb_vreg.soft_start + labibb->ibb_vreg.pwrup_dly;
+ retries = 10;
+ while (retries--) {
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->ibb_base + REG_IBB_STATUS1, 1);
+ if (rc) {
+ pr_err("read register %x failed rc = %d\n",
+ REG_IBB_STATUS1, rc);
+ goto err_out;
+ }
+
+ if (val & IBB_STATUS1_VREG_OK) {
+ enabled = true;
+ break;
+ }
+ usleep_range(dly, dly + 100);
+ }
+
+ if (!enabled) {
+ pr_err("failed for IBB %x\n", val);
+ goto err_out;
+ }
+
+ labibb->lab_vreg.vreg_enabled = 1;
+ labibb->ibb_vreg.vreg_enabled = 1;
+
+ return 0;
+err_out:
+ val = 0;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_ENABLE_CTL,
+ &val, 1);
+ if (rc)
+ pr_err("write register %x failed rc = %d\n",
+ REG_IBB_ENABLE_CTL, rc);
+ return -EINVAL;
+}
+
+static int qpnp_labibb_regulator_disable(struct qpnp_labibb *labibb)
+{
+ int rc;
+ u8 val;
+ int dly;
+ int retries;
+ bool disabled = false;
+
+ val = 0;
+ rc = qpnp_labibb_write(labibb,
+ labibb->ibb_base + REG_IBB_ENABLE_CTL, &val, 1);
+ if (rc) {
+ pr_err("write register %x failed rc = %d\n",
+ REG_IBB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ /* poll IBB_STATUS to make sure ibb had been disabled */
+ dly = labibb->ibb_vreg.pwrdn_dly;
+ retries = 2;
+ while (retries--) {
+ usleep_range(dly, dly + 100);
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->ibb_base + REG_IBB_STATUS1, 1);
+ if (rc) {
+ pr_err("read register %x failed rc = %d\n",
+ REG_IBB_STATUS1, rc);
+ return rc;
+ }
+
+ if (!(val & IBB_STATUS1_VREG_OK)) {
+ disabled = true;
+ break;
+ }
+ }
+
+ if (!disabled) {
+ pr_err("failed for IBB %x\n", val);
+ return -EINVAL;
+ }
+
+ labibb->lab_vreg.vreg_enabled = 0;
+ labibb->ibb_vreg.vreg_enabled = 0;
+
+ if (labibb->ttw_en && !labibb->in_ttw_mode) {
+ rc = qpnp_labibb_regulator_ttw_mode_enter(labibb);
+ if (rc) {
+ pr_err("Error in entering TTW mode rc = %d\n", rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc;
+ u8 val;
+
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (!labibb->lab_vreg.vreg_enabled && !labibb->swire_control) {
+
+ if (labibb->mode != QPNP_LABIBB_STANDALONE_MODE)
+ return qpnp_labibb_regulator_enable(labibb);
+
+ val = LAB_ENABLE_CTL_EN;
+ rc = qpnp_labibb_write(labibb,
+ labibb->lab_base + REG_LAB_ENABLE_CTL, &val, 1);
+ if (rc) {
+ pr_err("qpnp_lab_regulator_enable write register %x failed rc = %d\n",
+ REG_LAB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ udelay(labibb->lab_vreg.soft_start);
+
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->lab_base + REG_LAB_STATUS1, 1);
+ if (rc) {
+ pr_err("qpnp_lab_regulator_enable read register %x failed rc = %d\n",
+ REG_LAB_STATUS1, rc);
+ return rc;
+ }
+
+ if ((val & LAB_STATUS1_VREG_OK_MASK) != LAB_STATUS1_VREG_OK) {
+ pr_err("qpnp_lab_regulator_enable failed\n");
+ return -EINVAL;
+ }
+
+ labibb->lab_vreg.vreg_enabled = 1;
+ }
+
+ return 0;
+}
+
+static int qpnp_lab_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc;
+ u8 val;
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (labibb->lab_vreg.vreg_enabled && !labibb->swire_control) {
+
+ if (labibb->mode != QPNP_LABIBB_STANDALONE_MODE)
+ return qpnp_labibb_regulator_disable(labibb);
+
+ val = 0;
+ rc = qpnp_labibb_write(labibb,
+ labibb->lab_base + REG_LAB_ENABLE_CTL, &val, 1);
+ if (rc) {
+ pr_err("qpnp_lab_regulator_enable write register %x failed rc = %d\n",
+ REG_LAB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ labibb->lab_vreg.vreg_enabled = 0;
+ }
+ return 0;
+}
+
+static int qpnp_lab_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (labibb->swire_control)
+ return 0;
+
+ return labibb->lab_vreg.vreg_enabled;
+}
+
+static int qpnp_lab_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ int rc, new_uV;
+ u8 val;
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (labibb->swire_control)
+ return 0;
+
+ if (min_uV < labibb->lab_vreg.min_volt) {
+ pr_err("qpnp_lab_regulator_set_voltage failed, min_uV %d is less than min_volt %d",
+ min_uV, labibb->lab_vreg.min_volt);
+ }
+
+ val = DIV_ROUND_UP(min_uV - labibb->lab_vreg.min_volt,
+ labibb->lab_vreg.step_size);
+ new_uV = val * labibb->lab_vreg.step_size + labibb->lab_vreg.min_volt;
+
+ if (new_uV > max_uV) {
+ pr_err("qpnp_lab_regulator_set_voltage unable to set voltage (%d %d)\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+ REG_LAB_VOLTAGE,
+ LAB_VOLTAGE_SET_MASK |
+ LAB_VOLTAGE_OVERRIDE_EN,
+ val | LAB_VOLTAGE_OVERRIDE_EN);
+
+ if (rc) {
+ pr_err("qpnp_lab_regulator_set_voltage write register %x failed rc = %d\n",
+ REG_LAB_VOLTAGE, rc);
+
+ return rc;
+ }
+
+ if (new_uV > labibb->lab_vreg.curr_volt)
+ udelay(val * labibb->lab_vreg.slew_rate);
+ labibb->lab_vreg.curr_volt = new_uV;
+
+ return 0;
+}
+
+static int qpnp_lab_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (labibb->swire_control)
+ return 0;
+
+ return labibb->lab_vreg.curr_volt;
+}
+
+static struct regulator_ops qpnp_lab_ops = {
+ .enable = qpnp_lab_regulator_enable,
+ .disable = qpnp_lab_regulator_disable,
+ .is_enabled = qpnp_lab_regulator_is_enabled,
+ .set_voltage = qpnp_lab_regulator_set_voltage,
+ .get_voltage = qpnp_lab_regulator_get_voltage,
+};
+
+static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
+ struct device_node *of_node)
+{
+ int rc = 0;
+ struct regulator_init_data *init_data;
+ struct regulator_desc *rdesc;
+ struct regulator_config cfg = {};
+ u8 val;
+ const char *current_sense_str;
+ bool config_current_sense = false;
+ u32 tmp;
+
+ if (!of_node) {
+ dev_err(labibb->dev, "qpnp lab regulator device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ init_data = of_get_regulator_init_data(labibb->dev, of_node);
+ if (!init_data) {
+ pr_err("unable to get regulator init data for qpnp lab regulator\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-lab-min-voltage",
+ &(labibb->lab_vreg.min_volt));
+ if (rc < 0) {
+ pr_err("qcom,qpnp-lab-min-voltage is missing, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-lab-step-size",
+ &(labibb->lab_vreg.step_size));
+ if (rc < 0) {
+ pr_err("qcom,qpnp-lab-step-size is missing, rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-lab-slew-rate",
+ &(labibb->lab_vreg.slew_rate));
+ if (rc < 0) {
+ pr_err("qcom,qpnp-lab-slew-rate is missing, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start",
+ &(labibb->lab_vreg.soft_start));
+ if (rc < 0) {
+ pr_err("qcom,qpnp-lab-soft-start is missing, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ for (val = 0; val < ARRAY_SIZE(lab_soft_start_plan); val++)
+ if (lab_soft_start_plan[val] == labibb->lab_vreg.soft_start)
+ break;
+
+ if (val == ARRAY_SIZE(lab_soft_start_plan))
+ val = ARRAY_SIZE(lab_soft_start_plan) - 1;
+
+ rc = qpnp_labibb_write(labibb, labibb->lab_base +
+ REG_LAB_SOFT_START_CTL, &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_LAB_SOFT_START_CTL, rc);
+ return rc;
+ }
+
+ labibb->lab_vreg.soft_start = lab_soft_start_plan
+ [val & LAB_SOFT_START_CTL_MASK];
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-lab-max-precharge-time",
+ &tmp);
+ if (rc) {
+ pr_err("get qcom,qpnp-lab-max-precharge-time failed, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ for (val = 0; val < ARRAY_SIZE(lab_max_precharge_plan); val++)
+ if (lab_max_precharge_plan[val] == tmp)
+ break;
+
+ if (val == ARRAY_SIZE(lab_max_precharge_plan)) {
+ pr_err("Invalid property in qcom,qpnp-lab-max-precharge-time\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node,
+ "qcom,qpnp-lab-max-precharge-enable"))
+ val |= LAB_PRECHARGE_CTL_EN;
+
+ rc = qpnp_labibb_write(labibb, labibb->lab_base +
+ REG_LAB_PRECHARGE_CTL, &val, 1);
+ if (rc) {
+ pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_LAB_PRECHARGE_CTL, rc);
+ return rc;
+ }
+
+ if (labibb->mode == QPNP_LABIBB_AMOLED_MODE) {
+ /*
+ * default to 1.5 times current gain if
+ * user doesn't specify the current-sense
+ * dt parameter
+ */
+ current_sense_str = "1.5x";
+ val = qpnp_labibb_get_matching_idx(current_sense_str);
+ config_current_sense = true;
+ }
+
+ if (of_find_property(of_node,
+ "qpnp,qpnp-lab-current-sense", NULL)) {
+ config_current_sense = true;
+ rc = of_property_read_string(of_node,
+ "qpnp,qpnp-lab-current-sense",
+ &current_sense_str);
+ if (!rc) {
+ val = qpnp_labibb_get_matching_idx(
+ current_sense_str);
+ } else {
+ pr_err("qpnp,qpnp-lab-current-sense configured incorrectly rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ if (config_current_sense) {
+ rc = qpnp_labibb_masked_write(labibb, labibb->lab_base +
+ REG_LAB_CURRENT_SENSE,
+ LAB_CURRENT_SENSE_GAIN_MASK,
+ val);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_LAB_CURRENT_SENSE, rc);
+ return rc;
+ }
+ }
+
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->ibb_base + REG_IBB_ENABLE_CTL, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+ REG_IBB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ if (!(val & (IBB_ENABLE_CTL_SWIRE_RDY | IBB_ENABLE_CTL_MODULE_EN))) {
+ /* SWIRE_RDY and IBB_MODULE_EN not enabled */
+ rc = qpnp_lab_dt_init(labibb, of_node);
+ if (rc) {
+ pr_err("qpnp-lab: wrong DT parameter specified: rc = %d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->lab_base + REG_LAB_LCD_AMOLED_SEL, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+ REG_LAB_LCD_AMOLED_SEL, rc);
+ return rc;
+ }
+
+ if (val == REG_LAB_IBB_AMOLED_MODE)
+ labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+ else
+ labibb->mode = QPNP_LABIBB_LCD_MODE;
+
+ rc = qpnp_labibb_read(labibb, &val, labibb->lab_base +
+ REG_LAB_VOLTAGE, 1);
+ if (rc) {
+ pr_err("qpnp_lab_read read register %x failed rc = %d\n",
+ REG_LAB_VOLTAGE, rc);
+ return rc;
+ }
+
+ if (val & LAB_VOLTAGE_OVERRIDE_EN) {
+ labibb->lab_vreg.curr_volt =
+ (val &
+ LAB_VOLTAGE_SET_MASK) *
+ labibb->lab_vreg.step_size +
+ labibb->lab_vreg.min_volt;
+ } else if (labibb->mode == QPNP_LABIBB_LCD_MODE) {
+ rc = of_property_read_u32(of_node,
+ "qcom,qpnp-lab-init-lcd-voltage",
+ &(labibb->lab_vreg.curr_volt));
+ if (rc) {
+ pr_err("get qcom,qpnp-lab-init-lcd-voltage failed, rc = %d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ rc = of_property_read_u32(of_node,
+ "qcom,qpnp-lab-init-amoled-voltage",
+ &(labibb->lab_vreg.curr_volt));
+ if (rc) {
+ pr_err("get qcom,qpnp-lab-init-amoled-voltage failed, rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ labibb->lab_vreg.vreg_enabled = 1;
+ }
+
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->lab_base + REG_LAB_MODULE_RDY, 1);
+ if (rc) {
+ pr_err("qpnp_lab_read read register %x failed rc = %d\n",
+ REG_LAB_MODULE_RDY, rc);
+ return rc;
+ }
+
+ if (labibb->mode != QPNP_LABIBB_STANDALONE_MODE &&
+ !(val & LAB_MODULE_RDY_EN)) {
+ val = LAB_MODULE_RDY_EN;
+
+ rc = qpnp_labibb_write(labibb, labibb->lab_base +
+ REG_LAB_MODULE_RDY, &val, 1);
+
+ if (rc) {
+ pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_LAB_MODULE_RDY, rc);
+ return rc;
+ }
+ }
+
+ if (init_data->constraints.name) {
+ rdesc = &(labibb->lab_vreg.rdesc);
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &qpnp_lab_ops;
+ rdesc->name = init_data->constraints.name;
+
+ cfg.dev = labibb->dev;
+ cfg.init_data = init_data;
+ cfg.driver_data = labibb;
+ cfg.of_node = of_node;
+
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS;
+
+ labibb->lab_vreg.rdev = regulator_register(rdesc, &cfg);
+ if (IS_ERR(labibb->lab_vreg.rdev)) {
+ rc = PTR_ERR(labibb->lab_vreg.rdev);
+ labibb->lab_vreg.rdev = NULL;
+ pr_err("unable to get regulator init data for qpnp lab regulator, rc = %d\n",
+ rc);
+
+ return rc;
+ }
+ } else {
+ dev_err(labibb->dev, "qpnp lab regulator name missing\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&(labibb->lab_vreg.lab_mutex));
+ return 0;
+}
+
+static int qpnp_ibb_dt_init(struct qpnp_labibb *labibb,
+ struct device_node *of_node)
+{
+ int rc = 0;
+ u32 i, tmp;
+ u8 val;
+
+ if (labibb->mode != QPNP_LABIBB_STANDALONE_MODE) {
+ if (labibb->mode == QPNP_LABIBB_LCD_MODE)
+ val = REG_LAB_IBB_LCD_MODE;
+ else
+ val = REG_LAB_IBB_AMOLED_MODE;
+
+ rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+ REG_LAB_LCD_AMOLED_SEL, &val, 1);
+
+ if (rc) {
+ pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+ REG_IBB_LCD_AMOLED_SEL, rc);
+ return rc;
+ }
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-lab-pwrdn-delay",
+ &tmp);
+ if (rc < 0) {
+ pr_err("qcom,qpnp-ibb-lab-pwrdn-delay is missing, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ val = 0;
+
+ for (val = 0; val < ARRAY_SIZE(ibb_pwrdn_dly_plan); val++)
+ if (ibb_pwrdn_dly_plan[val] == tmp)
+ break;
+
+ if (val == ARRAY_SIZE(ibb_pwrdn_dly_plan)) {
+ pr_err("Invalid property in qcom,qpnp-ibb-lab-pwrdn-delay\n");
+ return -EINVAL;
+ }
+
+ labibb->ibb_vreg.pwrdn_dly = tmp;
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-lab-pwrup-delay",
+ &tmp);
+ if (rc < 0) {
+ pr_err("qcom,qpnp-ibb-lab-pwrup-delay is missing, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibb_pwrup_dly_plan); i++)
+ if (ibb_pwrup_dly_plan[i] == tmp)
+ break;
+
+ if (i == ARRAY_SIZE(ibb_pwrup_dly_plan)) {
+ pr_err("Invalid property in qcom,qpnp-ibb-lab-pwrup-delay\n");
+ return -EINVAL;
+ }
+
+ labibb->ibb_vreg.pwrup_dly = tmp;
+
+ val |= (i << IBB_PWRUP_PWRDN_CTL_1_DLY1_SHIFT);
+
+ if (of_property_read_bool(of_node, "qcom,qpnp-ibb-en-discharge"))
+ val |= PWRUP_PWRDN_CTL_1_DISCHARGE_EN;
+
+ if (labibb->mode != QPNP_LABIBB_STANDALONE_MODE)
+ val |= (IBB_PWRUP_PWRDN_CTL_1_EN_DLY1 |
+ IBB_PWRUP_PWRDN_CTL_1_LAB_VREG_OK);
+
+ rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+ REG_IBB_PWRUP_PWRDN_CTL_1,
+ &val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+ REG_IBB_PWRUP_PWRDN_CTL_1, rc);
+ return rc;
+ }
+
+ val = 0;
+
+ if (!of_property_read_bool(of_node, "qcom,qpnp-ibb-full-pull-down"))
+ val |= IBB_PD_CTL_HALF_STRENGTH;
+
+ if (of_property_read_bool(of_node, "qcom,qpnp-ibb-pull-down-enable"))
+ val |= IBB_PD_CTL_EN;
+
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_PD_CTL,
+ &val, 1);
+
+ if (rc) {
+ pr_err("qpnp_lab_dt_init write register %x failed rc = %d\n",
+ REG_IBB_PD_CTL, rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node,
+ "qcom,qpnp-ibb-switching-clock-frequency", &tmp);
+ if (rc) {
+ pr_err("get qcom,qpnp-ibb-switching-clock-frequency failed rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ for (val = 0; val < ARRAY_SIZE(ibb_clk_div_plan); val++)
+ if (ibb_clk_div_plan[val] == tmp)
+ break;
+
+ if (val == ARRAY_SIZE(ibb_clk_div_plan)) {
+ pr_err("Invalid property in qpnp-ibb-switching-clock-frequency\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base + REG_IBB_CLK_DIV,
+ &val, 1);
+ if (rc) {
+ pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+ REG_IBB_CLK_DIV, rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node,
+ "qcom,qpnp-ibb-limit-maximum-current", &tmp);
+
+ if (rc) {
+ pr_err("get qcom,qpnp-ibb-limit-maximum-current failed rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ for (val = 0; val < ARRAY_SIZE(ibb_current_limit_plan); val++)
+ if (ibb_current_limit_plan[val] == tmp)
+ break;
+
+ if (val == ARRAY_SIZE(ibb_current_limit_plan)) {
+ pr_err("Invalid property in qcom,qpnp-ibb-limit-maximum-current\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-debounce-cycle",
+ &tmp);
+
+ if (rc) {
+ pr_err("get qcom,qpnp-ibb-debounce-cycle failed rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ibb_debounce_plan); i++)
+ if (ibb_debounce_plan[i] == tmp)
+ break;
+
+ if (i == ARRAY_SIZE(ibb_debounce_plan)) {
+ pr_err("Invalid property in qcom,qpnp-ibb-debounce-cycle\n");
+ return -EINVAL;
+ }
+
+ val |= (i << IBB_CURRENT_LIMIT_DEBOUNCE_SHIFT);
+
+ if (of_property_read_bool(of_node,
+ "qcom,qpnp-ibb-limit-max-current-enable"))
+ val |= IBB_CURRENT_LIMIT_EN;
+
+ rc = qpnp_labibb_sec_write(labibb, labibb->ibb_base,
+ REG_IBB_CURRENT_LIMIT,
+ &val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_labibb_sec_write register %x failed rc = %d\n",
+ REG_IBB_CURRENT_LIMIT, rc);
+ return rc;
+ }
+
+ if (of_property_read_bool(of_node,
+ "qcom,qpnp-ibb-ring-suppression-enable")) {
+ val = IBB_RING_SUPPRESSION_CTL_EN;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+ REG_IBB_RING_SUPPRESSION_CTL,
+ &val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+ REG_IBB_RING_SUPPRESSION_CTL, rc);
+ return rc;
+ }
+ }
+
+ if (of_property_read_bool(of_node, "qcom,qpnp-ibb-ps-enable")) {
+ val = IBB_PS_CTL_EN;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+ REG_IBB_PS_CTL,
+ &val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+ REG_IBB_PS_CTL, rc);
+ return rc;
+ }
+
+ val = IBB_NLIMIT_DAC_EN;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+ REG_IBB_NLIMIT_DAC,
+ &val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+ REG_IBB_NLIMIT_DAC, rc);
+ return rc;
+ }
+ } else {
+ val = IBB_PS_CTL_DISABLE;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+ REG_IBB_PS_CTL,
+ &val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+ REG_IBB_PS_CTL, rc);
+ return rc;
+ }
+
+ val = IBB_NLIMIT_DAC_DISABLE;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+ REG_IBB_NLIMIT_DAC,
+ &val,
+ 1);
+ if (rc) {
+ pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+ REG_IBB_NLIMIT_DAC, rc);
+ return rc;
+ }
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-init-voltage",
+ &(labibb->ibb_vreg.curr_volt));
+ if (rc) {
+ pr_err("get qcom,qpnp-ibb-init-voltage failed, rc = %d\n", rc);
+ return rc;
+ }
+
+ if (!of_property_read_bool(of_node,
+ "qcom,qpnp-ibb-use-default-voltage")) {
+ if (labibb->ibb_vreg.curr_volt < labibb->ibb_vreg.min_volt) {
+ pr_err("Invalid qcom,qpnp-ibb-init-voltage property, qcom,qpnp-ibb-init-voltage %d is less than the the minimum voltage %d",
+ labibb->ibb_vreg.curr_volt,
+ labibb->ibb_vreg.min_volt);
+ return -EINVAL;
+ }
+
+ val = DIV_ROUND_UP(labibb->ibb_vreg.curr_volt -
+ labibb->ibb_vreg.min_volt,
+ labibb->ibb_vreg.step_size);
+
+ if (val > IBB_VOLTAGE_SET_MASK) {
+ pr_err("Invalid qcom,qpnp-ibb-init-voltage property, qcom,qpnp-lab-init-voltage %d is larger than the max supported voltage %d",
+ labibb->ibb_vreg.curr_volt,
+ labibb->ibb_vreg.min_volt +
+ labibb->ibb_vreg.step_size *
+ IBB_VOLTAGE_SET_MASK);
+ return -EINVAL;
+ }
+
+ labibb->ibb_vreg.curr_volt = val * labibb->ibb_vreg.step_size +
+ labibb->ibb_vreg.min_volt;
+ val |= IBB_VOLTAGE_OVERRIDE_EN;
+ } else {
+ val = 0;
+ }
+
+ rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+ REG_IBB_VOLTAGE,
+ IBB_VOLTAGE_SET_MASK |
+ IBB_VOLTAGE_OVERRIDE_EN,
+ val);
+
+ if (rc)
+ pr_err("qpnp_ibb_masked_write write register %x failed rc = %d\n",
+ REG_IBB_VOLTAGE, rc);
+
+
+ return rc;
+}
+
+static int qpnp_ibb_regulator_enable(struct regulator_dev *rdev)
+{
+ int rc;
+ u8 val;
+
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (!labibb->ibb_vreg.vreg_enabled && !labibb->swire_control) {
+
+ if (labibb->mode != QPNP_LABIBB_STANDALONE_MODE)
+ return qpnp_labibb_regulator_enable(labibb);
+
+ val = IBB_ENABLE_CTL_MODULE_EN;
+ rc = qpnp_labibb_write(labibb,
+ labibb->ibb_base + REG_IBB_ENABLE_CTL, &val, 1);
+ if (rc) {
+ pr_err("qpnp_ibb_regulator_enable write register %x failed rc = %d\n",
+ REG_IBB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ udelay(labibb->ibb_vreg.soft_start);
+
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->ibb_base + REG_IBB_STATUS1, 1);
+ if (rc) {
+ pr_err("qpnp_ibb_regulator_enable read register %x failed rc = %d\n",
+ REG_IBB_STATUS1, rc);
+ return rc;
+ }
+
+ if ((val & IBB_STATUS1_VREG_OK_MASK) != IBB_STATUS1_VREG_OK) {
+ pr_err("qpnp_ibb_regulator_enable failed\n");
+ return -EINVAL;
+ }
+
+ labibb->ibb_vreg.vreg_enabled = 1;
+ }
+ return 0;
+}
+
+static int qpnp_ibb_regulator_disable(struct regulator_dev *rdev)
+{
+ int rc;
+ u8 val;
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (labibb->ibb_vreg.vreg_enabled && !labibb->swire_control) {
+
+ if (labibb->mode != QPNP_LABIBB_STANDALONE_MODE)
+ return qpnp_labibb_regulator_disable(labibb);
+
+ val = 0;
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+ REG_IBB_ENABLE_CTL, &val, 1);
+ if (rc) {
+ pr_err("qpnp_ibb_regulator_enable write register %x failed rc = %d\n",
+ REG_IBB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ labibb->ibb_vreg.vreg_enabled = 0;
+ }
+ return 0;
+}
+
+static int qpnp_ibb_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (labibb->swire_control)
+ return 0;
+
+ return labibb->ibb_vreg.vreg_enabled;
+}
+
+static int qpnp_ibb_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ int rc, new_uV;
+ u8 val;
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (labibb->swire_control)
+ return 0;
+
+ if (min_uV < labibb->ibb_vreg.min_volt) {
+ pr_err("qpnp_ibb_regulator_set_voltage failed, min_uV %d is less than min_volt %d",
+ min_uV, labibb->ibb_vreg.min_volt);
+ return -EINVAL;
+ }
+
+ val = DIV_ROUND_UP(min_uV - labibb->ibb_vreg.min_volt,
+ labibb->ibb_vreg.step_size);
+ new_uV = val * labibb->ibb_vreg.step_size + labibb->ibb_vreg.min_volt;
+
+ if (new_uV > max_uV) {
+ pr_err("qpnp_ibb_regulator_set_voltage unable to set voltage (%d %d)\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+ REG_IBB_VOLTAGE,
+ IBB_VOLTAGE_SET_MASK |
+ IBB_VOLTAGE_OVERRIDE_EN,
+ val | IBB_VOLTAGE_OVERRIDE_EN);
+
+ if (rc) {
+ pr_err("qpnp_ibb_regulator_set_voltage write register %x failed rc = %d\n",
+ REG_IBB_VOLTAGE, rc);
+
+ return rc;
+ }
+
+ if (new_uV > labibb->ibb_vreg.curr_volt)
+ udelay(val * labibb->ibb_vreg.slew_rate);
+ labibb->ibb_vreg.curr_volt = new_uV;
+
+ return 0;
+}
+
+static int qpnp_ibb_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+
+ if (labibb->swire_control)
+ return 0;
+
+ return labibb->ibb_vreg.curr_volt;
+}
+
+static struct regulator_ops qpnp_ibb_ops = {
+ .enable = qpnp_ibb_regulator_enable,
+ .disable = qpnp_ibb_regulator_disable,
+ .is_enabled = qpnp_ibb_regulator_is_enabled,
+ .set_voltage = qpnp_ibb_regulator_set_voltage,
+ .get_voltage = qpnp_ibb_regulator_get_voltage,
+};
+
+static int register_qpnp_ibb_regulator(struct qpnp_labibb *labibb,
+ struct device_node *of_node)
+{
+ int rc = 0;
+ struct regulator_init_data *init_data;
+ struct regulator_desc *rdesc;
+ struct regulator_config cfg = {};
+ u8 val, ibb_enable_ctl;
+ u32 tmp;
+
+ if (!of_node) {
+ dev_err(labibb->dev, "qpnp ibb regulator device tree node is missing\n");
+ return -EINVAL;
+ }
+
+ init_data = of_get_regulator_init_data(labibb->dev, of_node);
+ if (!init_data) {
+ pr_err("unable to get regulator init data for qpnp ibb regulator\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-min-voltage",
+ &(labibb->ibb_vreg.min_volt));
+ if (rc < 0) {
+ pr_err("qcom,qpnp-ibb-min-voltage is missing, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-step-size",
+ &(labibb->ibb_vreg.step_size));
+ if (rc < 0) {
+ pr_err("qcom,qpnp-ibb-step-size is missing, rc = %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-slew-rate",
+ &(labibb->ibb_vreg.slew_rate));
+ if (rc < 0) {
+ pr_err("qcom,qpnp-ibb-slew-rate is missing, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-soft-start",
+ &(labibb->ibb_vreg.soft_start));
+ if (rc < 0) {
+ pr_err("qcom,qpnp-ibb-soft-start is missing, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,qpnp-ibb-discharge-resistor",
+ &tmp);
+
+ if (rc < 0) {
+ pr_err("qcom,qpnp-ibb-discharge-resistor is missing, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ if (labibb->mode == QPNP_LABIBB_AMOLED_MODE) {
+ /*
+ * AMOLED mode needs ibb discharge resistor to be
+ * configured for 300KOhm
+ */
+ if (tmp < ibb_discharge_resistor_plan[0])
+ tmp = ibb_discharge_resistor_plan[0];
+ }
+
+ for (val = 0; val < ARRAY_SIZE(ibb_discharge_resistor_plan); val++)
+ if (ibb_discharge_resistor_plan[val] == tmp)
+ break;
+
+ if (val == ARRAY_SIZE(ibb_discharge_resistor_plan)) {
+ pr_err("Invalid property in qcom,qpnp-ibb-discharge-resistor\n");
+ return -EINVAL;
+ }
+
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+ REG_IBB_SOFT_START_CTL, &val, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_write register %x failed rc = %d\n",
+ REG_IBB_SOFT_START_CTL, rc);
+ return rc;
+ }
+
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->ibb_base + REG_IBB_REVISION4, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+ REG_IBB_REVISION4, rc);
+ return rc;
+ }
+
+ /* PMI8996 has revision 1 */
+ if (val < 1 && labibb->ttw_en) {
+ pr_err("TTW feature cannot be enabled for revision %d\n", val);
+ labibb->ttw_en = false;
+ }
+
+ if (of_find_property(of_node, "qcom,output-voltage-one-pulse", NULL)) {
+ if (!labibb->swire_control) {
+ pr_err("Invalid property 'qcom,output-voltage-one-pulse', valid only in SWIRE config\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(of_node,
+ "qcom,output-voltage-one-pulse", &tmp);
+ if (rc) {
+ pr_err("failed to read qcom,output-voltage-one-pulse rc=%d\n",
+ rc);
+ return rc;
+ }
+ if (tmp > MAX_OUTPUT_PULSE_VOLTAGE_MV ||
+ tmp < MIN_OUTPUT_PULSE_VOLTAGE_MV) {
+ pr_err("Invalid one-pulse voltage range %d\n", tmp);
+ return -EINVAL;
+ }
+
+ /*
+ * Set the output voltage 100mV lower as the IBB HW module
+ * counts one pulse less in SWIRE mode.
+ */
+ val = DIV_ROUND_UP((tmp - MIN_OUTPUT_PULSE_VOLTAGE_MV),
+ OUTPUT_VOLTAGE_STEP_MV) - 1;
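+ /*
+ * Example (assumed numbers, since the constants are defined earlier in
+ * this file): with a 100 mV OUTPUT_VOLTAGE_STEP_MV, a request 500 mV
+ * above MIN_OUTPUT_PULSE_VOLTAGE_MV gives DIV_ROUND_UP(500, 100) - 1 = 4,
+ * i.e. one step below the nominal setting to account for the missing
+ * SWIRE pulse.
+ */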
+ rc = qpnp_labibb_masked_write(labibb, labibb->ibb_base +
+ REG_IBB_SWIRE_CTL,
+ IBB_OUTPUT_VOLTAGE_AT_ONE_PULSE_MASK,
+ val);
+ if (rc) {
+ pr_err("qpnp_labiibb_write register %x failed rc = %d\n",
+ REG_IBB_SWIRE_CTL, rc);
+ return rc;
+ }
+ }
+
+ rc = qpnp_labibb_read(labibb, &ibb_enable_ctl,
+ labibb->ibb_base + REG_IBB_ENABLE_CTL, 1);
+ if (rc) {
+ pr_err("qpnp_ibb_read register %x failed rc = %d\n",
+ REG_IBB_ENABLE_CTL, rc);
+ return rc;
+ }
+
+ if (ibb_enable_ctl &
+ (IBB_ENABLE_CTL_SWIRE_RDY | IBB_ENABLE_CTL_MODULE_EN)) {
+ /* SWIRE_RDY or IBB_MODULE_EN enabled */
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->ibb_base + REG_IBB_LCD_AMOLED_SEL, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_read register %x failed rc = %d\n",
+ REG_IBB_LCD_AMOLED_SEL, rc);
+ return rc;
+ }
+
+ if (val == REG_LAB_IBB_AMOLED_MODE)
+ labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+ else
+ labibb->mode = QPNP_LABIBB_LCD_MODE;
+
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->ibb_base + REG_IBB_VOLTAGE, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_read read register %x failed rc = %d\n",
+ REG_IBB_VOLTAGE, rc);
+ return rc;
+ }
+
+ if (val & IBB_VOLTAGE_OVERRIDE_EN) {
+ labibb->ibb_vreg.curr_volt =
+ (val & IBB_VOLTAGE_SET_MASK) *
+ labibb->ibb_vreg.step_size +
+ labibb->ibb_vreg.min_volt;
+ } else if (labibb->mode == QPNP_LABIBB_LCD_MODE) {
+ rc = of_property_read_u32(of_node,
+ "qcom,qpnp-ibb-init-lcd-voltage",
+ &(labibb->ibb_vreg.curr_volt));
+ if (rc) {
+ pr_err("get qcom,qpnp-ibb-init-lcd-voltage failed, rc = %d\n",
+ rc);
+ return rc;
+ }
+ } else {
+ rc = of_property_read_u32(of_node,
+ "qcom,qpnp-ibb-init-amoled-voltage",
+ &(labibb->ibb_vreg.curr_volt));
+ if (rc) {
+ pr_err("get qcom,qpnp-ibb-init-amoled-voltage failed, rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ }
+
+ rc = qpnp_labibb_read(labibb, &val, labibb->ibb_base +
+ REG_IBB_PWRUP_PWRDN_CTL_1, 1);
+ if (rc) {
+ pr_err("qpnp_labibb_config_init read register %x failed rc = %d\n",
+ REG_IBB_PWRUP_PWRDN_CTL_1, rc);
+ return rc;
+ }
+
+ labibb->ibb_vreg.pwrup_dly = ibb_pwrup_dly_plan[
+ (val >>
+ IBB_PWRUP_PWRDN_CTL_1_DLY1_SHIFT) &
+ IBB_PWRUP_PWRDN_CTL_1_DLY1_MASK];
+ labibb->ibb_vreg.pwrdn_dly = ibb_pwrdn_dly_plan[val &
+ IBB_PWRUP_PWRDN_CTL_1_DLY2_MASK];
+
+ labibb->ibb_vreg.vreg_enabled = 1;
+ } else {
+ /* SWIRE_RDY and IBB_MODULE_EN not enabled */
+ rc = qpnp_ibb_dt_init(labibb, of_node);
+ if (rc) {
+ pr_err("qpnp-ibb: wrong DT parameter specified: rc = %d\n",
+ rc);
+ return rc;
+ }
+ }
+ rc = qpnp_labibb_read(labibb, &val,
+ labibb->ibb_base + REG_IBB_MODULE_RDY, 1);
+ if (rc) {
+ pr_err("qpnp_ibb_read read register %x failed rc = %d\n",
+ REG_IBB_MODULE_RDY, rc);
+ return rc;
+ }
+
+ if (labibb->mode != QPNP_LABIBB_STANDALONE_MODE &&
+ !(val & IBB_MODULE_RDY_EN)) {
+ val = IBB_MODULE_RDY_EN;
+
+ rc = qpnp_labibb_write(labibb, labibb->ibb_base +
+ REG_IBB_MODULE_RDY, &val, 1);
+
+ if (rc) {
+ pr_err("qpnp_ibb_dt_init write register %x failed rc = %d\n",
+ REG_IBB_MODULE_RDY, rc);
+ return rc;
+ }
+ }
+
+ if (init_data->constraints.name) {
+ rdesc = &(labibb->ibb_vreg.rdesc);
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &qpnp_ibb_ops;
+ rdesc->name = init_data->constraints.name;
+
+ cfg.dev = labibb->dev;
+ cfg.init_data = init_data;
+ cfg.driver_data = labibb;
+ cfg.of_node = of_node;
+
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_STATUS;
+
+ labibb->ibb_vreg.rdev = regulator_register(rdesc, &cfg);
+ if (IS_ERR(labibb->ibb_vreg.rdev)) {
+ rc = PTR_ERR(labibb->ibb_vreg.rdev);
+ labibb->ibb_vreg.rdev = NULL;
+ pr_err("unable to get regulator init data for qpnp ibb regulator, rc = %d\n",
+ rc);
+
+ return rc;
+ }
+ } else {
+ dev_err(labibb->dev, "qpnp ibb regulator name missing\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&(labibb->ibb_vreg.ibb_mutex));
+ return 0;
+}
+
+static int qpnp_labibb_regulator_probe(struct spmi_device *spmi)
+{
+ struct qpnp_labibb *labibb;
+ struct resource *resource;
+ struct spmi_resource *spmi_resource;
+ const char *mode_name;
+ u8 type;
+ int rc = 0;
+
+ labibb = devm_kzalloc(&spmi->dev,
+ sizeof(struct qpnp_labibb), GFP_KERNEL);
+ if (labibb == NULL) {
+ pr_err("labibb allocation failed.\n");
+ return -ENOMEM;
+ }
+
+ labibb->dev = &(spmi->dev);
+ labibb->spmi = spmi;
+
+ rc = of_property_read_string(labibb->dev->of_node,
+ "qpnp,qpnp-labibb-mode", &mode_name);
+ if (!rc) {
+ if (strcmp("lcd", mode_name) == 0) {
+ labibb->mode = QPNP_LABIBB_LCD_MODE;
+ } else if (strcmp("amoled", mode_name) == 0) {
+ labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+ } else if (strcmp("stand-alone", mode_name) == 0) {
+ labibb->mode = QPNP_LABIBB_STANDALONE_MODE;
+ } else {
+ pr_err("Invalid device property in qpnp,qpnp-labibb-mode: %s\n",
+ mode_name);
+ return -EINVAL;
+ }
+ } else {
+ pr_err("qpnp_labibb: qpnp,qpnp-labibb-mode is missing.\n");
+ return rc;
+ }
+
+ labibb->ttw_en = of_property_read_bool(labibb->dev->of_node,
+ "qcom,labibb-touch-to-wake-en");
+ if (labibb->ttw_en && labibb->mode != QPNP_LABIBB_LCD_MODE) {
+ pr_err("Invalid mode for TTW\n");
+ return -EINVAL;
+ }
+
+ labibb->swire_control = of_property_read_bool(labibb->dev->of_node,
+ "qpnp,swire-control");
+ if (labibb->swire_control && labibb->mode != QPNP_LABIBB_AMOLED_MODE) {
+ pr_err("Invalid mode for SWIRE control\n");
+ return -EINVAL;
+ }
+
+ spmi_for_each_container_dev(spmi_resource, spmi) {
+ if (!spmi_resource) {
+ pr_err("qpnp_labibb: spmi resource absent\n");
+ return -ENXIO;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ pr_err("node %s IO resource absent!\n",
+ spmi->dev.of_node->full_name);
+ return -ENXIO;
+ }
+
+ rc = qpnp_labibb_read(labibb, &type,
+ resource->start + REG_PERPH_TYPE, 1);
+ if (rc) {
+ pr_err("Peripheral type read failed rc=%d\n", rc);
+ goto fail_registration;
+ }
+
+ switch (type) {
+ case QPNP_LAB_TYPE:
+ labibb->lab_base = resource->start;
+ rc = register_qpnp_lab_regulator(labibb,
+ spmi_resource->of_node);
+ if (rc)
+ goto fail_registration;
+ break;
+
+ case QPNP_IBB_TYPE:
+ labibb->ibb_base = resource->start;
+ rc = register_qpnp_ibb_regulator(labibb,
+ spmi_resource->of_node);
+ if (rc)
+ goto fail_registration;
+ break;
+
+ default:
+ pr_err("qpnp_labibb: unknown peripheral type %x\n",
+ type);
+ rc = -EINVAL;
+ goto fail_registration;
+ }
+ }
+
+ dev_set_drvdata(&spmi->dev, labibb);
+ return 0;
+
+fail_registration:
+ if (labibb->lab_vreg.rdev)
+ regulator_unregister(labibb->lab_vreg.rdev);
+ if (labibb->ibb_vreg.rdev)
+ regulator_unregister(labibb->ibb_vreg.rdev);
+
+ return rc;
+}
+
+static int qpnp_labibb_regulator_remove(struct spmi_device *spmi)
+{
+ struct qpnp_labibb *labibb = dev_get_drvdata(&spmi->dev);
+
+ if (labibb) {
+ if (labibb->lab_vreg.rdev)
+ regulator_unregister(labibb->lab_vreg.rdev);
+ if (labibb->ibb_vreg.rdev)
+ regulator_unregister(labibb->ibb_vreg.rdev);
+ }
+ return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = QPNP_LABIBB_REGULATOR_DRIVER_NAME, },
+ { },
+};
+
+static struct spmi_driver qpnp_labibb_regulator_driver = {
+ .driver = {
+ .name = QPNP_LABIBB_REGULATOR_DRIVER_NAME,
+ .of_match_table = spmi_match_table,
+ },
+ .probe = qpnp_labibb_regulator_probe,
+ .remove = qpnp_labibb_regulator_remove,
+};
+
+static int __init qpnp_labibb_regulator_init(void)
+{
+ return spmi_driver_register(&qpnp_labibb_regulator_driver);
+}
+arch_initcall(qpnp_labibb_regulator_init);
+
+static void __exit qpnp_labibb_regulator_exit(void)
+{
+ spmi_driver_unregister(&qpnp_labibb_regulator_driver);
+}
+module_exit(qpnp_labibb_regulator_exit);
+
+MODULE_DESCRIPTION("QPNP labibb driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/qpnp-regulator.c b/drivers/regulator/qpnp-regulator.c
new file mode 100644
index 000000000000..a5b92d3a0660
--- /dev/null
+++ b/drivers/regulator/qpnp-regulator.c
@@ -0,0 +1,2170 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/ktime.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/qpnp-regulator.h>
+
+/* Debug Flag Definitions */
+enum {
+ QPNP_VREG_DEBUG_REQUEST = BIT(0), /* Show requests */
+ QPNP_VREG_DEBUG_DUPLICATE = BIT(1), /* Show duplicate requests */
+ QPNP_VREG_DEBUG_INIT = BIT(2), /* Show state after probe */
+ QPNP_VREG_DEBUG_WRITES = BIT(3), /* Show SPMI writes */
+ QPNP_VREG_DEBUG_READS = BIT(4), /* Show SPMI reads */
+ QPNP_VREG_DEBUG_OCP = BIT(5), /* Show VS OCP IRQ events */
+};
+
+static int qpnp_vreg_debug_mask;
+module_param_named(
+ debug_mask, qpnp_vreg_debug_mask, int, S_IRUSR | S_IWUSR
+);
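+/*
+ * The mask can be changed at runtime, for example (path assumes the module
+ * object is named qpnp_regulator):
+ * echo 0x18 > /sys/module/qpnp_regulator/parameters/debug_mask
+ * enables logging of SPMI writes and reads (bits 3 and 4).
+ */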
+
+#define vreg_err(vreg, fmt, ...) \
+ pr_err("%s: " fmt, vreg->rdesc.name, ##__VA_ARGS__)
+
+/* These types correspond to unique register layouts. */
+enum qpnp_regulator_logical_type {
+ QPNP_REGULATOR_LOGICAL_TYPE_SMPS,
+ QPNP_REGULATOR_LOGICAL_TYPE_LDO,
+ QPNP_REGULATOR_LOGICAL_TYPE_VS,
+ QPNP_REGULATOR_LOGICAL_TYPE_BOOST,
+ QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS,
+ QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP,
+ QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO,
+ QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS,
+ QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS,
+ QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO,
+};
+
+enum qpnp_regulator_type {
+ QPNP_REGULATOR_TYPE_BUCK = 0x03,
+ QPNP_REGULATOR_TYPE_LDO = 0x04,
+ QPNP_REGULATOR_TYPE_VS = 0x05,
+ QPNP_REGULATOR_TYPE_BOOST = 0x1B,
+ QPNP_REGULATOR_TYPE_FTS = 0x1C,
+ QPNP_REGULATOR_TYPE_BOOST_BYP = 0x1F,
+ QPNP_REGULATOR_TYPE_ULT_LDO = 0x21,
+ QPNP_REGULATOR_TYPE_ULT_BUCK = 0x22,
+};
+
+enum qpnp_regulator_subtype {
+ QPNP_REGULATOR_SUBTYPE_GP_CTL = 0x08,
+ QPNP_REGULATOR_SUBTYPE_RF_CTL = 0x09,
+ QPNP_REGULATOR_SUBTYPE_N50 = 0x01,
+ QPNP_REGULATOR_SUBTYPE_N150 = 0x02,
+ QPNP_REGULATOR_SUBTYPE_N300 = 0x03,
+ QPNP_REGULATOR_SUBTYPE_N600 = 0x04,
+ QPNP_REGULATOR_SUBTYPE_N1200 = 0x05,
+ QPNP_REGULATOR_SUBTYPE_N600_ST = 0x06,
+ QPNP_REGULATOR_SUBTYPE_N1200_ST = 0x07,
+ QPNP_REGULATOR_SUBTYPE_N300_ST = 0x15,
+ QPNP_REGULATOR_SUBTYPE_P50 = 0x08,
+ QPNP_REGULATOR_SUBTYPE_P150 = 0x09,
+ QPNP_REGULATOR_SUBTYPE_P300 = 0x0A,
+ QPNP_REGULATOR_SUBTYPE_P600 = 0x0B,
+ QPNP_REGULATOR_SUBTYPE_P1200 = 0x0C,
+ QPNP_REGULATOR_SUBTYPE_LN = 0x10,
+ QPNP_REGULATOR_SUBTYPE_LV_P50 = 0x28,
+ QPNP_REGULATOR_SUBTYPE_LV_P150 = 0x29,
+ QPNP_REGULATOR_SUBTYPE_LV_P300 = 0x2A,
+ QPNP_REGULATOR_SUBTYPE_LV_P600 = 0x2B,
+ QPNP_REGULATOR_SUBTYPE_LV_P1200 = 0x2C,
+ QPNP_REGULATOR_SUBTYPE_LV100 = 0x01,
+ QPNP_REGULATOR_SUBTYPE_LV300 = 0x02,
+ QPNP_REGULATOR_SUBTYPE_MV300 = 0x08,
+ QPNP_REGULATOR_SUBTYPE_MV500 = 0x09,
+ QPNP_REGULATOR_SUBTYPE_HDMI = 0x10,
+ QPNP_REGULATOR_SUBTYPE_OTG = 0x11,
+ QPNP_REGULATOR_SUBTYPE_5V_BOOST = 0x01,
+ QPNP_REGULATOR_SUBTYPE_FTS_CTL = 0x08,
+ QPNP_REGULATOR_SUBTYPE_FTS2p5_CTL = 0x09,
+ QPNP_REGULATOR_SUBTYPE_BB_2A = 0x01,
+ QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL1 = 0x0D,
+ QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL2 = 0x0E,
+ QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL3 = 0x0F,
+ QPNP_REGULATOR_SUBTYPE_ULT_HF_CTL4 = 0x10,
+};
+
+enum qpnp_common_regulator_registers {
+ QPNP_COMMON_REG_DIG_MAJOR_REV = 0x01,
+ QPNP_COMMON_REG_TYPE = 0x04,
+ QPNP_COMMON_REG_SUBTYPE = 0x05,
+ QPNP_COMMON_REG_VOLTAGE_RANGE = 0x40,
+ QPNP_COMMON_REG_VOLTAGE_SET = 0x41,
+ QPNP_COMMON_REG_MODE = 0x45,
+ QPNP_COMMON_REG_ENABLE = 0x46,
+ QPNP_COMMON_REG_PULL_DOWN = 0x48,
+ QPNP_COMMON_REG_STEP_CTRL = 0x61,
+};
+
+enum qpnp_ldo_registers {
+ QPNP_LDO_REG_SOFT_START = 0x4C,
+};
+
+enum qpnp_vs_registers {
+ QPNP_VS_REG_OCP = 0x4A,
+ QPNP_VS_REG_SOFT_START = 0x4C,
+};
+
+enum qpnp_boost_registers {
+ QPNP_BOOST_REG_CURRENT_LIMIT = 0x4A,
+};
+
+enum qpnp_boost_byp_registers {
+ QPNP_BOOST_BYP_REG_CURRENT_LIMIT = 0x4B,
+};
+
+/* Used for indexing into ctrl_reg. These are offets from 0x40 */
+enum qpnp_common_control_register_index {
+ QPNP_COMMON_IDX_VOLTAGE_RANGE = 0,
+ QPNP_COMMON_IDX_VOLTAGE_SET = 1,
+ QPNP_COMMON_IDX_MODE = 5,
+ QPNP_COMMON_IDX_ENABLE = 6,
+};
+
+/* Common regulator control register layout */
+#define QPNP_COMMON_ENABLE_MASK 0x80
+#define QPNP_COMMON_ENABLE 0x80
+#define QPNP_COMMON_DISABLE 0x00
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN3_MASK 0x08
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN2_MASK 0x04
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN1_MASK 0x02
+#define QPNP_COMMON_ENABLE_FOLLOW_HW_EN0_MASK 0x01
+#define QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK 0x0F
+
+/* Common regulator mode register layout */
+#define QPNP_COMMON_MODE_HPM_MASK 0x80
+#define QPNP_COMMON_MODE_AUTO_MASK 0x40
+#define QPNP_COMMON_MODE_BYPASS_MASK 0x20
+#define QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK 0x10
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN3_MASK 0x08
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN2_MASK 0x04
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN1_MASK 0x02
+#define QPNP_COMMON_MODE_FOLLOW_HW_EN0_MASK 0x01
+#define QPNP_COMMON_MODE_FOLLOW_ALL_MASK 0x1F
+
+/* Common regulator pull down control register layout */
+#define QPNP_COMMON_PULL_DOWN_ENABLE_MASK 0x80
+
+/* LDO regulator current limit control register layout */
+#define QPNP_LDO_CURRENT_LIMIT_ENABLE_MASK 0x80
+
+/* LDO regulator soft start control register layout */
+#define QPNP_LDO_SOFT_START_ENABLE_MASK 0x80
+
+/* VS regulator over current protection control register layout */
+#define QPNP_VS_OCP_OVERRIDE 0x01
+#define QPNP_VS_OCP_NO_OVERRIDE 0x00
+
+/* VS regulator soft start control register layout */
+#define QPNP_VS_SOFT_START_ENABLE_MASK 0x80
+#define QPNP_VS_SOFT_START_SEL_MASK 0x03
+
+/* Boost regulator current limit control register layout */
+#define QPNP_BOOST_CURRENT_LIMIT_ENABLE_MASK 0x80
+#define QPNP_BOOST_CURRENT_LIMIT_MASK 0x07
+
+#define QPNP_VS_OCP_DEFAULT_MAX_RETRIES 10
+#define QPNP_VS_OCP_DEFAULT_RETRY_DELAY_MS 30
+#define QPNP_VS_OCP_FALL_DELAY_US 90
+#define QPNP_VS_OCP_FAULT_DELAY_US 20000
+
+#define QPNP_FTSMPS_STEP_CTRL_STEP_MASK 0x18
+#define QPNP_FTSMPS_STEP_CTRL_STEP_SHIFT 3
+#define QPNP_FTSMPS_STEP_CTRL_DELAY_MASK 0x07
+#define QPNP_FTSMPS_STEP_CTRL_DELAY_SHIFT 0
+
+/* Clock rate in kHz of the FTSMPS regulator reference clock. */
+#define QPNP_FTSMPS_CLOCK_RATE 19200
+
+/* Minimum voltage stepper delay for each step. */
+#define QPNP_FTSMPS_STEP_DELAY 8
+
+/*
+ * The ratio QPNP_FTSMPS_STEP_MARGIN_NUM/QPNP_FTSMPS_STEP_MARGIN_DEN is used to
+ * adjust the step rate in order to account for oscillator variance.
+ */
+#define QPNP_FTSMPS_STEP_MARGIN_NUM 4
+#define QPNP_FTSMPS_STEP_MARGIN_DEN 5
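+/*
+ * For example, a nominal step rate of 16000 uV/us would be derated by this
+ * margin to 16000 * 4 / 5 = 12800 uV/us.
+ */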
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level. It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/* VSET value to decide the range of ULT SMPS */
+#define ULT_SMPS_RANGE_SPLIT 0x60
+
+/**
+ * struct qpnp_voltage_range - regulator set point voltage mapping description
+ * @min_uV: Minimum programmable output voltage resulting from
+ * set point register value 0x00
+ * @max_uV: Maximum programmable output voltage
+ * @step_uV: Output voltage increase resulting from the set point
+ * register value increasing by 1
+ * @set_point_min_uV: Minimum allowed voltage
+ * @set_point_max_uV: Maximum allowed voltage. This may be tweaked in order
+ * to pick which range should be used in the case of
+ * overlapping set points.
+ * @n_voltages: Number of preferred voltage set points present in this
+ * range
+ * @range_sel: Voltage range register value corresponding to this range
+ *
+ * The following relationships must be true for the values used in this struct:
+ * (max_uV - min_uV) % step_uV == 0
+ * (set_point_min_uV - min_uV) % step_uV == 0*
+ * (set_point_max_uV - min_uV) % step_uV == 0*
+ * n_voltages = (set_point_max_uV - set_point_min_uV) / step_uV + 1
+ *
+ * *Note, set_point_min_uV == set_point_max_uV == 0 is allowed in order to
+ * specify that the voltage range has meaning, but is not preferred.
+ */
+struct qpnp_voltage_range {
+ int min_uV;
+ int max_uV;
+ int step_uV;
+ int set_point_min_uV;
+ int set_point_max_uV;
+ unsigned n_voltages;
+ u8 range_sel;
+};
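+
+/*
+ * Worked example using the PLDO range_sel = 2 entry defined below
+ * (min_uV = 750000, set points 750000..1537500, step_uV = 12500):
+ * (1537500 - 750000) % 12500 == 0 and
+ * n_voltages = (1537500 - 750000) / 12500 + 1 = 64.
+ */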
+
+/*
+ * The ranges specified in the qpnp_voltage_set_points struct must be listed
+ * so that range[i].set_point_max_uV < range[i+1].set_point_min_uV.
+ */
+struct qpnp_voltage_set_points {
+ struct qpnp_voltage_range *range;
+ int count;
+ unsigned n_voltages;
+};
+
+struct qpnp_regulator_mapping {
+ enum qpnp_regulator_type type;
+ enum qpnp_regulator_subtype subtype;
+ enum qpnp_regulator_logical_type logical_type;
+ u32 revision_min;
+ u32 revision_max;
+ struct regulator_ops *ops;
+ struct qpnp_voltage_set_points *set_points;
+ int hpm_min_load;
+};
+
+struct qpnp_regulator {
+ struct regulator_desc rdesc;
+ struct delayed_work ocp_work;
+ struct spmi_device *spmi_dev;
+ struct regulator_dev *rdev;
+ struct qpnp_voltage_set_points *set_points;
+ enum qpnp_regulator_logical_type logical_type;
+ int enable_time;
+ int ocp_enable;
+ int ocp_irq;
+ int ocp_count;
+ int ocp_max_retries;
+ int ocp_retry_delay_ms;
+ int system_load;
+ int hpm_min_load;
+ int slew_rate;
+ u32 write_count;
+ u32 prev_write_count;
+ ktime_t vs_enable_time;
+ u16 base_addr;
+ /* ctrl_reg provides a shadow copy of register values 0x40 to 0x47. */
+ u8 ctrl_reg[8];
+};
+
+#define QPNP_VREG_MAP(_type, _subtype, _dig_major_min, _dig_major_max, \
+ _logical_type, _ops_val, _set_points_val, _hpm_min_load) \
+ { \
+ .type = QPNP_REGULATOR_TYPE_##_type, \
+ .subtype = QPNP_REGULATOR_SUBTYPE_##_subtype, \
+ .revision_min = _dig_major_min, \
+ .revision_max = _dig_major_max, \
+ .logical_type = QPNP_REGULATOR_LOGICAL_TYPE_##_logical_type, \
+ .ops = &qpnp_##_ops_val##_ops, \
+ .set_points = &_set_points_val##_set_points, \
+ .hpm_min_load = _hpm_min_load, \
+ }
+
+#define VOLTAGE_RANGE(_range_sel, _min_uV, _set_point_min_uV, \
+ _set_point_max_uV, _max_uV, _step_uV) \
+ { \
+ .min_uV = _min_uV, \
+ .max_uV = _max_uV, \
+ .set_point_min_uV = _set_point_min_uV, \
+ .set_point_max_uV = _set_point_max_uV, \
+ .step_uV = _step_uV, \
+ .range_sel = _range_sel, \
+ }
+
+#define SET_POINTS(_ranges) \
+{ \
+ .range = _ranges, \
+ .count = ARRAY_SIZE(_ranges), \
+};
+
+/*
+ * These tables contain the physically available PMIC regulator voltage setpoint
+ * ranges. Where two ranges overlap in hardware, one of the ranges is trimmed
+ * to ensure that the setpoints available to software are monotonically
+ * increasing and unique. The set_voltage callback functions expect these
+ * properties to hold.
+ */
+static struct qpnp_voltage_range pldo_ranges[] = {
+ VOLTAGE_RANGE(2, 750000, 750000, 1537500, 1537500, 12500),
+ VOLTAGE_RANGE(3, 1500000, 1550000, 3075000, 3075000, 25000),
+ VOLTAGE_RANGE(4, 1750000, 3100000, 4900000, 4900000, 50000),
+};
+
+static struct qpnp_voltage_range nldo1_ranges[] = {
+ VOLTAGE_RANGE(2, 750000, 750000, 1537500, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range nldo2_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 0, 0, 1537500, 12500),
+ VOLTAGE_RANGE(1, 375000, 375000, 768750, 768750, 6250),
+ VOLTAGE_RANGE(2, 750000, 775000, 1537500, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range nldo3_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 375000, 1537500, 1537500, 12500),
+ VOLTAGE_RANGE(1, 375000, 0, 0, 1537500, 12500),
+ VOLTAGE_RANGE(2, 750000, 0, 0, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range ln_ldo_ranges[] = {
+ VOLTAGE_RANGE(1, 690000, 690000, 1110000, 1110000, 60000),
+ VOLTAGE_RANGE(0, 1380000, 1380000, 2220000, 2220000, 120000),
+};
+
+static struct qpnp_voltage_range smps_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 375000, 1562500, 1562500, 12500),
+ VOLTAGE_RANGE(1, 1550000, 1575000, 3125000, 3125000, 25000),
+};
+
+static struct qpnp_voltage_range ftsmps_ranges[] = {
+ VOLTAGE_RANGE(0, 0, 350000, 1275000, 1275000, 5000),
+ VOLTAGE_RANGE(1, 0, 1280000, 2040000, 2040000, 10000),
+};
+
+static struct qpnp_voltage_range ftsmps2p5_ranges[] = {
+ VOLTAGE_RANGE(0, 80000, 350000, 1355000, 1355000, 5000),
+ VOLTAGE_RANGE(1, 160000, 1360000, 2200000, 2200000, 10000),
+};
+
+static struct qpnp_voltage_range boost_ranges[] = {
+ VOLTAGE_RANGE(0, 4000000, 4000000, 5550000, 5550000, 50000),
+};
+
+static struct qpnp_voltage_range boost_byp_ranges[] = {
+ VOLTAGE_RANGE(0, 2500000, 2500000, 5200000, 5650000, 50000),
+};
+
+static struct qpnp_voltage_range ult_lo_smps_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 375000, 1562500, 1562500, 12500),
+ VOLTAGE_RANGE(1, 750000, 0, 0, 1525000, 25000),
+};
+
+static struct qpnp_voltage_range ult_ho_smps_ranges[] = {
+ VOLTAGE_RANGE(0, 1550000, 1550000, 2325000, 2325000, 25000),
+};
+
+static struct qpnp_voltage_range ult_nldo_ranges[] = {
+ VOLTAGE_RANGE(0, 375000, 375000, 1537500, 1537500, 12500),
+};
+
+static struct qpnp_voltage_range ult_pldo_ranges[] = {
+ VOLTAGE_RANGE(0, 1750000, 1750000, 3337500, 3337500, 12500),
+};
+
+static struct qpnp_voltage_set_points pldo_set_points = SET_POINTS(pldo_ranges);
+static struct qpnp_voltage_set_points nldo1_set_points
+ = SET_POINTS(nldo1_ranges);
+static struct qpnp_voltage_set_points nldo2_set_points
+ = SET_POINTS(nldo2_ranges);
+static struct qpnp_voltage_set_points nldo3_set_points
+ = SET_POINTS(nldo3_ranges);
+static struct qpnp_voltage_set_points ln_ldo_set_points
+ = SET_POINTS(ln_ldo_ranges);
+static struct qpnp_voltage_set_points smps_set_points = SET_POINTS(smps_ranges);
+static struct qpnp_voltage_set_points ftsmps_set_points
+ = SET_POINTS(ftsmps_ranges);
+static struct qpnp_voltage_set_points ftsmps2p5_set_points
+ = SET_POINTS(ftsmps2p5_ranges);
+static struct qpnp_voltage_set_points boost_set_points
+ = SET_POINTS(boost_ranges);
+static struct qpnp_voltage_set_points boost_byp_set_points
+ = SET_POINTS(boost_byp_ranges);
+static struct qpnp_voltage_set_points ult_lo_smps_set_points
+ = SET_POINTS(ult_lo_smps_ranges);
+static struct qpnp_voltage_set_points ult_ho_smps_set_points
+ = SET_POINTS(ult_ho_smps_ranges);
+static struct qpnp_voltage_set_points ult_nldo_set_points
+ = SET_POINTS(ult_nldo_ranges);
+static struct qpnp_voltage_set_points ult_pldo_set_points
+ = SET_POINTS(ult_pldo_ranges);
+static struct qpnp_voltage_set_points none_set_points;
+
+static struct qpnp_voltage_set_points *all_set_points[] = {
+ &pldo_set_points,
+ &nldo1_set_points,
+ &nldo2_set_points,
+ &nldo3_set_points,
+ &ln_ldo_set_points,
+ &smps_set_points,
+ &ftsmps_set_points,
+ &ftsmps2p5_set_points,
+ &boost_set_points,
+ &boost_byp_set_points,
+ &ult_lo_smps_set_points,
+ &ult_ho_smps_set_points,
+ &ult_nldo_set_points,
+ &ult_pldo_set_points,
+};
+
+/* Determines which label to add to a debug print statement. */
+enum qpnp_regulator_action {
+ QPNP_REGULATOR_ACTION_INIT,
+ QPNP_REGULATOR_ACTION_ENABLE,
+ QPNP_REGULATOR_ACTION_DISABLE,
+ QPNP_REGULATOR_ACTION_VOLTAGE,
+ QPNP_REGULATOR_ACTION_MODE,
+};
+
+static void qpnp_vreg_show_state(struct regulator_dev *rdev,
+ enum qpnp_regulator_action action);
+
+#define DEBUG_PRINT_BUFFER_SIZE 64
+static void fill_string(char *str, size_t str_len, u8 *buf, int buf_len)
+{
+ int pos = 0;
+ int i;
+
+ for (i = 0; i < buf_len; i++) {
+ pos += scnprintf(str + pos, str_len - pos, "0x%02X", buf[i]);
+ if (i < buf_len - 1)
+ pos += scnprintf(str + pos, str_len - pos, ", ");
+ }
+}
+
+static inline int qpnp_vreg_read(struct qpnp_regulator *vreg, u16 addr, u8 *buf,
+ int len)
+{
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+ int rc = 0;
+
+ rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
+ vreg->base_addr + addr, buf, len);
+
+ if (!rc && (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_READS)) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, buf, len);
+ pr_info(" %-11s: read(0x%04X), sid=%d, len=%d; %s\n",
+ vreg->rdesc.name, vreg->base_addr + addr,
+ vreg->spmi_dev->sid, len, str);
+ }
+
+ return rc;
+}
+
+static inline int qpnp_vreg_write(struct qpnp_regulator *vreg, u16 addr,
+ u8 *buf, int len)
+{
+ char str[DEBUG_PRINT_BUFFER_SIZE];
+ int rc = 0;
+
+ if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_WRITES) {
+ str[0] = '\0';
+ fill_string(str, DEBUG_PRINT_BUFFER_SIZE, buf, len);
+ pr_info("%-11s: write(0x%04X), sid=%d, len=%d; %s\n",
+ vreg->rdesc.name, vreg->base_addr + addr,
+ vreg->spmi_dev->sid, len, str);
+ }
+
+ rc = spmi_ext_register_writel(vreg->spmi_dev->ctrl,
+ vreg->spmi_dev->sid, vreg->base_addr + addr, buf, len);
+ if (!rc)
+ vreg->write_count += len;
+
+ return rc;
+}
+
+/*
+ * qpnp_vreg_write_optimized - write the minimum sized contiguous subset of buf
+ * @vreg: qpnp_regulator pointer for this regulator
+ * @addr: local SPMI address offset from this peripheral's base address
+ * @buf: new data to write into the SPMI registers
+ * @buf_save: old data in the registers
+ * @len: number of bytes to write
+ *
+ * This function checks for unchanged register values between buf and buf_save
+ * starting at both ends of buf. Only the contiguous subset in the middle of
+ * buf starting and ending with new values is sent.
+ *
+ * Consider the following example:
+ * buf offset: 0 1 2 3 4 5 6 7
+ * reg state: U U C C U C U U
+ * (U = unchanged, C = changed)
+ * In this example registers 2 through 5 will be written with a single
+ * transaction.
+ */
+static inline int qpnp_vreg_write_optimized(struct qpnp_regulator *vreg,
+ u16 addr, u8 *buf, u8 *buf_save, int len)
+{
+ int i, rc, start, end;
+
+ for (i = 0; i < len; i++)
+ if (buf[i] != buf_save[i])
+ break;
+ start = i;
+
+ for (i = len - 1; i >= 0; i--)
+ if (buf[i] != buf_save[i])
+ break;
+ end = i;
+
+ if (start > end) {
+ /* No modified register values present. */
+ return 0;
+ }
+
+ rc = qpnp_vreg_write(vreg, addr + start, &buf[start], end - start + 1);
+ if (!rc)
+ for (i = start; i <= end; i++)
+ buf_save[i] = buf[i];
+
+ return rc;
+}
+
+/*
+ * Perform a masked write to a PMIC register only if the new value differs
+ * from the last value written to the register. This removes redundant
+ * register writing.
+ */
+static int qpnp_vreg_masked_write(struct qpnp_regulator *vreg, u16 addr, u8 val,
+ u8 mask, u8 *reg_save)
+{
+ int rc = 0;
+ u8 reg;
+
+ reg = (*reg_save & ~mask) | (val & mask);
+ if (reg != *reg_save) {
+ rc = qpnp_vreg_write(vreg, addr, &reg, 1);
+
+ if (rc) {
+ vreg_err(vreg, "write failed; addr=0x%03X, rc=%d\n",
+ addr, rc);
+ } else {
+ *reg_save = reg;
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Perform a masked read-modify-write to a PMIC register only if the new value
+ * differs from the value currently in the register. This removes redundant
+ * register writing.
+ */
+static int qpnp_vreg_masked_read_write(struct qpnp_regulator *vreg, u16 addr,
+ u8 val, u8 mask)
+{
+ int rc;
+ u8 reg;
+
+ rc = qpnp_vreg_read(vreg, addr, &reg, 1);
+ if (rc) {
+ vreg_err(vreg, "read failed; addr=0x%03X, rc=%d\n", addr, rc);
+ return rc;
+ }
+
+ return qpnp_vreg_masked_write(vreg, addr, val, mask, &reg);
+}
+
+static int qpnp_regulator_common_is_enabled(struct regulator_dev *rdev)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+ return (vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]
+ & QPNP_COMMON_ENABLE_MASK)
+ == QPNP_COMMON_ENABLE;
+}
+
+static int qpnp_regulator_common_enable(struct regulator_dev *rdev)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+ QPNP_COMMON_ENABLE, QPNP_COMMON_ENABLE_MASK,
+ &vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+
+ if (rc)
+ vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+ else
+ qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_ENABLE);
+
+ return rc;
+}
+
+static int qpnp_regulator_vs_enable(struct regulator_dev *rdev)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+ if (vreg->ocp_irq) {
+ vreg->ocp_count = 0;
+ vreg->vs_enable_time = ktime_get();
+ }
+
+ return qpnp_regulator_common_enable(rdev);
+}
+
+static int qpnp_regulator_common_disable(struct regulator_dev *rdev)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+ QPNP_COMMON_DISABLE, QPNP_COMMON_ENABLE_MASK,
+ &vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+
+ if (rc)
+ vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+ else
+ qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_DISABLE);
+
+ return rc;
+}
+
+/*
+ * Returns 1 if the voltage can be set in the current range, 0 if the voltage
+ * cannot be set in the current range, or errno if an error occurred.
+ */
+static int qpnp_regulator_select_voltage_same_range(struct qpnp_regulator *vreg,
+ int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+ unsigned *selector)
+{
+ struct qpnp_voltage_range *range = NULL;
+ int uV = min_uV;
+ int i;
+
+ *range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+
+ for (i = 0; i < vreg->set_points->count; i++) {
+ if (vreg->set_points->range[i].range_sel == *range_sel) {
+ range = &vreg->set_points->range[i];
+ break;
+ }
+ }
+
+ if (!range) {
+ /* Unknown range */
+ return 0;
+ }
+
+ if (uV < range->min_uV && max_uV >= range->min_uV)
+ uV = range->min_uV;
+
+ if (uV < range->min_uV || uV > range->max_uV) {
+ /* Current range doesn't support the requested voltage. */
+ return 0;
+ }
+
+ /*
+ * Force uV to be an allowed set point by applying a ceiling function to
+ * the uV value.
+ */
+ *voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+ uV = *voltage_sel * range->step_uV + range->min_uV;
+
+ if (uV > max_uV) {
+ /*
+ * No set point in the current voltage range is within the
+ * requested min_uV to max_uV range.
+ */
+ return 0;
+ }
+
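+	/*
+	 * Translate the range-local set point into a logical selector index
+	 * spanning the set points of all supported ranges.
+	 */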
+ *selector = 0;
+ for (i = 0; i < vreg->set_points->count; i++) {
+ if (uV >= vreg->set_points->range[i].set_point_min_uV
+ && uV <= vreg->set_points->range[i].set_point_max_uV) {
+ *selector +=
+ (uV - vreg->set_points->range[i].set_point_min_uV)
+ / vreg->set_points->range[i].step_uV;
+ break;
+ } else {
+ *selector += vreg->set_points->range[i].n_voltages;
+ }
+ }
+
+ if (*selector >= vreg->set_points->n_voltages)
+ return 0;
+
+ return 1;
+}
+
+static int qpnp_regulator_select_voltage(struct qpnp_regulator *vreg,
+ int min_uV, int max_uV, int *range_sel, int *voltage_sel,
+ unsigned *selector)
+{
+ struct qpnp_voltage_range *range;
+ int uV = min_uV;
+ int lim_min_uV, lim_max_uV, i, range_id, range_max_uV;
+
+	/* Check if the requested voltage is outside the physically settable range. */
+ lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
+ lim_max_uV =
+ vreg->set_points->range[vreg->set_points->count - 1].set_point_max_uV;
+
+ if (uV < lim_min_uV && max_uV >= lim_min_uV)
+ uV = lim_min_uV;
+
+ if (uV < lim_min_uV || uV > lim_max_uV) {
+ vreg_err(vreg,
+ "request v=[%d, %d] is outside possible v=[%d, %d]\n",
+ min_uV, max_uV, lim_min_uV, lim_max_uV);
+ return -EINVAL;
+ }
+
+ /* Find the range which uV is inside of. */
+ for (i = vreg->set_points->count - 1; i > 0; i--) {
+ range_max_uV = vreg->set_points->range[i - 1].set_point_max_uV;
+ if (uV > range_max_uV && range_max_uV > 0)
+ break;
+ }
+
+ range_id = i;
+ range = &vreg->set_points->range[range_id];
+ *range_sel = range->range_sel;
+
+ /*
+ * Force uV to be an allowed set point by applying a ceiling function to
+ * the uV value.
+ */
+ *voltage_sel = (uV - range->min_uV + range->step_uV - 1)
+ / range->step_uV;
+ uV = *voltage_sel * range->step_uV + range->min_uV;
+
+ if (uV > max_uV) {
+ vreg_err(vreg,
+ "request v=[%d, %d] cannot be met by any set point; "
+ "next set point: %d\n",
+ min_uV, max_uV, uV);
+ return -EINVAL;
+ }
+
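+	/*
+	 * The logical selector is the number of set points in all lower ranges
+	 * plus the index of the chosen set point within the selected range.
+	 */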
+ *selector = 0;
+ for (i = 0; i < range_id; i++)
+ *selector += vreg->set_points->range[i].n_voltages;
+ *selector += (uV - range->set_point_min_uV) / range->step_uV;
+
+ return 0;
+}
+
+static int qpnp_regulator_common_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ int rc, range_sel, voltage_sel, voltage_old = 0, voltage_new = 0;
+ u8 buf[2];
+
+ if (vreg->slew_rate && vreg->rdesc.ops->get_voltage) {
+ voltage_old = vreg->rdesc.ops->get_voltage(rdev);
+ if (voltage_old < 0) {
+ vreg_err(vreg, "could not get current voltage, rc=%d\n",
+ voltage_old);
+ return voltage_old;
+ }
+ }
+
+ /*
+ * Favor staying in the current voltage range if possible. This avoids
+ * voltage spikes that occur when changing the voltage range.
+ */
+ rc = qpnp_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
+ &range_sel, &voltage_sel, selector);
+ if (rc == 0)
+ rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV,
+ &range_sel, &voltage_sel, selector);
+ if (rc < 0) {
+ vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+ return rc;
+ }
+
+ buf[0] = range_sel;
+ buf[1] = voltage_sel;
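+	/*
+	 * When only the range selector changes, the optimized write would skip
+	 * the unchanged voltage set byte; write both bytes explicitly so that
+	 * the latched range change takes effect.
+	 */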
+ if ((vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] != range_sel)
+ && (vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET] == voltage_sel)) {
+ /* Handle latched range change. */
+ rc = qpnp_vreg_write(vreg, QPNP_COMMON_REG_VOLTAGE_RANGE,
+ buf, 2);
+ if (!rc) {
+ vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] = buf[0];
+ vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET] = buf[1];
+ }
+ } else {
+ /* Either write can be optimized away safely. */
+ rc = qpnp_vreg_write_optimized(vreg,
+ QPNP_COMMON_REG_VOLTAGE_RANGE, buf,
+ &vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE], 2);
+ }
+
+ if (rc) {
+ vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+ } else {
+ /* Delay for voltage slewing if a step rate is specified. */
+ if (vreg->slew_rate && vreg->rdesc.ops->get_voltage) {
+ voltage_new = vreg->rdesc.ops->get_voltage(rdev);
+ if (voltage_new < 0) {
+ vreg_err(vreg, "could not get new voltage, rc=%d\n",
+ voltage_new);
+ return voltage_new;
+ }
+
+ udelay(DIV_ROUND_UP(abs(voltage_new - voltage_old),
+ vreg->slew_rate));
+ }
+
+ qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+ }
+
+ return rc;
+}
+
+static int qpnp_regulator_common_get_voltage(struct regulator_dev *rdev)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ struct qpnp_voltage_range *range = NULL;
+ int range_sel, voltage_sel, i;
+
+ range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+ voltage_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET];
+
+ for (i = 0; i < vreg->set_points->count; i++) {
+ if (vreg->set_points->range[i].range_sel == range_sel) {
+ range = &vreg->set_points->range[i];
+ break;
+ }
+ }
+
+ if (!range) {
+ vreg_err(vreg, "voltage unknown, range %d is invalid\n",
+ range_sel);
+ return VOLTAGE_UNKNOWN;
+ }
+
+ return range->step_uV * voltage_sel + range->min_uV;
+}
+
+static int qpnp_regulator_single_range_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ int rc, range_sel, voltage_sel;
+
+ rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
+ &voltage_sel, selector);
+ if (rc) {
+ vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Certain types of regulators do not have a range select register so
+ * only voltage set register needs to be written.
+ */
+ rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_VOLTAGE_SET,
+ voltage_sel, 0xFF, &vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET]);
+
+ if (rc)
+ vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+ else
+ qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+
+ return rc;
+}
+
+static int qpnp_regulator_single_range_get_voltage(struct regulator_dev *rdev)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ struct qpnp_voltage_range *range = &vreg->set_points->range[0];
+ int voltage_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET];
+
+ return range->step_uV * voltage_sel + range->min_uV;
+}
+
+static int qpnp_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV, unsigned *selector)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ int rc, range_sel, voltage_sel;
+
+ /*
+ * Favor staying in the current voltage range if possible. This avoids
+ * voltage spikes that occur when changing the voltage range.
+ */
+ rc = qpnp_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
+ &range_sel, &voltage_sel, selector);
+ if (rc == 0)
+ rc = qpnp_regulator_select_voltage(vreg, min_uV, max_uV,
+ &range_sel, &voltage_sel, selector);
+ if (rc < 0) {
+ vreg_err(vreg, "could not set voltage, rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+	 * Calculate VSET based on the range:
+	 * In case of range 0: voltage_sel is a 7-bit value and can be written
+	 * without any modification.
+	 * In case of range 1: voltage_sel is a 5-bit value with bits [7:5] set
+	 * to 011.
+ */
+ if (range_sel == 1)
+ voltage_sel |= ULT_SMPS_RANGE_SPLIT;
+
+ rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_VOLTAGE_SET,
+ voltage_sel, 0xFF, &vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET]);
+ if (rc) {
+ vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+ } else {
+ vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] = range_sel;
+ qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_VOLTAGE);
+ }
+
+ return rc;
+}
+
+static int qpnp_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ struct qpnp_voltage_range *range = NULL;
+ int range_sel, voltage_sel, i;
+
+ range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+ voltage_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET];
+
+ for (i = 0; i < vreg->set_points->count; i++) {
+ if (vreg->set_points->range[i].range_sel == range_sel) {
+ range = &vreg->set_points->range[i];
+ break;
+ }
+ }
+
+ if (!range) {
+ vreg_err(vreg, "voltage unknown, range %d is invalid\n",
+ range_sel);
+ return VOLTAGE_UNKNOWN;
+ }
+
+ if (range_sel == 1)
+ voltage_sel &= ~ULT_SMPS_RANGE_SPLIT;
+
+ return range->step_uV * voltage_sel + range->min_uV;
+}
+
+static int qpnp_regulator_common_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ int uV = 0;
+ int i;
+
+ if (selector >= vreg->set_points->n_voltages)
+ return 0;
+
+ for (i = 0; i < vreg->set_points->count; i++) {
+ if (selector < vreg->set_points->range[i].n_voltages) {
+ uV = selector * vreg->set_points->range[i].step_uV
+ + vreg->set_points->range[i].set_point_min_uV;
+ break;
+ } else {
+ selector -= vreg->set_points->range[i].n_voltages;
+ }
+ }
+
+ return uV;
+}
+
+static unsigned int qpnp_regulator_common_get_mode(struct regulator_dev *rdev)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+ return (vreg->ctrl_reg[QPNP_COMMON_IDX_MODE]
+ & QPNP_COMMON_MODE_HPM_MASK)
+ ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int qpnp_regulator_common_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ u8 val;
+
+ if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+ vreg_err(vreg, "invalid mode: %u\n", mode);
+ return -EINVAL;
+ }
+
+ val = (mode == REGULATOR_MODE_NORMAL ? QPNP_COMMON_MODE_HPM_MASK : 0);
+
+ rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_MODE, val,
+ QPNP_COMMON_MODE_HPM_MASK,
+ &vreg->ctrl_reg[QPNP_COMMON_IDX_MODE]);
+
+ if (rc)
+ vreg_err(vreg, "SPMI write failed, rc=%d\n", rc);
+ else
+ qpnp_vreg_show_state(rdev, QPNP_REGULATOR_ACTION_MODE);
+
+ return rc;
+}
+
+static unsigned int qpnp_regulator_common_get_optimum_mode(
+ struct regulator_dev *rdev, int input_uV, int output_uV,
+ int load_uA)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ unsigned int mode;
+
+ if (load_uA + vreg->system_load >= vreg->hpm_min_load)
+ mode = REGULATOR_MODE_NORMAL;
+ else
+ mode = REGULATOR_MODE_IDLE;
+
+ return mode;
+}
+
+static int qpnp_regulator_common_enable_time(struct regulator_dev *rdev)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->enable_time;
+}
+
+static int qpnp_regulator_vs_clear_ocp(struct qpnp_regulator *vreg)
+{
+ int rc;
+
+ rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+ QPNP_COMMON_DISABLE, QPNP_COMMON_ENABLE_MASK,
+ &vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+ if (rc)
+ vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+
+ vreg->vs_enable_time = ktime_get();
+
+ rc = qpnp_vreg_masked_write(vreg, QPNP_COMMON_REG_ENABLE,
+ QPNP_COMMON_ENABLE, QPNP_COMMON_ENABLE_MASK,
+ &vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE]);
+ if (rc)
+ vreg_err(vreg, "qpnp_vreg_masked_write failed, rc=%d\n", rc);
+
+ if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_OCP) {
+ pr_info("%s: switch state toggled after OCP event\n",
+ vreg->rdesc.name);
+ }
+
+ return rc;
+}
+
+static void qpnp_regulator_vs_ocp_work(struct work_struct *work)
+{
+ struct delayed_work *dwork
+ = container_of(work, struct delayed_work, work);
+ struct qpnp_regulator *vreg
+ = container_of(dwork, struct qpnp_regulator, ocp_work);
+
+ qpnp_regulator_vs_clear_ocp(vreg);
+}
+
+static irqreturn_t qpnp_regulator_vs_ocp_isr(int irq, void *data)
+{
+ struct qpnp_regulator *vreg = data;
+ ktime_t ocp_irq_time;
+ s64 ocp_trigger_delay_us;
+
+ ocp_irq_time = ktime_get();
+ ocp_trigger_delay_us = ktime_us_delta(ocp_irq_time,
+ vreg->vs_enable_time);
+
+ /*
+ * Reset the OCP count if there is a large delay between switch enable
+ * and when OCP triggers. This is indicative of a hotplug event as
+ * opposed to a fault.
+ */
+ if (ocp_trigger_delay_us > QPNP_VS_OCP_FAULT_DELAY_US)
+ vreg->ocp_count = 0;
+
+ /* Wait for switch output to settle back to 0 V after OCP triggered. */
+ udelay(QPNP_VS_OCP_FALL_DELAY_US);
+
+ vreg->ocp_count++;
+
+ if (qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_OCP) {
+ pr_info("%s: VS OCP triggered, count = %d, delay = %lld us\n",
+ vreg->rdesc.name, vreg->ocp_count,
+ ocp_trigger_delay_us);
+ }
+
+ if (vreg->ocp_count == 1) {
+ /* Immediately clear the over current condition. */
+ qpnp_regulator_vs_clear_ocp(vreg);
+ } else if (vreg->ocp_count <= vreg->ocp_max_retries) {
+ /* Schedule the over current clear task to run later. */
+ schedule_delayed_work(&vreg->ocp_work,
+ msecs_to_jiffies(vreg->ocp_retry_delay_ms) + 1);
+ } else {
+ vreg_err(vreg, "OCP triggered %d times; no further retries\n",
+ vreg->ocp_count);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const char * const qpnp_print_actions[] = {
+ [QPNP_REGULATOR_ACTION_INIT] = "initial ",
+ [QPNP_REGULATOR_ACTION_ENABLE] = "enable ",
+ [QPNP_REGULATOR_ACTION_DISABLE] = "disable ",
+ [QPNP_REGULATOR_ACTION_VOLTAGE] = "set voltage",
+ [QPNP_REGULATOR_ACTION_MODE] = "set mode ",
+};
+
+static void qpnp_vreg_show_state(struct regulator_dev *rdev,
+ enum qpnp_regulator_action action)
+{
+ struct qpnp_regulator *vreg = rdev_get_drvdata(rdev);
+ const char *action_label = qpnp_print_actions[action];
+ unsigned int mode = 0;
+ int uV = 0;
+ const char *mode_label = "";
+ enum qpnp_regulator_logical_type type;
+ const char *enable_label;
+ char pc_enable_label[5] = {'\0'};
+ char pc_mode_label[8] = {'\0'};
+ bool show_req, show_dupe, show_init, has_changed;
+ u8 en_reg, mode_reg;
+
+ /* Do not print unless appropriate flags are set. */
+ show_req = qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_REQUEST;
+ show_dupe = qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_DUPLICATE;
+ show_init = qpnp_vreg_debug_mask & QPNP_VREG_DEBUG_INIT;
+ has_changed = vreg->write_count != vreg->prev_write_count;
+ if (!((show_init && action == QPNP_REGULATOR_ACTION_INIT)
+ || (show_req && (has_changed || show_dupe)))) {
+ return;
+ }
+
+ vreg->prev_write_count = vreg->write_count;
+
+ type = vreg->logical_type;
+
+ enable_label = qpnp_regulator_common_is_enabled(rdev) ? "on " : "off";
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS)
+ uV = qpnp_regulator_common_get_voltage(rdev);
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+ uV = qpnp_regulator_single_range_get_voltage(rdev);
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS)
+ uV = qpnp_regulator_ult_lo_smps_get_voltage(rdev);
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS) {
+ mode = qpnp_regulator_common_get_mode(rdev);
+ mode_label = mode == REGULATOR_MODE_NORMAL ? "HPM" : "LPM";
+ }
+
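+	/*
+	 * Build labels showing which pin control and mode bits (EN0-EN3
+	 * follow, awake, auto, bypass) are set in the cached enable and mode
+	 * registers; '_' marks a cleared bit.
+	 */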
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS) {
+ en_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_ENABLE];
+ pc_enable_label[0] =
+ en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN3_MASK ? '3' : '_';
+ pc_enable_label[1] =
+ en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN2_MASK ? '2' : '_';
+ pc_enable_label[2] =
+ en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN1_MASK ? '1' : '_';
+ pc_enable_label[3] =
+ en_reg & QPNP_COMMON_ENABLE_FOLLOW_HW_EN0_MASK ? '0' : '_';
+ }
+
+ switch (type) {
+ case QPNP_REGULATOR_LOGICAL_TYPE_SMPS:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_AUTO_MASK ? 'A' : '_';
+ pc_mode_label[1] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+ pc_mode_label[2] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN3_MASK ? '3' : '_';
+ pc_mode_label[3] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN2_MASK ? '2' : '_';
+ pc_mode_label[4] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN1_MASK ? '1' : '_';
+ pc_mode_label[5] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN0_MASK ? '0' : '_';
+
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, pc_en=%s, "
+ "alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_enable_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_LDO:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_AUTO_MASK ? 'A' : '_';
+ pc_mode_label[1] =
+ mode_reg & QPNP_COMMON_MODE_BYPASS_MASK ? 'B' : '_';
+ pc_mode_label[2] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+ pc_mode_label[3] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN3_MASK ? '3' : '_';
+ pc_mode_label[4] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN2_MASK ? '2' : '_';
+ pc_mode_label[5] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN1_MASK ? '1' : '_';
+ pc_mode_label[6] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_HW_EN0_MASK ? '0' : '_';
+
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, pc_en=%s, "
+ "alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_enable_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_BYPASS_MASK ? 'B' : '_';
+
+ pr_info("%s %-11s: %s, v=%7d uV, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_VS:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_AUTO_MASK ? 'A' : '_';
+ pc_mode_label[1] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+
+ pr_info("%s %-11s: %s, mode=%s, pc_en=%s, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label,
+ mode_label, pc_enable_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_BOOST:
+ pr_info("%s %-11s: %s, v=%7d uV\n",
+ action_label, vreg->rdesc.name, enable_label, uV);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP:
+ pr_info("%s %-11s: %s, v=%7d uV\n",
+ action_label, vreg->rdesc.name, enable_label, uV);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_AUTO_MASK ? 'A' : '_';
+
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
+ case QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_mode_label);
+ break;
+ case QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO:
+ mode_reg = vreg->ctrl_reg[QPNP_COMMON_IDX_MODE];
+ pc_mode_label[0] =
+ mode_reg & QPNP_COMMON_MODE_BYPASS_MASK ? 'B' : '_';
+ pc_mode_label[1] =
+ mode_reg & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK ? 'W' : '_';
+ pr_info("%s %-11s: %s, v=%7d uV, mode=%s, alt_mode=%s\n",
+ action_label, vreg->rdesc.name, enable_label, uV,
+ mode_label, pc_mode_label);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct regulator_ops qpnp_smps_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_common_set_voltage,
+ .get_voltage = qpnp_regulator_common_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ldo_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_common_set_voltage,
+ .get_voltage = qpnp_regulator_common_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ln_ldo_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_common_set_voltage,
+ .get_voltage = qpnp_regulator_common_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_vs_ops = {
+ .enable = qpnp_regulator_vs_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_boost_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_single_range_set_voltage,
+ .get_voltage = qpnp_regulator_single_range_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ftsmps_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_common_set_voltage,
+ .get_voltage = qpnp_regulator_common_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ult_lo_smps_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_ult_lo_smps_set_voltage,
+ .get_voltage = qpnp_regulator_ult_lo_smps_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ult_ho_smps_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_single_range_set_voltage,
+ .get_voltage = qpnp_regulator_single_range_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+static struct regulator_ops qpnp_ult_ldo_ops = {
+ .enable = qpnp_regulator_common_enable,
+ .disable = qpnp_regulator_common_disable,
+ .is_enabled = qpnp_regulator_common_is_enabled,
+ .set_voltage = qpnp_regulator_single_range_set_voltage,
+ .get_voltage = qpnp_regulator_single_range_get_voltage,
+ .list_voltage = qpnp_regulator_common_list_voltage,
+ .set_mode = qpnp_regulator_common_set_mode,
+ .get_mode = qpnp_regulator_common_get_mode,
+ .get_optimum_mode = qpnp_regulator_common_get_optimum_mode,
+ .enable_time = qpnp_regulator_common_enable_time,
+};
+
+/* Maximum possible digital major revision value */
+#define INF 0xFF
+
+static const struct qpnp_regulator_mapping supported_regulators[] = {
+ /* type subtype dig_min dig_max ltype ops setpoints hpm_min */
+ QPNP_VREG_MAP(BUCK, GP_CTL, 0, INF, SMPS, smps, smps, 100000),
+ QPNP_VREG_MAP(LDO, N300, 0, INF, LDO, ldo, nldo1, 10000),
+ QPNP_VREG_MAP(LDO, N600, 0, 0, LDO, ldo, nldo2, 10000),
+ QPNP_VREG_MAP(LDO, N1200, 0, 0, LDO, ldo, nldo2, 10000),
+ QPNP_VREG_MAP(LDO, N600, 1, INF, LDO, ldo, nldo3, 10000),
+ QPNP_VREG_MAP(LDO, N1200, 1, INF, LDO, ldo, nldo3, 10000),
+ QPNP_VREG_MAP(LDO, N600_ST, 0, 0, LDO, ldo, nldo2, 10000),
+ QPNP_VREG_MAP(LDO, N1200_ST, 0, 0, LDO, ldo, nldo2, 10000),
+ QPNP_VREG_MAP(LDO, N600_ST, 1, INF, LDO, ldo, nldo3, 10000),
+ QPNP_VREG_MAP(LDO, N1200_ST, 1, INF, LDO, ldo, nldo3, 10000),
+ QPNP_VREG_MAP(LDO, P50, 0, INF, LDO, ldo, pldo, 5000),
+ QPNP_VREG_MAP(LDO, P150, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, P300, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, P600, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, P1200, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, LN, 0, INF, LN_LDO, ln_ldo, ln_ldo, 0),
+ QPNP_VREG_MAP(LDO, LV_P50, 0, INF, LDO, ldo, pldo, 5000),
+ QPNP_VREG_MAP(LDO, LV_P150, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, LV_P300, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, LV_P600, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(LDO, LV_P1200, 0, INF, LDO, ldo, pldo, 10000),
+ QPNP_VREG_MAP(VS, LV100, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, LV300, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, MV300, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, MV500, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, HDMI, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(VS, OTG, 0, INF, VS, vs, none, 0),
+ QPNP_VREG_MAP(BOOST, 5V_BOOST, 0, INF, BOOST, boost, boost, 0),
+ QPNP_VREG_MAP(FTS, FTS_CTL, 0, INF, FTSMPS, ftsmps, ftsmps, 100000),
+ QPNP_VREG_MAP(FTS, FTS2p5_CTL, 0, INF, FTSMPS, ftsmps, ftsmps2p5,
+ 100000),
+ QPNP_VREG_MAP(BOOST_BYP, BB_2A, 0, INF, BOOST_BYP, boost, boost_byp, 0),
+ QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL1, 0, INF, ULT_LO_SMPS, ult_lo_smps,
+ ult_lo_smps, 100000),
+ QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL2, 0, INF, ULT_LO_SMPS, ult_lo_smps,
+ ult_lo_smps, 100000),
+ QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL3, 0, INF, ULT_LO_SMPS, ult_lo_smps,
+ ult_lo_smps, 100000),
+ QPNP_VREG_MAP(ULT_BUCK, ULT_HF_CTL4, 0, INF, ULT_HO_SMPS, ult_ho_smps,
+ ult_ho_smps, 100000),
+ QPNP_VREG_MAP(ULT_LDO, N300_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, N600_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, N1200_ST, 0, INF, ULT_LDO, ult_ldo, ult_nldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, LV_P150, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, LV_P300, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, P600, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, P150, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 10000),
+ QPNP_VREG_MAP(ULT_LDO, P50, 0, INF, ULT_LDO, ult_ldo, ult_pldo,
+ 5000),
+};
+
+static int qpnp_regulator_match(struct qpnp_regulator *vreg)
+{
+ const struct qpnp_regulator_mapping *mapping;
+ struct device_node *node = vreg->spmi_dev->dev.of_node;
+ int rc, i;
+ u32 type_reg[2], dig_major_rev;
+ u8 version[QPNP_COMMON_REG_SUBTYPE - QPNP_COMMON_REG_DIG_MAJOR_REV + 1];
+ u8 type, subtype;
+
+ rc = qpnp_vreg_read(vreg, QPNP_COMMON_REG_DIG_MAJOR_REV, version,
+ ARRAY_SIZE(version));
+ if (rc) {
+ vreg_err(vreg, "could not read version registers, rc=%d\n", rc);
+ return rc;
+ }
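+	/*
+	 * version[] holds the DIG_MAJOR_REV through SUBTYPE registers, read in
+	 * a single burst starting at DIG_MAJOR_REV.
+	 */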
+ dig_major_rev = version[QPNP_COMMON_REG_DIG_MAJOR_REV
+ - QPNP_COMMON_REG_DIG_MAJOR_REV];
+ type = version[QPNP_COMMON_REG_TYPE
+ - QPNP_COMMON_REG_DIG_MAJOR_REV];
+ subtype = version[QPNP_COMMON_REG_SUBTYPE
+ - QPNP_COMMON_REG_DIG_MAJOR_REV];
+
+ /*
+ * Override type and subtype register values if qcom,force-type is
+ * present in the device tree node.
+ */
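+	/*
+	 * The property holds two cells, <type subtype>, for example
+	 * qcom,force-type = <0x01 0x08> (hypothetical values).
+	 */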
+ rc = of_property_read_u32_array(node, "qcom,force-type", type_reg, 2);
+ if (!rc) {
+ type = type_reg[0];
+ subtype = type_reg[1];
+ }
+
+ rc = -ENODEV;
+ for (i = 0; i < ARRAY_SIZE(supported_regulators); i++) {
+ mapping = &supported_regulators[i];
+ if (mapping->type == type && mapping->subtype == subtype
+ && mapping->revision_min <= dig_major_rev
+ && mapping->revision_max >= dig_major_rev) {
+ vreg->logical_type = mapping->logical_type;
+ vreg->set_points = mapping->set_points;
+ vreg->hpm_min_load = mapping->hpm_min_load;
+ vreg->rdesc.ops = mapping->ops;
+ vreg->rdesc.n_voltages
+ = mapping->set_points->n_voltages;
+ rc = 0;
+ break;
+ }
+ }
+
+ if (rc)
+ vreg_err(vreg, "unsupported regulator: type=0x%02X, subtype=0x%02X, dig major rev=0x%02X\n",
+ type, subtype, dig_major_rev);
+
+ return rc;
+}
+
+static int qpnp_regulator_ftsmps_init_slew_rate(struct qpnp_regulator *vreg)
+{
+ int rc;
+ u8 reg = 0;
+ int step = 0, delay, i, range_sel;
+ struct qpnp_voltage_range *range = NULL;
+
+ rc = qpnp_vreg_read(vreg, QPNP_COMMON_REG_STEP_CTRL, &reg, 1);
+ if (rc) {
+ vreg_err(vreg, "spmi read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ range_sel = vreg->ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE];
+
+ for (i = 0; i < vreg->set_points->count; i++) {
+ if (vreg->set_points->range[i].range_sel == range_sel) {
+ range = &vreg->set_points->range[i];
+ break;
+ }
+ }
+
+ if (!range) {
+ vreg_err(vreg, "range %d is invalid\n", range_sel);
+ return -EINVAL;
+ }
+
+ step = (reg & QPNP_FTSMPS_STEP_CTRL_STEP_MASK)
+ >> QPNP_FTSMPS_STEP_CTRL_STEP_SHIFT;
+
+ delay = (reg & QPNP_FTSMPS_STEP_CTRL_DELAY_MASK)
+ >> QPNP_FTSMPS_STEP_CTRL_DELAY_SHIFT;
+
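+	/*
+	 * The resulting rate is
+	 *   CLOCK_RATE * step_uV * 2^step / (1000 * STEP_DELAY * 2^delay),
+	 * derated by STEP_MARGIN_NUM / STEP_MARGIN_DEN for margin.
+	 */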
+ /* slew_rate has units of uV/us. */
+ vreg->slew_rate = QPNP_FTSMPS_CLOCK_RATE * range->step_uV * (1 << step);
+
+ vreg->slew_rate /= 1000 * (QPNP_FTSMPS_STEP_DELAY << delay);
+
+ vreg->slew_rate = vreg->slew_rate * QPNP_FTSMPS_STEP_MARGIN_NUM
+ / QPNP_FTSMPS_STEP_MARGIN_DEN;
+
+ /* Ensure that the slew rate is greater than 0. */
+ vreg->slew_rate = max(vreg->slew_rate, 1);
+
+ return rc;
+}
+
+static int qpnp_regulator_init_registers(struct qpnp_regulator *vreg,
+ struct qpnp_regulator_platform_data *pdata)
+{
+ int rc, i;
+ enum qpnp_regulator_logical_type type;
+ u8 ctrl_reg[8], reg, mask;
+
+ type = vreg->logical_type;
+
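+	/*
+	 * Read the eight control registers starting at VOLTAGE_RANGE into the
+	 * cache so that subsequent writes can skip unchanged values.
+	 */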
+ rc = qpnp_vreg_read(vreg, QPNP_COMMON_REG_VOLTAGE_RANGE,
+ vreg->ctrl_reg, 8);
+ if (rc) {
+ vreg_err(vreg, "spmi read failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctrl_reg); i++)
+ ctrl_reg[i] = vreg->ctrl_reg[i];
+
+ /* Set up enable pin control. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS)
+ && !(pdata->pin_ctrl_enable
+ & QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_ENABLE] &=
+ ~QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_ENABLE] |=
+ pdata->pin_ctrl_enable & QPNP_COMMON_ENABLE_FOLLOW_ALL_MASK;
+ }
+
+ /* Set up HPM control. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS)
+ && (pdata->hpm_enable != QPNP_REGULATOR_USE_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &= ~QPNP_COMMON_MODE_HPM_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ (pdata->hpm_enable ? QPNP_COMMON_MODE_HPM_MASK : 0);
+ }
+
+ /* Set up auto mode control. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS)
+ && (pdata->auto_mode_enable != QPNP_REGULATOR_USE_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_AUTO_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ (pdata->auto_mode_enable ? QPNP_COMMON_MODE_AUTO_MASK : 0);
+ }
+
+ /* Set up mode pin control. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO)
+ && !(pdata->pin_ctrl_hpm
+ & QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_FOLLOW_ALL_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ pdata->pin_ctrl_hpm & QPNP_COMMON_MODE_FOLLOW_ALL_MASK;
+ }
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_VS
+ && !(pdata->pin_ctrl_hpm & QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ pdata->pin_ctrl_hpm & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ }
+
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+ && !(pdata->pin_ctrl_hpm
+ & QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT)) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ pdata->pin_ctrl_hpm & QPNP_COMMON_MODE_FOLLOW_AWAKE_MASK;
+ }
+
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LN_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+ && pdata->bypass_mode_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ ctrl_reg[QPNP_COMMON_IDX_MODE] &=
+ ~QPNP_COMMON_MODE_BYPASS_MASK;
+ ctrl_reg[QPNP_COMMON_IDX_MODE] |=
+ (pdata->bypass_mode_enable
+ ? QPNP_COMMON_MODE_BYPASS_MASK : 0);
+ }
+
+ /* Set boost current limit. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST_BYP)
+ && pdata->boost_current_limit
+ != QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT) {
+ reg = pdata->boost_current_limit;
+ mask = QPNP_BOOST_CURRENT_LIMIT_MASK;
+ rc = qpnp_vreg_masked_read_write(vreg,
+ (type == QPNP_REGULATOR_LOGICAL_TYPE_BOOST
+ ? QPNP_BOOST_REG_CURRENT_LIMIT
+ : QPNP_BOOST_BYP_REG_CURRENT_LIMIT),
+ reg, mask);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Write back any control register values that were modified. */
+ rc = qpnp_vreg_write_optimized(vreg, QPNP_COMMON_REG_VOLTAGE_RANGE,
+ ctrl_reg, vreg->ctrl_reg, 8);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+
+	/* Set up the initial range for ULT_LO_SMPS. */
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS) {
+ ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_RANGE] =
+ (ctrl_reg[QPNP_COMMON_IDX_VOLTAGE_SET]
+ < ULT_SMPS_RANGE_SPLIT) ? 0 : 1;
+ }
+
+ /* Set pull down. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_VS)
+ && pdata->pull_down_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ reg = pdata->pull_down_enable
+ ? QPNP_COMMON_PULL_DOWN_ENABLE_MASK : 0;
+ rc = qpnp_vreg_write(vreg, QPNP_COMMON_REG_PULL_DOWN, &reg, 1);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS
+ && pdata->pull_down_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ /* FTSMPS has other bits in the pull down control register. */
+ reg = pdata->pull_down_enable
+ ? QPNP_COMMON_PULL_DOWN_ENABLE_MASK : 0;
+ rc = qpnp_vreg_masked_read_write(vreg,
+ QPNP_COMMON_REG_PULL_DOWN, reg,
+ QPNP_COMMON_PULL_DOWN_ENABLE_MASK);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Set soft start for LDO. */
+ if ((type == QPNP_REGULATOR_LOGICAL_TYPE_LDO
+ || type == QPNP_REGULATOR_LOGICAL_TYPE_ULT_LDO)
+ && pdata->soft_start_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ reg = pdata->soft_start_enable
+ ? QPNP_LDO_SOFT_START_ENABLE_MASK : 0;
+ rc = qpnp_vreg_write(vreg, QPNP_LDO_REG_SOFT_START, &reg, 1);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* Set soft start strength and over current protection for VS. */
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_VS) {
+ reg = 0;
+ mask = 0;
+ if (pdata->soft_start_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ reg |= pdata->soft_start_enable
+ ? QPNP_VS_SOFT_START_ENABLE_MASK : 0;
+ mask |= QPNP_VS_SOFT_START_ENABLE_MASK;
+ }
+ if (pdata->vs_soft_start_strength
+ != QPNP_VS_SOFT_START_STR_HW_DEFAULT) {
+ reg |= pdata->vs_soft_start_strength
+ & QPNP_VS_SOFT_START_SEL_MASK;
+ mask |= QPNP_VS_SOFT_START_SEL_MASK;
+ }
+ rc = qpnp_vreg_masked_read_write(vreg, QPNP_VS_REG_SOFT_START,
+ reg, mask);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (pdata->ocp_enable != QPNP_REGULATOR_USE_HW_DEFAULT) {
+ reg = pdata->ocp_enable ? QPNP_VS_OCP_NO_OVERRIDE
+ : QPNP_VS_OCP_OVERRIDE;
+ rc = qpnp_vreg_write(vreg, QPNP_VS_REG_OCP, &reg, 1);
+ if (rc) {
+ vreg_err(vreg, "spmi write failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ /* Calculate the slew rate for FTSMPS regulators. */
+ if (type == QPNP_REGULATOR_LOGICAL_TYPE_FTSMPS) {
+ rc = qpnp_regulator_ftsmps_init_slew_rate(vreg);
+ if (rc) {
+ vreg_err(vreg, "failed to initialize step rate, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
+/* Fill in pdata elements based on values found in device tree. */
+static int qpnp_regulator_get_dt_config(struct spmi_device *spmi,
+ struct qpnp_regulator_platform_data *pdata)
+{
+ struct resource *res;
+ struct device_node *node = spmi->dev.of_node;
+ int rc = 0;
+
+ pdata->init_data.constraints.input_uV
+ = pdata->init_data.constraints.max_uV;
+
+ res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&spmi->dev, "%s: node is missing base address\n",
+ __func__);
+ return -EINVAL;
+ }
+ pdata->base_addr = res->start;
+
+	/* The OCP IRQ is optional, so ignore errors from looking it up. */
+ pdata->ocp_irq = spmi_get_irq_byname(spmi, NULL, "ocp");
+ if (pdata->ocp_irq < 0)
+ pdata->ocp_irq = 0;
+
+ /*
+ * Initialize configuration parameters to use hardware default in case
+ * no value is specified via device tree.
+ */
+ pdata->auto_mode_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->bypass_mode_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->ocp_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->pull_down_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->soft_start_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+ pdata->boost_current_limit = QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT;
+ pdata->pin_ctrl_enable = QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT;
+ pdata->pin_ctrl_hpm = QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT;
+ pdata->vs_soft_start_strength = QPNP_VS_SOFT_START_STR_HW_DEFAULT;
+ pdata->hpm_enable = QPNP_REGULATOR_USE_HW_DEFAULT;
+
+ /* These bindings are optional, so it is okay if they are not found. */
+ of_property_read_u32(node, "qcom,auto-mode-enable",
+ &pdata->auto_mode_enable);
+ of_property_read_u32(node, "qcom,bypass-mode-enable",
+ &pdata->bypass_mode_enable);
+ of_property_read_u32(node, "qcom,ocp-enable", &pdata->ocp_enable);
+ of_property_read_u32(node, "qcom,ocp-max-retries",
+ &pdata->ocp_max_retries);
+ of_property_read_u32(node, "qcom,ocp-retry-delay",
+ &pdata->ocp_retry_delay_ms);
+ of_property_read_u32(node, "qcom,pull-down-enable",
+ &pdata->pull_down_enable);
+ of_property_read_u32(node, "qcom,soft-start-enable",
+ &pdata->soft_start_enable);
+ of_property_read_u32(node, "qcom,boost-current-limit",
+ &pdata->boost_current_limit);
+ of_property_read_u32(node, "qcom,pin-ctrl-enable",
+ &pdata->pin_ctrl_enable);
+ of_property_read_u32(node, "qcom,pin-ctrl-hpm", &pdata->pin_ctrl_hpm);
+ of_property_read_u32(node, "qcom,hpm-enable", &pdata->hpm_enable);
+ of_property_read_u32(node, "qcom,vs-soft-start-strength",
+ &pdata->vs_soft_start_strength);
+ of_property_read_u32(node, "qcom,system-load", &pdata->system_load);
+ of_property_read_u32(node, "qcom,enable-time", &pdata->enable_time);
+
+ return rc;
+}
+
+static struct of_device_id spmi_match_table[];
+
+#define MAX_NAME_LEN 127
+
+static int qpnp_regulator_probe(struct spmi_device *spmi)
+{
+ struct regulator_config reg_config = {};
+ struct qpnp_regulator_platform_data *pdata;
+ struct qpnp_regulator *vreg;
+ struct regulator_desc *rdesc;
+ struct qpnp_regulator_platform_data of_pdata;
+ struct regulator_init_data *init_data;
+ char *reg_name;
+ int rc;
+ bool is_dt;
+
+ vreg = kzalloc(sizeof(struct qpnp_regulator), GFP_KERNEL);
+ if (!vreg) {
+ dev_err(&spmi->dev, "%s: Can't allocate qpnp_regulator\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ is_dt = of_match_device(spmi_match_table, &spmi->dev);
+
+ /* Check if device tree is in use. */
+ if (is_dt) {
+ init_data = of_get_regulator_init_data(&spmi->dev,
+ spmi->dev.of_node);
+ if (!init_data) {
+ dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+ __func__);
+ kfree(vreg);
+ return -ENOMEM;
+ }
+ memset(&of_pdata, 0,
+ sizeof(struct qpnp_regulator_platform_data));
+ memcpy(&of_pdata.init_data, init_data,
+ sizeof(struct regulator_init_data));
+
+ if (of_get_property(spmi->dev.of_node, "parent-supply", NULL))
+ of_pdata.init_data.supply_regulator = "parent";
+
+ rc = qpnp_regulator_get_dt_config(spmi, &of_pdata);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: DT parsing failed, rc=%d\n",
+ __func__, rc);
+ kfree(vreg);
+			return rc;
+ }
+
+ pdata = &of_pdata;
+ } else {
+ pdata = spmi->dev.platform_data;
+ }
+
+ if (pdata == NULL) {
+ dev_err(&spmi->dev, "%s: no platform data specified\n",
+ __func__);
+ kfree(vreg);
+ return -EINVAL;
+ }
+
+ vreg->spmi_dev = spmi;
+ vreg->prev_write_count = -1;
+ vreg->write_count = 0;
+ vreg->base_addr = pdata->base_addr;
+ vreg->enable_time = pdata->enable_time;
+ vreg->system_load = pdata->system_load;
+ vreg->ocp_enable = pdata->ocp_enable;
+ vreg->ocp_irq = pdata->ocp_irq;
+ vreg->ocp_max_retries = pdata->ocp_max_retries;
+ vreg->ocp_retry_delay_ms = pdata->ocp_retry_delay_ms;
+
+ if (vreg->ocp_max_retries == 0)
+ vreg->ocp_max_retries = QPNP_VS_OCP_DEFAULT_MAX_RETRIES;
+ if (vreg->ocp_retry_delay_ms == 0)
+ vreg->ocp_retry_delay_ms = QPNP_VS_OCP_DEFAULT_RETRY_DELAY_MS;
+
+ rdesc = &vreg->rdesc;
+ rdesc->id = spmi->ctrl->nr;
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+
+ reg_name = kzalloc(strnlen(pdata->init_data.constraints.name,
+ MAX_NAME_LEN) + 1, GFP_KERNEL);
+ if (!reg_name) {
+ dev_err(&spmi->dev, "%s: Can't allocate regulator name\n",
+ __func__);
+ kfree(vreg);
+ return -ENOMEM;
+ }
+ strlcpy(reg_name, pdata->init_data.constraints.name,
+ strnlen(pdata->init_data.constraints.name, MAX_NAME_LEN) + 1);
+ rdesc->name = reg_name;
+
+ dev_set_drvdata(&spmi->dev, vreg);
+
+ rc = qpnp_regulator_match(vreg);
+ if (rc)
+ goto bail;
+
+ if (is_dt && rdesc->ops) {
+ /* Fill in ops and mode masks when using device tree. */
+ if (rdesc->ops->enable)
+ pdata->init_data.constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+ if (rdesc->ops->get_voltage)
+ pdata->init_data.constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE;
+ if (rdesc->ops->get_mode) {
+ pdata->init_data.constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_DRMS;
+ pdata->init_data.constraints.valid_modes_mask
+ = REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+ }
+ }
+
+ rc = qpnp_regulator_init_registers(vreg, pdata);
+ if (rc) {
+ vreg_err(vreg, "common initialization failed, rc=%d\n", rc);
+ goto bail;
+ }
+
+ if (vreg->logical_type != QPNP_REGULATOR_LOGICAL_TYPE_VS)
+ vreg->ocp_irq = 0;
+
+ if (vreg->ocp_irq) {
+ rc = devm_request_irq(&spmi->dev, vreg->ocp_irq,
+ qpnp_regulator_vs_ocp_isr, IRQF_TRIGGER_RISING, "ocp",
+ vreg);
+ if (rc < 0) {
+ vreg_err(vreg, "failed to request irq %d, rc=%d\n",
+ vreg->ocp_irq, rc);
+ goto bail;
+ }
+
+ INIT_DELAYED_WORK(&vreg->ocp_work, qpnp_regulator_vs_ocp_work);
+ }
+
+ reg_config.dev = &spmi->dev;
+ reg_config.init_data = &pdata->init_data;
+ reg_config.driver_data = vreg;
+ reg_config.of_node = spmi->dev.of_node;
+ vreg->rdev = regulator_register(rdesc, &reg_config);
+ if (IS_ERR(vreg->rdev)) {
+ rc = PTR_ERR(vreg->rdev);
+ if (rc != -EPROBE_DEFER)
+ vreg_err(vreg, "regulator_register failed, rc=%d\n",
+ rc);
+ goto cancel_ocp_work;
+ }
+
+ qpnp_vreg_show_state(vreg->rdev, QPNP_REGULATOR_ACTION_INIT);
+
+ return 0;
+
+cancel_ocp_work:
+ if (vreg->ocp_irq)
+ cancel_delayed_work_sync(&vreg->ocp_work);
+bail:
+ if (rc && rc != -EPROBE_DEFER)
+ vreg_err(vreg, "probe failed, rc=%d\n", rc);
+
+ kfree(vreg->rdesc.name);
+ kfree(vreg);
+
+ return rc;
+}
+
+static int qpnp_regulator_remove(struct spmi_device *spmi)
+{
+ struct qpnp_regulator *vreg;
+
+ vreg = dev_get_drvdata(&spmi->dev);
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ if (vreg) {
+ regulator_unregister(vreg->rdev);
+ if (vreg->ocp_irq)
+ cancel_delayed_work_sync(&vreg->ocp_work);
+ kfree(vreg->rdesc.name);
+ kfree(vreg);
+ }
+
+ return 0;
+}
+
+static struct of_device_id spmi_match_table[] = {
+ { .compatible = QPNP_REGULATOR_DRIVER_NAME, },
+ {}
+};
+
+static const struct spmi_device_id qpnp_regulator_id[] = {
+ { QPNP_REGULATOR_DRIVER_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spmi, qpnp_regulator_id);
+
+static struct spmi_driver qpnp_regulator_driver = {
+ .driver = {
+ .name = QPNP_REGULATOR_DRIVER_NAME,
+ .of_match_table = spmi_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = qpnp_regulator_probe,
+ .remove = qpnp_regulator_remove,
+ .id_table = qpnp_regulator_id,
+};
+
+/*
+ * Pre-compute the number of set points available for each regulator type to
+ * avoid unnecessary calculations at runtime.
+ */
+static void qpnp_regulator_set_point_init(void)
+{
+ struct qpnp_voltage_set_points **set_points;
+ int i, j, temp;
+
+ set_points = all_set_points;
+
+ for (i = 0; i < ARRAY_SIZE(all_set_points); i++) {
+ temp = 0;
+ for (j = 0; j < all_set_points[i]->count; j++) {
+ all_set_points[i]->range[j].n_voltages
+ = (all_set_points[i]->range[j].set_point_max_uV
+ - all_set_points[i]->range[j].set_point_min_uV)
+ / all_set_points[i]->range[j].step_uV + 1;
+ if (all_set_points[i]->range[j].set_point_max_uV == 0)
+ all_set_points[i]->range[j].n_voltages = 0;
+ temp += all_set_points[i]->range[j].n_voltages;
+ }
+ all_set_points[i]->n_voltages = temp;
+ }
+}
+
+/**
+ * qpnp_regulator_init() - register spmi driver for qpnp-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+int __init qpnp_regulator_init(void)
+{
+ static bool has_registered;
+
+ if (has_registered)
+ return 0;
+ else
+ has_registered = true;
+
+ qpnp_regulator_set_point_init();
+
+ return spmi_driver_register(&qpnp_regulator_driver);
+}
+EXPORT_SYMBOL(qpnp_regulator_init);
+
+static void __exit qpnp_regulator_exit(void)
+{
+ spmi_driver_unregister(&qpnp_regulator_driver);
+}
+
+MODULE_DESCRIPTION("QPNP PMIC regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(qpnp_regulator_init);
+module_exit(qpnp_regulator_exit);
diff --git a/drivers/regulator/rpm-smd-regulator.c b/drivers/regulator/rpm-smd-regulator.c
new file mode 100644
index 000000000000..042884f3d23e
--- /dev/null
+++ b/drivers/regulator/rpm-smd-regulator.c
@@ -0,0 +1,1945 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <soc/qcom/rpm-smd.h>
+
+/* Debug Definitions */
+
+enum {
+ RPM_VREG_DEBUG_REQUEST = BIT(0),
+ RPM_VREG_DEBUG_FULL_REQUEST = BIT(1),
+ RPM_VREG_DEBUG_DUPLICATE = BIT(2),
+};
+
+static int rpm_vreg_debug_mask;
+module_param_named(
+ debug_mask, rpm_vreg_debug_mask, int, S_IRUSR | S_IWUSR
+);
+
+#define vreg_err(req, fmt, ...) \
+ pr_err("%s: " fmt, req->rdesc.name, ##__VA_ARGS__)
+
+/* RPM regulator request types */
+enum rpm_regulator_type {
+ RPM_REGULATOR_TYPE_LDO,
+ RPM_REGULATOR_TYPE_SMPS,
+ RPM_REGULATOR_TYPE_VS,
+ RPM_REGULATOR_TYPE_NCP,
+ RPM_REGULATOR_TYPE_BOB,
+ RPM_REGULATOR_TYPE_MAX,
+};
+
+/* RPM resource parameters */
+enum rpm_regulator_param_index {
+ RPM_REGULATOR_PARAM_ENABLE,
+ RPM_REGULATOR_PARAM_VOLTAGE,
+ RPM_REGULATOR_PARAM_CURRENT,
+ RPM_REGULATOR_PARAM_MODE_LDO,
+ RPM_REGULATOR_PARAM_MODE_SMPS,
+ RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE,
+ RPM_REGULATOR_PARAM_PIN_CTRL_MODE,
+ RPM_REGULATOR_PARAM_FREQUENCY,
+ RPM_REGULATOR_PARAM_HEAD_ROOM,
+ RPM_REGULATOR_PARAM_QUIET_MODE,
+ RPM_REGULATOR_PARAM_FREQ_REASON,
+ RPM_REGULATOR_PARAM_CORNER,
+ RPM_REGULATOR_PARAM_BYPASS,
+ RPM_REGULATOR_PARAM_FLOOR_CORNER,
+ RPM_REGULATOR_PARAM_LEVEL,
+ RPM_REGULATOR_PARAM_FLOOR_LEVEL,
+ RPM_REGULATOR_PARAM_MODE_BOB,
+ RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE1,
+ RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE2,
+ RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE3,
+ RPM_REGULATOR_PARAM_MAX,
+};
+
+enum rpm_regulator_smps_mode {
+ RPM_REGULATOR_SMPS_MODE_AUTO = 0,
+ RPM_REGULATOR_SMPS_MODE_IPEAK = 1,
+ RPM_REGULATOR_SMPS_MODE_PWM = 2,
+};
+
+enum rpm_regulator_ldo_mode {
+ RPM_REGULATOR_LDO_MODE_IPEAK = 0,
+ RPM_REGULATOR_LDO_MODE_HPM = 1,
+};
+
+enum rpm_regulator_bob_mode {
+ RPM_REGULATOR_BOB_MODE_PASS = 0,
+ RPM_REGULATOR_BOB_MODE_PFM = 1,
+ RPM_REGULATOR_BOB_MODE_AUTO = 2,
+ RPM_REGULATOR_BOB_MODE_PWM = 3,
+};
+
+#define RPM_SET_CONFIG_ACTIVE BIT(0)
+#define RPM_SET_CONFIG_SLEEP BIT(1)
+#define RPM_SET_CONFIG_BOTH (RPM_SET_CONFIG_ACTIVE \
+ | RPM_SET_CONFIG_SLEEP)
+struct rpm_regulator_param {
+ char *name;
+ char *property_name;
+ u32 key;
+ u32 min;
+ u32 max;
+ u32 supported_regulator_types;
+};
+
+#define PARAM(_idx, _support_ldo, _support_smps, _support_vs, _support_ncp, \
+ _support_bob, _name, _min, _max, _property_name) \
+ [RPM_REGULATOR_PARAM_##_idx] = { \
+ .name = _name, \
+ .property_name = _property_name, \
+ .min = _min, \
+ .max = _max, \
+ .supported_regulator_types = \
+ _support_ldo << RPM_REGULATOR_TYPE_LDO | \
+ _support_smps << RPM_REGULATOR_TYPE_SMPS | \
+ _support_vs << RPM_REGULATOR_TYPE_VS | \
+ _support_ncp << RPM_REGULATOR_TYPE_NCP | \
+ _support_bob << RPM_REGULATOR_TYPE_BOB, \
+ }
+
+static struct rpm_regulator_param params[RPM_REGULATOR_PARAM_MAX] = {
+ /* ID LDO SMPS VS NCP BOB name min max property-name */
+ PARAM(ENABLE, 1, 1, 1, 1, 1, "swen", 0, 1, "qcom,init-enable"),
+ PARAM(VOLTAGE, 1, 1, 0, 1, 1, "uv", 0, 0x7FFFFFF, "qcom,init-voltage"),
+ PARAM(CURRENT, 1, 1, 0, 0, 0, "ma", 0, 0x1FFF, "qcom,init-current"),
+ PARAM(MODE_LDO, 1, 0, 0, 0, 0, "lsmd", 0, 1, "qcom,init-ldo-mode"),
+ PARAM(MODE_SMPS, 0, 1, 0, 0, 0, "ssmd", 0, 2, "qcom,init-smps-mode"),
+ PARAM(PIN_CTRL_ENABLE, 1, 1, 1, 0, 0, "pcen", 0, 0xF, "qcom,init-pin-ctrl-enable"),
+ PARAM(PIN_CTRL_MODE, 1, 1, 1, 0, 0, "pcmd", 0, 0x1F, "qcom,init-pin-ctrl-mode"),
+ PARAM(FREQUENCY, 0, 1, 0, 1, 0, "freq", 0, 31, "qcom,init-frequency"),
+ PARAM(HEAD_ROOM, 1, 0, 0, 1, 0, "hr", 0, 0x7FFFFFFF, "qcom,init-head-room"),
+ PARAM(QUIET_MODE, 0, 1, 0, 0, 0, "qm", 0, 2, "qcom,init-quiet-mode"),
+ PARAM(FREQ_REASON, 0, 1, 0, 1, 0, "resn", 0, 8, "qcom,init-freq-reason"),
+ PARAM(CORNER, 1, 1, 0, 0, 0, "corn", 0, 6, "qcom,init-voltage-corner"),
+ PARAM(BYPASS, 1, 0, 0, 0, 0, "bypa", 0, 1, "qcom,init-disallow-bypass"),
+ PARAM(FLOOR_CORNER, 1, 1, 0, 0, 0, "vfc", 0, 6, "qcom,init-voltage-floor-corner"),
+ PARAM(LEVEL, 1, 1, 0, 0, 0, "vlvl", 0, 0xFFFF, "qcom,init-voltage-level"),
+ PARAM(FLOOR_LEVEL, 1, 1, 0, 0, 0, "vfl", 0, 0xFFFF, "qcom,init-voltage-floor-level"),
+ PARAM(MODE_BOB, 0, 0, 0, 0, 1, "bobm", 0, 3, "qcom,init-bob-mode"),
+ PARAM(PIN_CTRL_VOLTAGE1, 0, 0, 0, 0, 1, "pcv1", 0, 0x7FFFFFF, "qcom,init-pin-ctrl-voltage1"),
+ PARAM(PIN_CTRL_VOLTAGE2, 0, 0, 0, 0, 1, "pcv2", 0, 0x7FFFFFF, "qcom,init-pin-ctrl-voltage2"),
+ PARAM(PIN_CTRL_VOLTAGE3, 0, 0, 0, 0, 1, "pcv3", 0, 0x7FFFFFF, "qcom,init-pin-ctrl-voltage3"),
+};
+
+struct rpm_regulator_mode_map {
+ int ldo_mode;
+ int smps_mode;
+};
+
+static struct rpm_regulator_mode_map mode_mapping[] = {
+ [RPM_REGULATOR_MODE_AUTO]
+ = {-1, RPM_REGULATOR_SMPS_MODE_AUTO},
+ [RPM_REGULATOR_MODE_IPEAK]
+ = {RPM_REGULATOR_LDO_MODE_IPEAK, RPM_REGULATOR_SMPS_MODE_IPEAK},
+ [RPM_REGULATOR_MODE_HPM]
+ = {RPM_REGULATOR_LDO_MODE_HPM, RPM_REGULATOR_SMPS_MODE_PWM},
+};
+
+/*
+ * Indices into pin_ctrl_mask[] used when enable/disable is implemented via
+ * pin control.
+ */
+#define RPM_VREG_PIN_CTRL_STATE_DISABLE 0
+#define RPM_VREG_PIN_CTRL_STATE_ENABLE 1
+#define RPM_VREG_PIN_CTRL_STATE_COUNT 2
+
+struct rpm_vreg_request {
+ u32 param[RPM_REGULATOR_PARAM_MAX];
+ u32 valid;
+ u32 modified;
+};
+
+struct rpm_vreg {
+ struct rpm_vreg_request aggr_req_active;
+ struct rpm_vreg_request aggr_req_sleep;
+ struct list_head reg_list;
+ const char *resource_name;
+ u32 resource_id;
+ bool allow_atomic;
+ int regulator_type;
+ int hpm_min_load;
+ int enable_time;
+ spinlock_t slock;
+ struct mutex mlock;
+ unsigned long flags;
+ bool sleep_request_sent;
+ bool wait_for_ack_active;
+ bool wait_for_ack_sleep;
+ bool always_wait_for_ack;
+ bool apps_only;
+ struct msm_rpm_request *handle_active;
+ struct msm_rpm_request *handle_sleep;
+};
+
+struct rpm_regulator {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct rpm_vreg *rpm_vreg;
+ struct list_head list;
+ bool set_active;
+ bool set_sleep;
+ bool always_send_voltage;
+ bool always_send_current;
+ bool use_pin_ctrl_for_enable;
+ struct rpm_vreg_request req;
+ int system_load;
+ int min_uV;
+ int max_uV;
+ u32 pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_COUNT];
+ enum rpm_regulator_param_index voltage_index;
+ int voltage_offset;
+};
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level. It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+/*
+ * Regulator requests sent in the active set take effect immediately. Requests
+ * sent in the sleep set take effect when the Apps processor transitions into
+ * RPM assisted power collapse. For any given regulator, if an active set
+ * request is present, but not a sleep set request, then the active set request
+ * is used at all times, even when the Apps processor is power collapsed.
+ *
+ * The rpm-regulator-smd takes advantage of this default usage of the active set
+ * request by only sending a sleep set request if it differs from the
+ * corresponding active set request.
+ */
+#define RPM_SET_ACTIVE MSM_RPM_CTX_ACTIVE_SET
+#define RPM_SET_SLEEP MSM_RPM_CTX_SLEEP_SET
+
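+/*
+ * Pack the first four characters of an RPM parameter key string into a
+ * little-endian u32 so it can be used directly as an RPM key-value-pair key.
+ */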
+static u32 rpm_vreg_string_to_int(const u8 *str)
+{
+ int i, len;
+ u32 output = 0;
+
+ len = strnlen(str, sizeof(u32));
+ for (i = 0; i < len; i++)
+ output |= str[i] << (i * 8);
+
+ return output;
+}
+
+static inline void rpm_vreg_lock(struct rpm_vreg *rpm_vreg)
+{
+ if (rpm_vreg->allow_atomic)
+ spin_lock_irqsave(&rpm_vreg->slock, rpm_vreg->flags);
+ else
+ mutex_lock(&rpm_vreg->mlock);
+}
+
+static inline void rpm_vreg_unlock(struct rpm_vreg *rpm_vreg)
+{
+ if (rpm_vreg->allow_atomic)
+ spin_unlock_irqrestore(&rpm_vreg->slock, rpm_vreg->flags);
+ else
+ mutex_unlock(&rpm_vreg->mlock);
+}
+
+static inline bool rpm_vreg_active_or_sleep_enabled(struct rpm_vreg *rpm_vreg)
+{
+ return (rpm_vreg->aggr_req_active.param[RPM_REGULATOR_PARAM_ENABLE]
+ && (rpm_vreg->aggr_req_active.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)))
+ || ((rpm_vreg->aggr_req_sleep.param[RPM_REGULATOR_PARAM_ENABLE])
+ && (rpm_vreg->aggr_req_sleep.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+static inline bool rpm_vreg_shared_active_or_sleep_enabled_valid
+ (struct rpm_vreg *rpm_vreg)
+{
+ return !rpm_vreg->apps_only &&
+ ((rpm_vreg->aggr_req_active.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE))
+ || (rpm_vreg->aggr_req_sleep.valid
+ & BIT(RPM_REGULATOR_PARAM_ENABLE)));
+}
+
+static const u32 power_level_params =
+ BIT(RPM_REGULATOR_PARAM_ENABLE) |
+ BIT(RPM_REGULATOR_PARAM_VOLTAGE) |
+ BIT(RPM_REGULATOR_PARAM_CURRENT) |
+ BIT(RPM_REGULATOR_PARAM_CORNER) |
+ BIT(RPM_REGULATOR_PARAM_BYPASS) |
+ BIT(RPM_REGULATOR_PARAM_FLOOR_CORNER) |
+ BIT(RPM_REGULATOR_PARAM_LEVEL) |
+ BIT(RPM_REGULATOR_PARAM_FLOOR_LEVEL);
+
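+/*
+ * Determine whether the RPM request must wait for an ack.  Waiting can be
+ * skipped only when every modified parameter is a power-level parameter
+ * whose new value does not exceed the previously sent value, i.e. when the
+ * change can only reduce power.
+ */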
+static bool rpm_vreg_ack_required(struct rpm_vreg *rpm_vreg, u32 set,
+ const u32 *prev_param, const u32 *param,
+ u32 prev_valid, u32 modified)
+{
+ u32 mask;
+ int i;
+
+ if (rpm_vreg->always_wait_for_ack
+ || (set == RPM_SET_ACTIVE && rpm_vreg->wait_for_ack_active)
+ || (set == RPM_SET_SLEEP && rpm_vreg->wait_for_ack_sleep))
+ return true;
+
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ mask = BIT(i);
+ if (modified & mask) {
+ if ((prev_valid & mask) && (power_level_params & mask)
+ && (param[i] <= prev_param[i]))
+ continue;
+ else
+ return true;
+ }
+ }
+
+ return false;
+}
+
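+/*
+ * Force the next request for the affected set to wait for an ack if the
+ * consumer's new maximum is below the value currently aggregated at the RPM.
+ */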
+static void rpm_vreg_check_param_max(struct rpm_regulator *regulator, int index,
+ u32 new_max)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+
+ if (regulator->set_active
+ && (rpm_vreg->aggr_req_active.valid & BIT(index))
+ && rpm_vreg->aggr_req_active.param[index] > new_max)
+ rpm_vreg->wait_for_ack_active = true;
+
+ if (regulator->set_sleep
+ && (rpm_vreg->aggr_req_sleep.valid & BIT(index))
+ && rpm_vreg->aggr_req_sleep.param[index] > new_max)
+ rpm_vreg->wait_for_ack_sleep = true;
+}
+
+/*
+ * This is used when voting for LPM or HPM by subtracting or adding to the
+ * hpm_min_load of a regulator. It has units of uA.
+ */
+#define LOAD_THRESHOLD_STEP 1000
+
+static inline int rpm_vreg_hpm_min_uA(struct rpm_vreg *rpm_vreg)
+{
+ return rpm_vreg->hpm_min_load;
+}
+
+static inline int rpm_vreg_lpm_max_uA(struct rpm_vreg *rpm_vreg)
+{
+ return rpm_vreg->hpm_min_load - LOAD_THRESHOLD_STEP;
+}
+
+#define MICRO_TO_MILLI(uV) ((uV) / 1000)
+#define MILLI_TO_MICRO(uV) ((uV) * 1000)
+
+#define DEBUG_PRINT_BUFFER_SIZE 512
+#define REQ_SENT 0
+#define REQ_PREV 1
+#define REQ_CACHED 2
+#define REQ_TYPES 3
+
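+/* Log the aggregated regulator request according to the debug mask. */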
+static void rpm_regulator_req(struct rpm_regulator *regulator, int set,
+ bool sent)
+{
+ char buf[DEBUG_PRINT_BUFFER_SIZE];
+ size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ struct rpm_vreg_request *aggr;
+ bool first;
+ u32 mask[REQ_TYPES] = {0, 0, 0};
+ const char *req_names[REQ_TYPES] = {"sent", "prev", "cached"};
+ int pos = 0;
+ int i, j;
+
+ aggr = (set == RPM_SET_ACTIVE)
+ ? &rpm_vreg->aggr_req_active : &rpm_vreg->aggr_req_sleep;
+
+ if (rpm_vreg_debug_mask & RPM_VREG_DEBUG_DUPLICATE) {
+ mask[REQ_SENT] = aggr->modified;
+ mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+ } else if (sent
+ && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_FULL_REQUEST)) {
+ mask[REQ_SENT] = aggr->modified;
+ mask[REQ_PREV] = aggr->valid & ~aggr->modified;
+ } else if (sent && (rpm_vreg_debug_mask & RPM_VREG_DEBUG_REQUEST)) {
+ mask[REQ_SENT] = aggr->modified;
+ }
+
+ if (!(mask[REQ_SENT] | mask[REQ_PREV]))
+ return;
+
+ if (set == RPM_SET_SLEEP && !rpm_vreg->sleep_request_sent) {
+ mask[REQ_CACHED] = mask[REQ_SENT] | mask[REQ_PREV];
+ mask[REQ_SENT] = 0;
+ mask[REQ_PREV] = 0;
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, "%s%s: ",
+ KERN_INFO, __func__);
+
+ pos += scnprintf(buf + pos, buflen - pos, "%s %u (%s): s=%s",
+ rpm_vreg->resource_name, rpm_vreg->resource_id,
+ regulator->rdesc.name,
+ (set == RPM_SET_ACTIVE ? "act" : "slp"));
+
+ for (i = 0; i < REQ_TYPES; i++) {
+ if (mask[i])
+ pos += scnprintf(buf + pos, buflen - pos, "; %s: ",
+ req_names[i]);
+
+ first = true;
+ for (j = 0; j < RPM_REGULATOR_PARAM_MAX; j++) {
+ if (mask[i] & BIT(j)) {
+ pos += scnprintf(buf + pos, buflen - pos,
+ "%s%s=%u", (first ? "" : ", "),
+ params[j].name, aggr->param[j]);
+ first = false;
+ }
+ }
+ }
+
+ pos += scnprintf(buf + pos, buflen - pos, "\n");
+ printk(buf);
+}
+
+#define RPM_VREG_SET_PARAM(_regulator, _param, _val) \
+{ \
+ (_regulator)->req.param[RPM_REGULATOR_PARAM_##_param] = _val; \
+ (_regulator)->req.modified |= BIT(RPM_REGULATOR_PARAM_##_param); \
+} \
+
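+/* Add a single key-value pair to the RPM request handle for the given set. */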
+static int rpm_vreg_add_kvp_to_request(struct rpm_vreg *rpm_vreg,
+ const u32 *param, int idx, u32 set)
+{
+ struct msm_rpm_request *handle;
+
+ handle = (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+ : rpm_vreg->handle_sleep);
+
+ if (rpm_vreg->allow_atomic)
+ return msm_rpm_add_kvp_data_noirq(handle, params[idx].key,
+ (u8 *)&param[idx], 4);
+ else
+ return msm_rpm_add_kvp_data(handle, params[idx].key,
+ (u8 *)&param[idx], 4);
+}
+
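+/*
+ * Clear modified bits for parameters whose values match the previously valid
+ * aggregated values so that unchanged KVPs are not resent to the RPM.
+ */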
+static void rpm_vreg_check_modified_requests(const u32 *prev_param,
+ const u32 *param, u32 prev_valid, u32 *modified)
+{
+ u32 value_changed = 0;
+ int i;
+
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ if (param[i] != prev_param[i])
+ value_changed |= BIT(i);
+ }
+
+ /*
+ * Only keep bits that are for changed parameters or previously
+ * invalid parameters.
+ */
+ *modified &= value_changed | ~prev_valid;
+}
+
+static int rpm_vreg_add_modified_requests(struct rpm_regulator *regulator,
+ u32 set, const u32 *param, u32 modified)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ /* Only send requests for modified parameters. */
+ if (modified & BIT(i)) {
+ rc = rpm_vreg_add_kvp_to_request(rpm_vreg, param, i,
+ set);
+ if (rc) {
+ vreg_err(regulator,
+ "add KVP failed: %s %u; %s, rc=%d\n",
+ rpm_vreg->resource_name,
+ rpm_vreg->resource_id, params[i].name,
+ rc);
+ return rc;
+ }
+ }
+ }
+
+ return rc;
+}
+
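+/*
+ * Send the assembled RPM request.  Atomic consumers always use the noirq
+ * send-and-wait path; otherwise waiting for the ack is optional.
+ */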
+static int rpm_vreg_send_request(struct rpm_regulator *regulator, u32 set,
+ bool wait_for_ack)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ struct msm_rpm_request *handle
+ = (set == RPM_SET_ACTIVE ? rpm_vreg->handle_active
+ : rpm_vreg->handle_sleep);
+ int rc = 0;
+ void *temp;
+
+ if (unlikely(rpm_vreg->allow_atomic)) {
+ rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(
+ handle));
+ } else if (wait_for_ack) {
+ rc = msm_rpm_wait_for_ack(msm_rpm_send_request(handle));
+ } else {
+ temp = msm_rpm_send_request_noack(handle);
+ if (IS_ERR(temp))
+ rc = PTR_ERR(temp);
+ }
+
+ if (rc)
+ vreg_err(regulator,
+ "msm rpm send failed: %s %u; set=%s, rc=%d\n",
+ rpm_vreg->resource_name,
+ rpm_vreg->resource_id,
+ (set == RPM_SET_ACTIVE ? "act" : "slp"), rc);
+
+ return rc;
+}
+
+#define RPM_VREG_AGGR_MIN(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ = min(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+ _param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_MAX(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ = max(_param_aggr[RPM_REGULATOR_PARAM_##_idx], \
+ _param_reg[RPM_REGULATOR_PARAM_##_idx]); \
+}
+
+#define RPM_VREG_AGGR_SUM(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ += _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+#define RPM_VREG_AGGR_OR(_idx, _param_aggr, _param_reg) \
+{ \
+ _param_aggr[RPM_REGULATOR_PARAM_##_idx] \
+ |= _param_reg[RPM_REGULATOR_PARAM_##_idx]; \
+}
+
+/*
+ * Aggregation is performed on each parameter based on the way that the RPM
+ * aggregates that type internally between RPM masters.
+ */
+static void rpm_vreg_aggregate_params(u32 *param_aggr, const u32 *param_reg)
+{
+ RPM_VREG_AGGR_MAX(ENABLE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(VOLTAGE, param_aggr, param_reg);
+ RPM_VREG_AGGR_SUM(CURRENT, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(MODE_LDO, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(MODE_SMPS, param_aggr, param_reg);
+ RPM_VREG_AGGR_OR(PIN_CTRL_ENABLE, param_aggr, param_reg);
+ RPM_VREG_AGGR_OR(PIN_CTRL_MODE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MIN(FREQUENCY, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(HEAD_ROOM, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(QUIET_MODE, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(FREQ_REASON, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(CORNER, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(BYPASS, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(FLOOR_CORNER, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(LEVEL, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(FLOOR_LEVEL, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(MODE_BOB, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(PIN_CTRL_VOLTAGE1, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(PIN_CTRL_VOLTAGE2, param_aggr, param_reg);
+ RPM_VREG_AGGR_MAX(PIN_CTRL_VOLTAGE3, param_aggr, param_reg);
+}
+
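+/*
+ * Aggregate the requests of all consumers of this RPM resource and send new
+ * active set and, if needed, sleep set requests to the RPM when any of their
+ * key-value pairs have changed.
+ */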
+static int rpm_vreg_aggregate_requests(struct rpm_regulator *regulator)
+{
+ struct rpm_vreg *rpm_vreg = regulator->rpm_vreg;
+ u32 param_active[RPM_REGULATOR_PARAM_MAX];
+ u32 param_sleep[RPM_REGULATOR_PARAM_MAX];
+ u32 modified_active, modified_sleep;
+ struct rpm_regulator *reg;
+ bool sleep_set_differs = false;
+ bool send_active = false;
+ bool send_sleep = false;
+ bool wait_for_ack;
+ int rc = 0;
+ int i;
+
+ memset(param_active, 0, sizeof(param_active));
+ memset(param_sleep, 0, sizeof(param_sleep));
+ modified_active = rpm_vreg->aggr_req_active.modified;
+ modified_sleep = rpm_vreg->aggr_req_sleep.modified;
+
+ /*
+ * Aggregate all of the requests for this regulator in both active
+ * and sleep sets.
+ */
+ list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+ if (reg->set_active) {
+ rpm_vreg_aggregate_params(param_active, reg->req.param);
+ modified_active |= reg->req.modified;
+ }
+ if (reg->set_sleep) {
+ rpm_vreg_aggregate_params(param_sleep, reg->req.param);
+ modified_sleep |= reg->req.modified;
+ }
+ }
+
+ /*
+ * Check if the aggregated sleep set parameter values differ from the
+ * aggregated active set parameter values.
+ */
+ if (!rpm_vreg->sleep_request_sent) {
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ if ((param_active[i] != param_sleep[i])
+ && (modified_sleep & BIT(i))) {
+ sleep_set_differs = true;
+ break;
+ }
+ }
+ }
+
+ /* Add KVPs to the active set RPM request if they have new values. */
+ rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_active.param,
+ param_active, rpm_vreg->aggr_req_active.valid,
+ &modified_active);
+ rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_ACTIVE,
+ param_active, modified_active);
+ if (rc)
+ return rc;
+ send_active = modified_active;
+
+ /*
+ * Sleep set configurations are only sent if they differ from the
+ * active set values. This is because the active set values will take
+ * effect during rpm assisted power collapse in the absence of sleep set
+ * values.
+ *
+ * However, once a sleep set request is sent for a given regulator,
+ * additional sleep set requests must be sent in the future even if they
+ * match the corresponding active set requests.
+ */
+ if (rpm_vreg->sleep_request_sent || sleep_set_differs) {
+ /* Add KVPs to the sleep set RPM request if they are new. */
+ rpm_vreg_check_modified_requests(rpm_vreg->aggr_req_sleep.param,
+ param_sleep, rpm_vreg->aggr_req_sleep.valid,
+ &modified_sleep);
+ rc = rpm_vreg_add_modified_requests(regulator, RPM_SET_SLEEP,
+ param_sleep, modified_sleep);
+ if (rc)
+ return rc;
+ send_sleep = modified_sleep;
+ }
+
+ /* Send active set request to the RPM if it contains new KVPs. */
+ if (send_active) {
+ wait_for_ack = rpm_vreg_ack_required(rpm_vreg, RPM_SET_ACTIVE,
+ rpm_vreg->aggr_req_active.param,
+ param_active,
+ rpm_vreg->aggr_req_active.valid,
+ modified_active);
+ rc = rpm_vreg_send_request(regulator, RPM_SET_ACTIVE,
+ wait_for_ack);
+ if (rc)
+ return rc;
+ rpm_vreg->aggr_req_active.valid |= modified_active;
+ rpm_vreg->wait_for_ack_active = false;
+ }
+ /* Store the results of the aggregation. */
+ rpm_vreg->aggr_req_active.modified = modified_active;
+ memcpy(rpm_vreg->aggr_req_active.param, param_active,
+ sizeof(param_active));
+
+ /* Handle debug printing of the active set request. */
+ rpm_regulator_req(regulator, RPM_SET_ACTIVE, send_active);
+ if (send_active)
+ rpm_vreg->aggr_req_active.modified = 0;
+
+ /* Send sleep set request to the RPM if it contains new KVPs. */
+ if (send_sleep) {
+ wait_for_ack = rpm_vreg_ack_required(rpm_vreg, RPM_SET_SLEEP,
+ rpm_vreg->aggr_req_sleep.param,
+ param_sleep,
+ rpm_vreg->aggr_req_sleep.valid,
+ modified_sleep);
+ rc = rpm_vreg_send_request(regulator, RPM_SET_SLEEP,
+ wait_for_ack);
+ if (rc)
+ return rc;
+ else
+ rpm_vreg->sleep_request_sent = true;
+ rpm_vreg->aggr_req_sleep.valid |= modified_sleep;
+ rpm_vreg->wait_for_ack_sleep = false;
+ }
+ /* Store the results of the aggregation. */
+ rpm_vreg->aggr_req_sleep.modified = modified_sleep;
+ memcpy(rpm_vreg->aggr_req_sleep.param, param_sleep,
+ sizeof(param_sleep));
+
+ /* Handle debug printing of the sleep set request. */
+ rpm_regulator_req(regulator, RPM_SET_SLEEP, send_sleep);
+ if (send_sleep)
+ rpm_vreg->aggr_req_sleep.modified = 0;
+
+ /*
+ * Loop over all requests for this regulator to update the valid and
+ * modified values for use in future aggregation.
+ */
+ list_for_each_entry(reg, &rpm_vreg->reg_list, list) {
+ reg->req.valid |= reg->req.modified;
+ reg->req.modified = 0;
+ }
+
+ return rc;
+}
+
+static int rpm_vreg_is_enabled(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ if (likely(!reg->use_pin_ctrl_for_enable))
+ return reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+ else
+ return reg->req.param[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE]
+ == reg->pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_ENABLE];
+}
+
+static int rpm_vreg_enable(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc;
+ u32 prev_enable;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ if (likely(!reg->use_pin_ctrl_for_enable)) {
+ /* Enable using swen KVP. */
+ prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+ RPM_VREG_SET_PARAM(reg, ENABLE, 1);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+ vreg_err(reg, "enable failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+ }
+ } else {
+ /* Enable using pcen KVP. */
+ prev_enable
+ = reg->req.param[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE];
+ RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE,
+ reg->pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_ENABLE]);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+ vreg_err(reg, "enable failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE, prev_enable);
+ }
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_disable(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc;
+ u32 prev_enable;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ if (likely(!reg->use_pin_ctrl_for_enable)) {
+ /* Disable using swen KVP. */
+ prev_enable = reg->req.param[RPM_REGULATOR_PARAM_ENABLE];
+ RPM_VREG_SET_PARAM(reg, ENABLE, 0);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+ vreg_err(reg, "disable failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, ENABLE, prev_enable);
+ }
+ } else {
+ /* Disable using pcen KVP. */
+ prev_enable
+ = reg->req.param[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE];
+ RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE,
+ reg->pin_ctrl_mask[RPM_VREG_PIN_CTRL_STATE_DISABLE]);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+ vreg_err(reg, "disable failed, rc=%d", rc);
+ RPM_VREG_SET_PARAM(reg, PIN_CTRL_ENABLE, prev_enable);
+ }
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+#define RPM_VREG_SET_VOLTAGE(_regulator, _val) \
+{ \
+ (_regulator)->req.param[(_regulator)->voltage_index] = _val; \
+ (_regulator)->req.modified |= BIT((_regulator)->voltage_index); \
+} \
+
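+/*
+ * Cache the requested voltage in the parameter selected by voltage_index and
+ * send it to the RPM only if an enable request is in effect for the resource
+ * or the regulator has been configured to always send voltage updates.
+ */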
+static int rpm_vreg_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ int voltage;
+ u32 prev_voltage;
+
+ voltage = min_uV - reg->voltage_offset;
+
+ if (voltage < params[reg->voltage_index].min
+ || voltage > params[reg->voltage_index].max) {
+ vreg_err(reg, "voltage=%d for key=%s is not within allowed range: [%u, %u]\n",
+ voltage, params[reg->voltage_index].name,
+ params[reg->voltage_index].min,
+ params[reg->voltage_index].max);
+ return -EINVAL;
+ }
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_voltage = reg->req.param[reg->voltage_index];
+ RPM_VREG_SET_VOLTAGE(reg, voltage);
+
+ rpm_vreg_check_param_max(reg, reg->voltage_index,
+ max_uV - reg->voltage_offset);
+
+ /*
+ * Only send a new voltage if the regulator is currently enabled or
+ * if the regulator has been configured to always send voltage updates.
+ */
+ if (reg->always_send_voltage
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set voltage for key=%s failed, rc=%d",
+ params[reg->voltage_index].name, rc);
+ RPM_VREG_SET_VOLTAGE(reg, prev_voltage);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static int rpm_vreg_get_voltage(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int uV;
+
+ uV = reg->req.param[reg->voltage_index] + reg->voltage_offset;
+ if (uV == 0)
+ uV = VOLTAGE_UNKNOWN;
+
+ return uV;
+}
+
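+/*
+ * Map REGULATOR_MODE_NORMAL/IDLE onto a load current request: raise the
+ * current into the HPM range for NORMAL or cap it to the LPM range for IDLE,
+ * then aggregate and send if an update is required.
+ */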
+static int rpm_vreg_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc = 0;
+ u32 prev_current;
+ int prev_uA;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_current = reg->req.param[RPM_REGULATOR_PARAM_CURRENT];
+ prev_uA = MILLI_TO_MICRO(prev_current);
+
+ if (mode == REGULATOR_MODE_NORMAL) {
+ /* Make sure that request current is in HPM range. */
+ if (prev_uA < rpm_vreg_hpm_min_uA(reg->rpm_vreg))
+ RPM_VREG_SET_PARAM(reg, CURRENT,
+ MICRO_TO_MILLI(rpm_vreg_hpm_min_uA(reg->rpm_vreg)));
+ } else if (mode == REGULATOR_MODE_IDLE) {
+ /* Make sure that request current is in LPM range. */
+ if (prev_uA > rpm_vreg_lpm_max_uA(reg->rpm_vreg))
+ RPM_VREG_SET_PARAM(reg, CURRENT,
+ MICRO_TO_MILLI(rpm_vreg_lpm_max_uA(reg->rpm_vreg)));
+ } else {
+ vreg_err(reg, "invalid mode: %u\n", mode);
+ rpm_vreg_unlock(reg->rpm_vreg);
+ return -EINVAL;
+ }
+
+ /*
+ * Only send a new load current value if the regulator is currently
+ * enabled or if the regulator has been configured to always send
+ * current updates.
+ */
+ if (reg->always_send_current
+ || rpm_vreg_active_or_sleep_enabled(reg->rpm_vreg)
+ || rpm_vreg_shared_active_or_sleep_enabled_valid(reg->rpm_vreg))
+ rc = rpm_vreg_aggregate_requests(reg);
+
+ if (rc) {
+ vreg_err(reg, "set mode failed, rc=%d\n", rc);
+ RPM_VREG_SET_PARAM(reg, CURRENT, prev_current);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static unsigned int rpm_vreg_get_mode(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return (reg->req.param[RPM_REGULATOR_PARAM_CURRENT]
+ >= MICRO_TO_MILLI(reg->rpm_vreg->hpm_min_load))
+ ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
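+/*
+ * Translate the consumer load (plus qcom,system-load) into a current request
+ * in mA and report NORMAL or IDLE based on the HPM minimum load threshold.
+ */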
+static unsigned int rpm_vreg_get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV, int load_uA)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ u32 load_mA;
+
+ load_uA += reg->system_load;
+
+ load_mA = MICRO_TO_MILLI(load_uA);
+ if (load_mA > params[RPM_REGULATOR_PARAM_CURRENT].max)
+ load_mA = params[RPM_REGULATOR_PARAM_CURRENT].max;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+ RPM_VREG_SET_PARAM(reg, CURRENT, load_mA);
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return (load_uA >= reg->rpm_vreg->hpm_min_load)
+ ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int rpm_vreg_set_bob_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ int rc;
+ u32 prev_mode;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+
+ prev_mode = reg->req.param[RPM_REGULATOR_PARAM_MODE_BOB];
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_PWM);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_AUTO);
+ break;
+ case REGULATOR_MODE_IDLE:
+ RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_PFM);
+ break;
+ case REGULATOR_MODE_STANDBY:
+ RPM_VREG_SET_PARAM(reg, MODE_BOB, RPM_REGULATOR_BOB_MODE_PASS);
+ break;
+ default:
+ vreg_err(reg, "invalid mode: %u\n", mode);
+ rpm_vreg_unlock(reg->rpm_vreg);
+ return -EINVAL;
+ }
+
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc) {
+ vreg_err(reg, "set BoB mode failed, rc=%d\n", rc);
+ RPM_VREG_SET_PARAM(reg, MODE_BOB, prev_mode);
+ }
+
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
+static unsigned int rpm_vreg_get_bob_mode(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+ unsigned int mode;
+
+ switch (reg->req.param[RPM_REGULATOR_PARAM_MODE_BOB]) {
+ case RPM_REGULATOR_BOB_MODE_PWM:
+ mode = REGULATOR_MODE_FAST;
+ break;
+ case RPM_REGULATOR_BOB_MODE_AUTO:
+ mode = REGULATOR_MODE_NORMAL;
+ break;
+ case RPM_REGULATOR_BOB_MODE_PFM:
+ mode = REGULATOR_MODE_IDLE;
+ break;
+ case RPM_REGULATOR_BOB_MODE_PASS:
+ mode = REGULATOR_MODE_STANDBY;
+ break;
+ default:
+ vreg_err(reg, "BoB mode unknown\n");
+ mode = REGULATOR_MODE_NORMAL;
+ }
+
+ return mode;
+}
+
+static int rpm_vreg_enable_time(struct regulator_dev *rdev)
+{
+ struct rpm_regulator *reg = rdev_get_drvdata(rdev);
+
+ return reg->rpm_vreg->enable_time;
+}
+
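+/* Send the initial parameter values parsed from device tree to the RPM. */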
+static int rpm_vreg_send_defaults(struct rpm_regulator *reg)
+{
+ int rc;
+
+ rpm_vreg_lock(reg->rpm_vreg);
+ rc = rpm_vreg_aggregate_requests(reg);
+ if (rc)
+ vreg_err(reg, "RPM request failed, rc=%d", rc);
+ rpm_vreg_unlock(reg->rpm_vreg);
+
+ return rc;
+}
+
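+/*
+ * Parse qcom,enable-with-pin-ctrl and, if it is valid for this regulator
+ * type, switch enable/disable handling to the pin control enable (pcen)
+ * parameter.
+ */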
+static int rpm_vreg_configure_pin_control_enable(struct rpm_regulator *reg,
+ struct device_node *node)
+{
+ struct rpm_regulator_param *pcen_param =
+ &params[RPM_REGULATOR_PARAM_PIN_CTRL_ENABLE];
+ int rc, i;
+
+ if (!of_find_property(node, "qcom,enable-with-pin-ctrl", NULL))
+ return 0;
+
+ if (pcen_param->supported_regulator_types
+ & BIT(reg->rpm_vreg->regulator_type)) {
+ rc = of_property_read_u32_array(node,
+ "qcom,enable-with-pin-ctrl", reg->pin_ctrl_mask,
+ RPM_VREG_PIN_CTRL_STATE_COUNT);
+ if (rc) {
+ vreg_err(reg, "could not read qcom,enable-with-pin-ctrl, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Verify that the mask values are valid. */
+ for (i = 0; i < RPM_VREG_PIN_CTRL_STATE_COUNT; i++) {
+ if (reg->pin_ctrl_mask[i] < pcen_param->min
+ || reg->pin_ctrl_mask[i] > pcen_param->max) {
+ vreg_err(reg, "device tree property: qcom,enable-with-pin-ctrl[%d]=%u is outside allowed range [%u, %u]\n",
+ i, reg->pin_ctrl_mask[i],
+ pcen_param->min, pcen_param->max);
+ return -EINVAL;
+ }
+ }
+
+ reg->use_pin_ctrl_for_enable = true;
+ } else {
+ pr_warn("%s: regulator type=%d does not support device tree property: qcom,enable-with-pin-ctrl\n",
+ reg->rdesc.name, reg->rpm_vreg->regulator_type);
+ }
+
+ return 0;
+}
+
+/**
+ * rpm_regulator_get() - lookup and obtain a handle to an RPM regulator
+ * @dev: device for regulator consumer
+ * @supply: supply name
+ *
+ * Returns a struct rpm_regulator corresponding to the regulator producer,
+ * or ERR_PTR() containing errno.
+ *
+ * This function may only be called from nonatomic context.
+ */
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply)
+{
+ struct rpm_regulator *framework_reg;
+ struct rpm_regulator *priv_reg = NULL;
+ struct regulator *regulator;
+ struct rpm_vreg *rpm_vreg;
+
+ regulator = regulator_get(dev, supply);
+ if (IS_ERR(regulator)) {
+ pr_err("could not find regulator for: dev=%s, supply=%s, rc=%ld\n",
+ (dev ? dev_name(dev) : ""), (supply ? supply : ""),
+ PTR_ERR(regulator));
+ return ERR_CAST(regulator);
+ }
+
+ framework_reg = regulator_get_drvdata(regulator);
+ if (framework_reg == NULL) {
+ pr_err("regulator structure not found.\n");
+ regulator_put(regulator);
+ return ERR_PTR(-ENODEV);
+ }
+ regulator_put(regulator);
+
+ rpm_vreg = framework_reg->rpm_vreg;
+
+ priv_reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+ if (priv_reg == NULL) {
+ vreg_err(framework_reg,
+ "could not allocate memory for regulator\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * Allocate a regulator_dev struct so that framework callback functions
+ * can be called from the private API functions.
+ */
+ priv_reg->rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
+ if (priv_reg->rdev == NULL) {
+ vreg_err(framework_reg,
+ "could not allocate memory for regulator_dev\n");
+ kfree(priv_reg);
+ return ERR_PTR(-ENOMEM);
+ }
+ priv_reg->rdev->reg_data = priv_reg;
+ priv_reg->rpm_vreg = rpm_vreg;
+ priv_reg->rdesc.name = framework_reg->rdesc.name;
+ priv_reg->rdesc.ops = framework_reg->rdesc.ops;
+ priv_reg->set_active = framework_reg->set_active;
+ priv_reg->set_sleep = framework_reg->set_sleep;
+ priv_reg->min_uV = framework_reg->min_uV;
+ priv_reg->max_uV = framework_reg->max_uV;
+ priv_reg->system_load = framework_reg->system_load;
+
+ might_sleep_if(!rpm_vreg->allow_atomic);
+ rpm_vreg_lock(rpm_vreg);
+ list_add(&priv_reg->list, &rpm_vreg->reg_list);
+ rpm_vreg_unlock(rpm_vreg);
+
+ return priv_reg;
+}
+EXPORT_SYMBOL(rpm_regulator_get);
+
+static int rpm_regulator_check_input(struct rpm_regulator *regulator)
+{
+ if (IS_ERR_OR_NULL(regulator) || regulator->rpm_vreg == NULL) {
+ pr_err("invalid rpm_regulator pointer\n");
+ return -EINVAL;
+ }
+
+ might_sleep_if(!regulator->rpm_vreg->allow_atomic);
+
+ return 0;
+}
+
+/**
+ * rpm_regulator_put() - free the RPM regulator handle
+ * @regulator: RPM regulator handle
+ *
+ * Parameter reaggregation does not take place when rpm_regulator_put is called.
+ * Therefore, regulator enable state and voltage must be configured
+ * appropriately before calling rpm_regulator_put.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+void rpm_regulator_put(struct rpm_regulator *regulator)
+{
+ struct rpm_vreg *rpm_vreg;
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return;
+
+ rpm_vreg = regulator->rpm_vreg;
+
+ might_sleep_if(!rpm_vreg->allow_atomic);
+ rpm_vreg_lock(rpm_vreg);
+ list_del(&regulator->list);
+ rpm_vreg_unlock(rpm_vreg);
+
+ kfree(regulator->rdev);
+ kfree(regulator);
+}
+EXPORT_SYMBOL(rpm_regulator_put);
+
+/**
+ * rpm_regulator_enable() - enable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_enable(struct rpm_regulator *regulator)
+{
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return rc;
+
+ return rpm_vreg_enable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_enable);
+
+/**
+ * rpm_regulator_disable() - disable regulator output
+ * @regulator: RPM regulator handle
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The enable state of the regulator is determined by aggregating the requests
+ * of all consumers. Therefore, it is possible that the regulator will remain
+ * enabled even after rpm_regulator_disable is called.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_disable(struct rpm_regulator *regulator)
+{
+ int rc = rpm_regulator_check_input(regulator);
+
+ if (rc)
+ return rc;
+
+ return rpm_vreg_disable(regulator->rdev);
+}
+EXPORT_SYMBOL(rpm_regulator_disable);
+
+/**
+ * rpm_regulator_set_voltage() - set regulator output voltage
+ * @regulator: RPM regulator handle
+ * @min_uV: minimum required voltage in uV
+ * @max_uV: maximum acceptable voltage in uV
+ *
+ * Sets a voltage regulator to the desired output voltage. This can be set
+ * while the regulator is disabled or enabled. If the regulator is enabled then
+ * the voltage will change to the new value immediately; otherwise, if the
+ * regulator is disabled, then the regulator will output at the new voltage when
+ * enabled.
+ *
+ * The min_uV to max_uV voltage range requested must intersect with the
+ * voltage constraint range configured for the regulator.
+ *
+ * Returns 0 on success or errno on failure.
+ *
+ * The final voltage value that is sent to the RPM is aggregated based upon the
+ * values requested by all consumers of the regulator. This corresponds to the
+ * maximum min_uV value.
+ *
+ * This function may be called from either atomic or nonatomic context. If this
+ * function is called from atomic context, then the regulator being operated on
+ * must be configured via device tree with qcom,allow-atomic == 1.
+ */
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+ int max_uV)
+{
+ int rc = rpm_regulator_check_input(regulator);
+ int uV = min_uV;
+
+ if (rc)
+ return rc;
+
+ if (regulator->rpm_vreg->regulator_type == RPM_REGULATOR_TYPE_VS) {
+ vreg_err(regulator, "unsupported regulator type: %d\n",
+ regulator->rpm_vreg->regulator_type);
+ return -EINVAL;
+ }
+
+ if (min_uV > max_uV) {
+ vreg_err(regulator, "min_uV=%d must be less than or equal to max_uV=%d\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ if (uV < regulator->min_uV && max_uV >= regulator->min_uV)
+ uV = regulator->min_uV;
+
+ if (uV < regulator->min_uV || uV > regulator->max_uV) {
+ vreg_err(regulator,
+ "request v=[%d, %d] is outside allowed v=[%d, %d]\n",
+ min_uV, max_uV, regulator->min_uV, regulator->max_uV);
+ return -EINVAL;
+ }
+
+ return regulator->rdesc.ops->set_voltage(regulator->rdev, uV, uV, NULL);
+}
+EXPORT_SYMBOL(rpm_regulator_set_voltage);
+
+/**
+ * rpm_regulator_set_mode() - set regulator operating mode
+ * @regulator: RPM regulator handle
+ * @mode: operating mode requested for the regulator
+ *
+ * Requests that the mode of the regulator be set to the mode specified. This
+ * parameter is aggregated using a max function such that AUTO < IPEAK < HPM.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+ enum rpm_regulator_mode mode)
+{
+ int index = 0;
+ u32 new_mode, prev_mode;
+ int rc;
+
+ rc = rpm_regulator_check_input(regulator);
+ if (rc)
+ return rc;
+
+ if (mode < 0 || mode >= ARRAY_SIZE(mode_mapping)) {
+ vreg_err(regulator, "invalid mode requested: %d\n", mode);
+ return -EINVAL;
+ }
+
+ switch (regulator->rpm_vreg->regulator_type) {
+ case RPM_REGULATOR_TYPE_SMPS:
+ index = RPM_REGULATOR_PARAM_MODE_SMPS;
+ new_mode = mode_mapping[mode].smps_mode;
+ break;
+ case RPM_REGULATOR_TYPE_LDO:
+ index = RPM_REGULATOR_PARAM_MODE_LDO;
+ new_mode = mode_mapping[mode].ldo_mode;
+ break;
+ default:
+ vreg_err(regulator, "unsupported regulator type: %d\n",
+ regulator->rpm_vreg->regulator_type);
+ return -EINVAL;
+ }
+
+ if (new_mode < params[index].min || new_mode > params[index].max) {
+ vreg_err(regulator, "invalid mode requested: %d for type: %d\n",
+ mode, regulator->rpm_vreg->regulator_type);
+ return -EINVAL;
+ }
+
+ rpm_vreg_lock(regulator->rpm_vreg);
+
+ prev_mode = regulator->req.param[index];
+ regulator->req.param[index] = new_mode;
+ regulator->req.modified |= BIT(index);
+
+ rc = rpm_vreg_aggregate_requests(regulator);
+ if (rc) {
+ vreg_err(regulator, "set mode failed, rc=%d", rc);
+ regulator->req.param[index] = prev_mode;
+ }
+
+ rpm_vreg_unlock(regulator->rpm_vreg);
+
+ return rc;
+}
+EXPORT_SYMBOL(rpm_regulator_set_mode);
+
+static struct regulator_ops ldo_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops smps_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .set_mode = rpm_vreg_set_mode,
+ .get_mode = rpm_vreg_get_mode,
+ .get_optimum_mode = rpm_vreg_get_optimum_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops switch_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops ncp_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops bob_ops = {
+ .enable = rpm_vreg_enable,
+ .disable = rpm_vreg_disable,
+ .is_enabled = rpm_vreg_is_enabled,
+ .set_voltage = rpm_vreg_set_voltage,
+ .get_voltage = rpm_vreg_get_voltage,
+ .set_mode = rpm_vreg_set_bob_mode,
+ .get_mode = rpm_vreg_get_bob_mode,
+ .enable_time = rpm_vreg_enable_time,
+};
+
+static struct regulator_ops *vreg_ops[] = {
+ [RPM_REGULATOR_TYPE_LDO] = &ldo_ops,
+ [RPM_REGULATOR_TYPE_SMPS] = &smps_ops,
+ [RPM_REGULATOR_TYPE_VS] = &switch_ops,
+ [RPM_REGULATOR_TYPE_NCP] = &ncp_ops,
+ [RPM_REGULATOR_TYPE_BOB] = &bob_ops,
+};
+
+static int rpm_vreg_device_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpm_regulator *reg;
+ struct rpm_vreg *rpm_vreg;
+
+ reg = platform_get_drvdata(pdev);
+ if (reg) {
+ rpm_vreg = reg->rpm_vreg;
+ rpm_vreg_lock(rpm_vreg);
+ regulator_unregister(reg->rdev);
+ list_del(&reg->list);
+ kfree(reg);
+ rpm_vreg_unlock(rpm_vreg);
+ } else {
+ dev_err(dev, "%s: drvdata missing\n", __func__);
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int rpm_vreg_resource_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpm_regulator *reg, *reg_temp;
+ struct rpm_vreg *rpm_vreg;
+
+ rpm_vreg = platform_get_drvdata(pdev);
+ if (rpm_vreg) {
+ rpm_vreg_lock(rpm_vreg);
+ list_for_each_entry_safe(reg, reg_temp, &rpm_vreg->reg_list,
+ list) {
+ /* Only touch data for private consumers. */
+ if (reg->rdev->desc == NULL) {
+ list_del(&reg->list);
+ kfree(reg->rdev);
+ kfree(reg);
+ } else {
+ dev_err(dev, "%s: not all child devices have been removed\n",
+ __func__);
+ }
+ }
+ rpm_vreg_unlock(rpm_vreg);
+
+ msm_rpm_free_request(rpm_vreg->handle_active);
+ msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+ kfree(rpm_vreg);
+ } else {
+ dev_err(dev, "%s: drvdata missing\n", __func__);
+ return -EINVAL;
+ }
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int rpm_vreg_set_smps_ldo_voltage_index(struct device *dev,
+ struct rpm_regulator *reg)
+{
+ struct device_node *node = dev->of_node;
+ int chosen = 0;
+
+ if (of_property_read_bool(node, "qcom,use-voltage-corner")) {
+ reg->voltage_index = RPM_REGULATOR_PARAM_CORNER;
+ reg->voltage_offset = RPM_REGULATOR_CORNER_NONE;
+ chosen++;
+ }
+
+ if (of_property_read_bool(node, "qcom,use-voltage-floor-corner")) {
+ reg->voltage_index = RPM_REGULATOR_PARAM_FLOOR_CORNER;
+ reg->voltage_offset = RPM_REGULATOR_CORNER_NONE;
+ chosen++;
+ }
+
+ if (of_property_read_bool(node, "qcom,use-voltage-level")) {
+ reg->voltage_index = RPM_REGULATOR_PARAM_LEVEL;
+ chosen++;
+ }
+
+ if (of_property_read_bool(node, "qcom,use-voltage-floor-level")) {
+ reg->voltage_index = RPM_REGULATOR_PARAM_FLOOR_LEVEL;
+ chosen++;
+ }
+
+ if (chosen > 1) {
+ dev_err(dev, "only one qcom,use-voltage-* may be specified\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rpm_vreg_set_bob_voltage_index(struct device *dev,
+ struct rpm_regulator *reg)
+{
+ struct device_node *node = dev->of_node;
+ int chosen = 0;
+
+ if (of_property_read_bool(node, "qcom,use-pin-ctrl-voltage1")) {
+ reg->voltage_index = RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE1;
+ chosen++;
+ }
+
+ if (of_property_read_bool(node, "qcom,use-pin-ctrl-voltage2")) {
+ reg->voltage_index = RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE2;
+ chosen++;
+ }
+
+ if (of_property_read_bool(node, "qcom,use-pin-ctrl-voltage3")) {
+ reg->voltage_index = RPM_REGULATOR_PARAM_PIN_CTRL_VOLTAGE3;
+ chosen++;
+ }
+
+ if (chosen > 1) {
+ dev_err(dev, "only one qcom,use-pin-ctrl-voltage* may be specified\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rpm_vreg_device_set_voltage_index(struct device *dev,
+ struct rpm_regulator *reg, int type)
+{
+ int rc = 0;
+
+ reg->voltage_index = RPM_REGULATOR_PARAM_VOLTAGE;
+
+ switch (type) {
+ case RPM_REGULATOR_TYPE_SMPS:
+ case RPM_REGULATOR_TYPE_LDO:
+ rc = rpm_vreg_set_smps_ldo_voltage_index(dev, reg);
+ break;
+ case RPM_REGULATOR_TYPE_BOB:
+ rc = rpm_vreg_set_bob_voltage_index(dev, reg);
+ break;
+ }
+
+ return rc;
+}
+
+/*
+ * This probe is called for child rpm-regulator devices whose properties
+ * configure the individual regulator framework regulators belonging to a
+ * given RPM regulator resource.
+ */
+static int rpm_vreg_device_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct regulator_init_data *init_data;
+ struct rpm_vreg *rpm_vreg;
+ struct rpm_regulator *reg;
+ struct regulator_config reg_config = {};
+ int rc = 0;
+ int i, regulator_type;
+ u32 val;
+
+ if (!dev->of_node) {
+ dev_err(dev, "%s: device tree information missing\n", __func__);
+ return -ENODEV;
+ }
+
+ if (pdev->dev.parent == NULL) {
+ dev_err(dev, "%s: parent device missing\n", __func__);
+ return -ENODEV;
+ }
+
+ rpm_vreg = dev_get_drvdata(pdev->dev.parent);
+ if (rpm_vreg == NULL) {
+ dev_err(dev, "%s: rpm_vreg not found in parent device\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ reg = kzalloc(sizeof(struct rpm_regulator), GFP_KERNEL);
+ if (reg == NULL) {
+ dev_err(dev, "%s: could not allocate memory for reg\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ regulator_type = rpm_vreg->regulator_type;
+ reg->rpm_vreg = rpm_vreg;
+ reg->rdesc.owner = THIS_MODULE;
+ reg->rdesc.type = REGULATOR_VOLTAGE;
+ reg->rdesc.ops = vreg_ops[regulator_type];
+
+ rc = rpm_vreg_device_set_voltage_index(dev, reg, regulator_type);
+ if (rc)
+ goto fail_free_reg;
+
+ reg->always_send_voltage
+ = of_property_read_bool(node, "qcom,always-send-voltage");
+ reg->always_send_current
+ = of_property_read_bool(node, "qcom,always-send-current");
+
+ if (regulator_type == RPM_REGULATOR_TYPE_VS)
+ reg->rdesc.n_voltages = 0;
+ else
+ reg->rdesc.n_voltages = 2;
+
+ rc = of_property_read_u32(node, "qcom,set", &val);
+ if (rc) {
+ dev_err(dev, "%s: sleep set and/or active set must be configured via qcom,set property, rc=%d\n",
+ __func__, rc);
+ goto fail_free_reg;
+ } else if (!(val & RPM_SET_CONFIG_BOTH)) {
+ dev_err(dev, "%s: qcom,set=%u property is invalid\n", __func__,
+ val);
+ rc = -EINVAL;
+ goto fail_free_reg;
+ }
+
+ reg->set_active = !!(val & RPM_SET_CONFIG_ACTIVE);
+ reg->set_sleep = !!(val & RPM_SET_CONFIG_SLEEP);
+
+ init_data = of_get_regulator_init_data(dev, node);
+ if (init_data == NULL) {
+ dev_err(dev, "%s: unable to allocate memory\n", __func__);
+ rc = -ENOMEM;
+ goto fail_free_reg;
+ }
+ if (init_data->constraints.name == NULL) {
+ dev_err(dev, "%s: regulator name not specified\n", __func__);
+ rc = -EINVAL;
+ goto fail_free_reg;
+ }
+
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+
+ if (of_get_property(node, "parent-supply", NULL))
+ init_data->supply_regulator = "parent";
+
+ /*
+ * Fill in ops and mode masks based on callbacks specified for
+ * this type of regulator.
+ */
+ if (reg->rdesc.ops->enable)
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+ if (reg->rdesc.ops->get_voltage)
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE;
+ if (reg->rdesc.ops->get_mode) {
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+ init_data->constraints.valid_modes_mask
+ |= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+ }
+
+ reg->rdesc.name = init_data->constraints.name;
+ reg->min_uV = init_data->constraints.min_uV;
+ reg->max_uV = init_data->constraints.max_uV;
+
+ /* Initialize the param array based on optional properties. */
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++) {
+ rc = of_property_read_u32(node, params[i].property_name, &val);
+ if (rc == 0) {
+ if (params[i].supported_regulator_types
+ & BIT(regulator_type)) {
+ if (val < params[i].min
+ || val > params[i].max) {
+ pr_warn("%s: device tree property: %s=%u is outside allowed range [%u, %u]\n",
+ reg->rdesc.name,
+ params[i].property_name, val,
+ params[i].min, params[i].max);
+ continue;
+ }
+ reg->req.param[i] = val;
+ reg->req.modified |= BIT(i);
+ } else {
+ pr_warn("%s: regulator type=%d does not support device tree property: %s\n",
+ reg->rdesc.name, regulator_type,
+ params[i].property_name);
+ }
+ }
+ }
+
+ of_property_read_u32(node, "qcom,system-load", &reg->system_load);
+
+ rc = rpm_vreg_configure_pin_control_enable(reg, node);
+ if (rc) {
+ vreg_err(reg, "could not configure pin control enable, rc=%d\n",
+ rc);
+ goto fail_free_reg;
+ }
+
+ rpm_vreg_lock(rpm_vreg);
+ list_add(&reg->list, &rpm_vreg->reg_list);
+ rpm_vreg_unlock(rpm_vreg);
+
+ if (of_property_read_bool(node, "qcom,send-defaults")) {
+ rc = rpm_vreg_send_defaults(reg);
+ if (rc) {
+ vreg_err(reg, "could not send defaults, rc=%d\n", rc);
+ goto fail_remove_from_list;
+ }
+ }
+
+ reg_config.dev = dev;
+ reg_config.init_data = init_data;
+ reg_config.of_node = node;
+ reg_config.driver_data = reg;
+ reg->rdev = regulator_register(&reg->rdesc, &reg_config);
+ if (IS_ERR(reg->rdev)) {
+ rc = PTR_ERR(reg->rdev);
+ reg->rdev = NULL;
+ pr_err("regulator_register failed: %s, rc=%d\n",
+ reg->rdesc.name, rc);
+ goto fail_remove_from_list;
+ }
+
+ platform_set_drvdata(pdev, reg);
+
+ pr_debug("successfully probed: %s\n", reg->rdesc.name);
+
+ return 0;
+
+fail_remove_from_list:
+ rpm_vreg_lock(rpm_vreg);
+ list_del(&reg->list);
+ rpm_vreg_unlock(rpm_vreg);
+
+fail_free_reg:
+ kfree(reg);
+ return rc;
+}
+
+/*
+ * This probe is called for parent rpm-regulator devices whose properties
+ * identify a given RPM resource.
+ */
+static int rpm_vreg_resource_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct rpm_vreg *rpm_vreg;
+ int val = 0;
+ u32 resource_type;
+ int rc;
+
+ if (!dev->of_node) {
+ dev_err(dev, "%s: device tree information missing\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Create new rpm_vreg entry. */
+ rpm_vreg = kzalloc(sizeof(struct rpm_vreg), GFP_KERNEL);
+ if (rpm_vreg == NULL) {
+ dev_err(dev, "%s: could not allocate memory for vreg\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* Required device tree properties: */
+ rc = of_property_read_string(node, "qcom,resource-name",
+ &rpm_vreg->resource_name);
+ if (rc) {
+ dev_err(dev, "%s: qcom,resource-name missing in DT node\n",
+ __func__);
+ goto fail_free_vreg;
+ }
+ resource_type = rpm_vreg_string_to_int(rpm_vreg->resource_name);
+
+ rc = of_property_read_u32(node, "qcom,resource-id",
+ &rpm_vreg->resource_id);
+ if (rc) {
+ dev_err(dev, "%s: qcom,resource-id missing in DT node\n",
+ __func__);
+ goto fail_free_vreg;
+ }
+
+ rc = of_property_read_u32(node, "qcom,regulator-type",
+ &rpm_vreg->regulator_type);
+ if (rc) {
+ dev_err(dev, "%s: qcom,regulator-type missing in DT node\n",
+ __func__);
+ goto fail_free_vreg;
+ }
+
+ if ((rpm_vreg->regulator_type < 0)
+ || (rpm_vreg->regulator_type >= RPM_REGULATOR_TYPE_MAX)) {
+ dev_err(dev, "%s: invalid regulator type: %d\n", __func__,
+ rpm_vreg->regulator_type);
+ rc = -EINVAL;
+ goto fail_free_vreg;
+ }
+
+ /* Optional device tree properties: */
+ of_property_read_u32(node, "qcom,allow-atomic", &val);
+ rpm_vreg->allow_atomic = !!val;
+ of_property_read_u32(node, "qcom,enable-time", &rpm_vreg->enable_time);
+ of_property_read_u32(node, "qcom,hpm-min-load",
+ &rpm_vreg->hpm_min_load);
+ rpm_vreg->apps_only = of_property_read_bool(node, "qcom,apps-only");
+ rpm_vreg->always_wait_for_ack
+ = of_property_read_bool(node, "qcom,always-wait-for-ack");
+
+ rpm_vreg->handle_active = msm_rpm_create_request(RPM_SET_ACTIVE,
+ resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+ if (rpm_vreg->handle_active == NULL
+ || IS_ERR(rpm_vreg->handle_active)) {
+ rc = PTR_ERR(rpm_vreg->handle_active);
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "%s: failed to create active RPM handle, rc=%d\n",
+ __func__, rc);
+ goto fail_free_vreg;
+ }
+
+ rpm_vreg->handle_sleep = msm_rpm_create_request(RPM_SET_SLEEP,
+ resource_type, rpm_vreg->resource_id, RPM_REGULATOR_PARAM_MAX);
+ if (rpm_vreg->handle_sleep == NULL || IS_ERR(rpm_vreg->handle_sleep)) {
+ rc = PTR_ERR(rpm_vreg->handle_sleep);
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "%s: failed to create sleep RPM handle, rc=%d\n",
+ __func__, rc);
+ goto fail_free_handle_active;
+ }
+
+ INIT_LIST_HEAD(&rpm_vreg->reg_list);
+
+ if (rpm_vreg->allow_atomic)
+ spin_lock_init(&rpm_vreg->slock);
+ else
+ mutex_init(&rpm_vreg->mlock);
+
+ platform_set_drvdata(pdev, rpm_vreg);
+
+ rc = of_platform_populate(node, NULL, NULL, dev);
+ if (rc) {
+ dev_err(dev, "%s: failed to add child nodes, rc=%d\n", __func__,
+ rc);
+ goto fail_unset_drvdata;
+ }
+
+ pr_debug("successfully probed: %s (%08X) %u\n", rpm_vreg->resource_name,
+ resource_type, rpm_vreg->resource_id);
+
+ return rc;
+
+fail_unset_drvdata:
+ platform_set_drvdata(pdev, NULL);
+ msm_rpm_free_request(rpm_vreg->handle_sleep);
+
+fail_free_handle_active:
+ msm_rpm_free_request(rpm_vreg->handle_active);
+
+fail_free_vreg:
+ kfree(rpm_vreg);
+
+ return rc;
+}
+
+static struct of_device_id rpm_vreg_match_table_device[] = {
+ { .compatible = "qcom,rpm-smd-regulator", },
+ {}
+};
+
+static struct of_device_id rpm_vreg_match_table_resource[] = {
+ { .compatible = "qcom,rpm-smd-regulator-resource", },
+ {}
+};
+
+static struct platform_driver rpm_vreg_device_driver = {
+ .probe = rpm_vreg_device_probe,
+ .remove = rpm_vreg_device_remove,
+ .driver = {
+ .name = "qcom,rpm-smd-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_vreg_match_table_device,
+ },
+};
+
+static struct platform_driver rpm_vreg_resource_driver = {
+ .probe = rpm_vreg_resource_probe,
+ .remove = rpm_vreg_resource_remove,
+ .driver = {
+ .name = "qcom,rpm-smd-regulator-resource",
+ .owner = THIS_MODULE,
+ .of_match_table = rpm_vreg_match_table_resource,
+ },
+};
+
+/**
+ * rpm_smd_regulator_driver_init() - initialize the RPM SMD regulator drivers
+ *
+ * This function registers the RPM SMD regulator platform drivers.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init rpm_smd_regulator_driver_init(void)
+{
+ static bool initialized;
+ int i, rc;
+
+ if (initialized)
+ return 0;
+ else
+ initialized = true;
+
+ /* Store parameter string names as integers */
+ for (i = 0; i < RPM_REGULATOR_PARAM_MAX; i++)
+ params[i].key = rpm_vreg_string_to_int(params[i].name);
+
+ rc = platform_driver_register(&rpm_vreg_device_driver);
+ if (rc)
+ return rc;
+
+ return platform_driver_register(&rpm_vreg_resource_driver);
+}
+EXPORT_SYMBOL(rpm_smd_regulator_driver_init);
+
+static void __exit rpm_vreg_exit(void)
+{
+ platform_driver_unregister(&rpm_vreg_device_driver);
+ platform_driver_unregister(&rpm_vreg_resource_driver);
+}
+
+arch_initcall(rpm_smd_regulator_driver_init);
+module_exit(rpm_vreg_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM RPM SMD regulator driver");
diff --git a/drivers/regulator/spm-regulator.c b/drivers/regulator/spm-regulator.c
new file mode 100644
index 000000000000..73bea0ada363
--- /dev/null
+++ b/drivers/regulator/spm-regulator.c
@@ -0,0 +1,1076 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/spm-regulator.h>
+#include <soc/qcom/spm.h>
+
+#if defined(CONFIG_ARM64) || (defined(CONFIG_ARM) && defined(CONFIG_ARM_PSCI))
+ asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64);
+#else
+ #define __invoke_psci_fn_smc(a, b, c, d) 0
+#endif
+
+#define SPM_REGULATOR_DRIVER_NAME "qcom,spm-regulator"
+
+struct voltage_range {
+ int min_uV;
+ int set_point_min_uV;
+ int max_uV;
+ int step_uV;
+};
+
+enum qpnp_regulator_uniq_type {
+ QPNP_TYPE_HF,
+ QPNP_TYPE_FTS2,
+ QPNP_TYPE_FTS2p5,
+ QPNP_TYPE_ULT_HF,
+};
+
+enum qpnp_regulator_type {
+ QPNP_HF_TYPE = 0x03,
+ QPNP_FTS2_TYPE = 0x1C,
+ QPNP_FTS2p5_TYPE = 0x1C,
+ QPNP_ULT_HF_TYPE = 0x22,
+};
+
+enum qpnp_regulator_subtype {
+ QPNP_FTS2_SUBTYPE = 0x08,
+ QPNP_HF_SUBTYPE = 0x08,
+ QPNP_FTS2p5_SUBTYPE = 0x09,
+ QPNP_ULT_HF_SUBTYPE = 0x0D,
+};
+
+static const struct voltage_range fts2_range0 = {0, 350000, 1275000, 5000};
+static const struct voltage_range fts2_range1 = {0, 700000, 2040000, 10000};
+static const struct voltage_range fts2p5_range0
+ = { 80000, 350000, 1355000, 5000};
+static const struct voltage_range fts2p5_range1
+ = {160000, 700000, 2200000, 10000};
+static const struct voltage_range ult_hf_range0 = {375000, 375000, 1562500,
+ 12500};
+static const struct voltage_range ult_hf_range1 = {750000, 750000, 1525000,
+ 25000};
+static const struct voltage_range hf_range0 = {375000, 375000, 1562500, 12500};
+static const struct voltage_range hf_range1 = {1550000, 1550000, 3125000,
+ 25000};
+
+#define QPNP_SMPS_REG_TYPE 0x04
+#define QPNP_SMPS_REG_SUBTYPE 0x05
+#define QPNP_SMPS_REG_VOLTAGE_RANGE 0x40
+#define QPNP_SMPS_REG_VOLTAGE_SETPOINT 0x41
+#define QPNP_SMPS_REG_MODE 0x45
+#define QPNP_SMPS_REG_STEP_CTRL 0x61
+
+#define QPNP_SMPS_MODE_PWM 0x80
+#define QPNP_SMPS_MODE_AUTO 0x40
+
+#define QPNP_SMPS_STEP_CTRL_STEP_MASK 0x18
+#define QPNP_SMPS_STEP_CTRL_STEP_SHIFT 3
+#define QPNP_SMPS_STEP_CTRL_DELAY_MASK 0x07
+#define QPNP_SMPS_STEP_CTRL_DELAY_SHIFT 0
+
+/* Clock rate in kHz of the FTS2 regulator reference clock. */
+#define QPNP_SMPS_CLOCK_RATE 19200
+
+/* Time to delay in us to ensure that a mode change has completed. */
+#define QPNP_FTS2_MODE_CHANGE_DELAY 50
+
+/* Minimum time in us that it takes to complete a single SPMI write. */
+#define QPNP_SPMI_WRITE_MIN_DELAY 8
+
+/* Minimum voltage stepper delay for each step. */
+#define QPNP_FTS2_STEP_DELAY 8
+#define QPNP_HF_STEP_DELAY 20
+
+/* Arbitrarily large max step size used to avoid possible numerical overflow */
+#define SPM_REGULATOR_MAX_STEP_UV 10000000
+
+/*
+ * The ratio QPNP_FTS2_STEP_MARGIN_NUM/QPNP_FTS2_STEP_MARGIN_DEN is used to
+ * adjust the step rate in order to account for oscillator variance.
+ */
+#define QPNP_FTS2_STEP_MARGIN_NUM 4
+#define QPNP_FTS2_STEP_MARGIN_DEN 5
+
+/* VSET value to decide the range of ULT SMPS */
+#define ULT_SMPS_RANGE_SPLIT 0x60
+
+struct spm_vreg {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ struct spmi_device *spmi_dev;
+ const struct voltage_range *range;
+ int uV;
+ int last_set_uV;
+ unsigned vlevel;
+ unsigned last_set_vlevel;
+ u32 max_step_uV;
+ bool online;
+ u16 spmi_base_addr;
+ u8 init_mode;
+ u8 mode;
+ int step_rate;
+ enum qpnp_regulator_uniq_type regulator_type;
+ u32 cpu_num;
+ bool bypass_spm;
+ struct regulator_desc avs_rdesc;
+ struct regulator_dev *avs_rdev;
+ int avs_min_uV;
+ int avs_max_uV;
+ bool avs_enabled;
+ u32 recal_cluster_mask;
+};
+
+static inline bool spm_regulator_using_avs(struct spm_vreg *vreg)
+{
+ return vreg->avs_rdev && !vreg->bypass_spm;
+}
+
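+/*
+ * Convert a voltage in uV to the VSET register value for the active range,
+ * applying the ULT HF range 1 encoding in which the upper bits select the
+ * range.
+ */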
+static int spm_regulator_uv_to_vlevel(struct spm_vreg *vreg, int uV)
+{
+ int vlevel;
+
+ vlevel = DIV_ROUND_UP(uV - vreg->range->min_uV, vreg->range->step_uV);
+
+ /* Fix VSET for ULT HF Buck */
+ if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+ && vreg->range == &ult_hf_range1) {
+ vlevel &= 0x1F;
+ vlevel |= ULT_SMPS_RANGE_SPLIT;
+ }
+
+ return vlevel;
+}
+
+static int spm_regulator_vlevel_to_uv(struct spm_vreg *vreg, int vlevel)
+{
+ /*
+ * Calculate ULT HF buck VSET based on range:
+ * In case of range 0: VSET is a 7 bit value.
+ * In case of range 1: VSET is a 5 bit value.
+ */
+ if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+ && vreg->range == &ult_hf_range1)
+ vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+ return vlevel * vreg->range->step_uV + vreg->range->min_uV;
+}
+
+static unsigned spm_regulator_vlevel_to_selector(struct spm_vreg *vreg,
+ unsigned vlevel)
+{
+ /* Fix VSET for ULT HF Buck */
+ if (vreg->regulator_type == QPNP_TYPE_ULT_HF
+ && vreg->range == &ult_hf_range1)
+ vlevel &= ~ULT_SMPS_RANGE_SPLIT;
+
+ return vlevel - (vreg->range->set_point_min_uV - vreg->range->min_uV)
+ / vreg->range->step_uV;
+}
+
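+/* Read the voltage setpoint register over SPMI and cache the decoded value. */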
+static int qpnp_smps_read_voltage(struct spm_vreg *vreg)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT, &reg, 1);
+ if (rc) {
+ dev_err(&vreg->spmi_dev->dev, "%s: could not read voltage setpoint register, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ vreg->last_set_vlevel = reg;
+ vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg, reg);
+
+ return rc;
+}
+
+static int qpnp_smps_set_mode(struct spm_vreg *vreg, u8 mode)
+{
+ int rc;
+
+ rc = spmi_ext_register_writel(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_MODE, &mode, 1);
+ if (rc)
+ dev_err(&vreg->spmi_dev->dev, "%s: could not write to mode register, rc=%d\n",
+ __func__, rc);
+
+ return rc;
+}
+
+static int spm_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ int vlevel, rc;
+
+ if (spm_regulator_using_avs(vreg)) {
+ vlevel = msm_spm_get_vdd(vreg->cpu_num);
+
+ if (IS_ERR_VALUE(vlevel)) {
+ pr_debug("%s: msm_spm_get_vdd failed, rc=%d; falling back on SPMI read\n",
+ vreg->rdesc.name, vlevel);
+
+ rc = qpnp_smps_read_voltage(vreg);
+ if (rc) {
+ pr_err("%s: voltage read failed, rc=%d\n",
+ vreg->rdesc.name, rc);
+ return rc;
+ }
+
+ return vreg->last_set_uV;
+ }
+
+ vreg->last_set_vlevel = vlevel;
+ vreg->last_set_uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+ return vreg->last_set_uV;
+ } else {
+ return vreg->uV;
+ }
+}
+
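+/*
+ * Write a new voltage setpoint, preferring the SPM hardware path and falling
+ * back to a direct SPMI register write; when stepping up, delay long enough
+ * for the voltage stepper to finish.
+ */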
+static int spm_regulator_write_voltage(struct spm_vreg *vreg, int uV)
+{
+ unsigned vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+ bool spm_failed = false;
+ int rc = 0;
+ u8 reg;
+
+ if (likely(!vreg->bypass_spm)) {
+ /* Set voltage control register via SPM. */
+ rc = msm_spm_set_vdd(vreg->cpu_num, vlevel);
+ if (rc) {
+ pr_debug("%s: msm_spm_set_vdd failed, rc=%d; falling back on SPMI write\n",
+ vreg->rdesc.name, rc);
+ spm_failed = true;
+ }
+ }
+
+ if (unlikely(vreg->bypass_spm || spm_failed)) {
+ /* Set voltage control register via SPMI. */
+ reg = vlevel;
+ rc = spmi_ext_register_writel(vreg->spmi_dev->ctrl,
+ vreg->spmi_dev->sid,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT,
+ &reg, 1);
+ if (rc) {
+ pr_err("%s: spmi_ext_register_writel failed, rc=%d\n",
+ vreg->rdesc.name, rc);
+ return rc;
+ }
+ }
+
+ if (uV > vreg->last_set_uV) {
+ /* Wait for voltage stepping to complete. */
+ udelay(DIV_ROUND_UP(uV - vreg->last_set_uV, vreg->step_rate));
+ }
+
+ vreg->last_set_uV = uV;
+ vreg->last_set_vlevel = vlevel;
+
+ return rc;
+}
+
+static int spm_regulator_recalibrate(struct spm_vreg *vreg)
+{
+ int rc;
+
+ if (!vreg->recal_cluster_mask)
+ return 0;
+
+ rc = __invoke_psci_fn_smc(0xC4000020, vreg->recal_cluster_mask,
+ 2, 0);
+ if (rc)
+ pr_err("%s: recalibration failed, rc=%d\n", vreg->rdesc.name,
+ rc);
+
+ return rc;
+}
+
+static int _spm_regulator_set_voltage(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ bool pwm_required;
+ int rc = 0;
+ int uV;
+
+ rc = spm_regulator_get_voltage(rdev);
+ if (IS_ERR_VALUE(rc))
+ return rc;
+
+ if (vreg->vlevel == vreg->last_set_vlevel)
+ return 0;
+
+ pwm_required = (vreg->regulator_type == QPNP_TYPE_FTS2)
+ && !(vreg->init_mode & QPNP_SMPS_MODE_PWM)
+ && vreg->uV > vreg->last_set_uV;
+
+ if (pwm_required) {
+ /* Switch to PWM mode so that voltage ramping is fast. */
+ rc = qpnp_smps_set_mode(vreg, QPNP_SMPS_MODE_PWM);
+ if (rc)
+ return rc;
+ }
+
+ do {
+ uV = vreg->uV > vreg->last_set_uV
+ ? min(vreg->uV, vreg->last_set_uV + (int)vreg->max_step_uV)
+ : max(vreg->uV, vreg->last_set_uV - (int)vreg->max_step_uV);
+
+ rc = spm_regulator_write_voltage(vreg, uV);
+ if (rc)
+ return rc;
+ } while (vreg->last_set_uV != vreg->uV);
+
+ if (pwm_required) {
+ /* Wait for mode transition to complete. */
+ udelay(QPNP_FTS2_MODE_CHANGE_DELAY - QPNP_SPMI_WRITE_MIN_DELAY);
+ /* Switch to AUTO mode so that power consumption is lowered. */
+ rc = qpnp_smps_set_mode(vreg, QPNP_SMPS_MODE_AUTO);
+ if (rc)
+ return rc;
+ }
+
+ rc = spm_regulator_recalibrate(vreg);
+
+ return rc;
+}
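+
+/*
+ * Illustration of the stepping loop above with hypothetical numbers: if the
+ * regulator was last set to 900000 uV, max_step_uV is 100000 and the new
+ * target is 1150000 uV, the loop writes 1000000, 1100000 and finally
+ * 1150000 uV, waiting after each write for the slew to finish (the delay in
+ * spm_regulator_write_voltage() is roughly delta_uV / step_rate
+ * microseconds).
+ */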
+
+static int spm_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ const struct voltage_range *range = vreg->range;
+ int uV = min_uV;
+ unsigned vlevel;
+
+ if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+ uV = range->set_point_min_uV;
+
+ if (uV < range->set_point_min_uV || uV > range->max_uV) {
+ pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+ vreg->rdesc.name, min_uV, max_uV,
+ range->set_point_min_uV, range->max_uV);
+ return -EINVAL;
+ }
+
+ vlevel = spm_regulator_uv_to_vlevel(vreg, uV);
+ uV = spm_regulator_vlevel_to_uv(vreg, vlevel);
+
+ if (uV > max_uV) {
+ pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+ vreg->rdesc.name, min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ *selector = spm_regulator_vlevel_to_selector(vreg, vlevel);
+ vreg->vlevel = vlevel;
+ vreg->uV = uV;
+
+ if (!vreg->online)
+ return 0;
+
+ return _spm_regulator_set_voltage(rdev);
+}
+
+static int spm_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+ if (selector >= vreg->rdesc.n_voltages)
+ return 0;
+
+ return selector * vreg->range->step_uV + vreg->range->set_point_min_uV;
+}
+
+static int spm_regulator_enable(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ rc = _spm_regulator_set_voltage(rdev);
+
+ if (!rc)
+ vreg->online = true;
+
+ return rc;
+}
+
+static int spm_regulator_disable(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+ vreg->online = false;
+
+ return 0;
+}
+
+static int spm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->online;
+}
+
+static unsigned int spm_regulator_get_mode(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->mode == QPNP_SMPS_MODE_PWM
+ ? REGULATOR_MODE_NORMAL : REGULATOR_MODE_IDLE;
+}
+
+static int spm_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+ /*
+ * Map REGULATOR_MODE_NORMAL to PWM mode and REGULATOR_MODE_IDLE to
+ * init_mode. This ensures that the regulator always stays in PWM mode
+ * in the case that qcom,mode has been specified as "pwm" in device
+ * tree.
+ */
+ vreg->mode
+ = mode == REGULATOR_MODE_NORMAL ? QPNP_SMPS_MODE_PWM : vreg->init_mode;
+
+ return qpnp_smps_set_mode(vreg, vreg->mode);
+}
+
+static struct regulator_ops spm_regulator_ops = {
+ .get_voltage = spm_regulator_get_voltage,
+ .set_voltage = spm_regulator_set_voltage,
+ .list_voltage = spm_regulator_list_voltage,
+ .get_mode = spm_regulator_get_mode,
+ .set_mode = spm_regulator_set_mode,
+ .enable = spm_regulator_enable,
+ .disable = spm_regulator_disable,
+ .is_enabled = spm_regulator_is_enabled,
+};
+
+static int spm_regulator_avs_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ const struct voltage_range *range = vreg->range;
+ unsigned vlevel_min, vlevel_max;
+ int uV, avs_min_uV, avs_max_uV, rc;
+
+ uV = min_uV;
+
+ if (uV < range->set_point_min_uV && max_uV >= range->set_point_min_uV)
+ uV = range->set_point_min_uV;
+
+ if (uV < range->set_point_min_uV || uV > range->max_uV) {
+ pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+ vreg->avs_rdesc.name, min_uV, max_uV,
+ range->set_point_min_uV, range->max_uV);
+ return -EINVAL;
+ }
+
+ vlevel_min = spm_regulator_uv_to_vlevel(vreg, uV);
+ avs_min_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_min);
+
+ if (avs_min_uV > max_uV) {
+ pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+ vreg->avs_rdesc.name, min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ uV = max_uV;
+
+ if (uV > range->max_uV && min_uV <= range->max_uV)
+ uV = range->max_uV;
+
+ if (uV < range->set_point_min_uV || uV > range->max_uV) {
+ pr_err("%s: request v=[%d, %d] is outside possible v=[%d, %d]\n",
+ vreg->avs_rdesc.name, min_uV, max_uV,
+ range->set_point_min_uV, range->max_uV);
+ return -EINVAL;
+ }
+
+ vlevel_max = (uV - range->min_uV) / range->step_uV;
+ avs_max_uV = spm_regulator_vlevel_to_uv(vreg, vlevel_max);
+
+ if (avs_max_uV < min_uV) {
+ pr_err("%s: request v=[%d, %d] cannot be met by any set point\n",
+ vreg->avs_rdesc.name, min_uV, max_uV);
+ return -EINVAL;
+ }
+
+ if (likely(!vreg->bypass_spm)) {
+ rc = msm_spm_avs_set_limit(vreg->cpu_num, vlevel_min,
+ vlevel_max);
+ if (rc) {
+ pr_err("%s: AVS limit setting failed, rc=%d\n",
+ vreg->avs_rdesc.name, rc);
+ return rc;
+ }
+ }
+
+ *selector = spm_regulator_vlevel_to_selector(vreg, vlevel_min);
+ vreg->avs_min_uV = avs_min_uV;
+ vreg->avs_max_uV = avs_max_uV;
+
+ return 0;
+}
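+
+/*
+ * Note on the rounding above (hypothetical numbers, min_uV = 500000,
+ * step_uV = 5000): the lower AVS limit is rounded up
+ * (DIV_ROUND_UP(503000 - 500000, 5000) = 1 -> 505000 uV) while the upper
+ * limit is rounded down ((512000 - 500000) / 5000 = 2 -> 510000 uV), so the
+ * programmed hardware window always lies inside the requested
+ * [min_uV, max_uV] range.
+ */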
+
+static int spm_regulator_avs_get_voltage(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->avs_min_uV;
+}
+
+static int spm_regulator_avs_enable(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ if (likely(!vreg->bypass_spm)) {
+ rc = msm_spm_avs_enable(vreg->cpu_num);
+ if (rc) {
+ pr_err("%s: AVS enable failed, rc=%d\n",
+ vreg->avs_rdesc.name, rc);
+ return rc;
+ }
+ }
+
+ vreg->avs_enabled = true;
+
+ return 0;
+}
+
+static int spm_regulator_avs_disable(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+ int rc;
+
+ if (likely(!vreg->bypass_spm)) {
+ rc = msm_spm_avs_disable(vreg->cpu_num);
+ if (rc) {
+ pr_err("%s: AVS disable failed, rc=%d\n",
+ vreg->avs_rdesc.name, rc);
+ return rc;
+ }
+ }
+
+ vreg->avs_enabled = false;
+
+ return 0;
+}
+
+static int spm_regulator_avs_is_enabled(struct regulator_dev *rdev)
+{
+ struct spm_vreg *vreg = rdev_get_drvdata(rdev);
+
+ return vreg->avs_enabled;
+}
+
+static struct regulator_ops spm_regulator_avs_ops = {
+ .get_voltage = spm_regulator_avs_get_voltage,
+ .set_voltage = spm_regulator_avs_set_voltage,
+ .list_voltage = spm_regulator_list_voltage,
+ .enable = spm_regulator_avs_enable,
+ .disable = spm_regulator_avs_disable,
+ .is_enabled = spm_regulator_avs_is_enabled,
+};
+
+static int qpnp_smps_check_type(struct spm_vreg *vreg)
+{
+ int rc;
+ u8 type[2];
+
+ rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_TYPE, type, 2);
+ if (rc) {
+ dev_err(&vreg->spmi_dev->dev, "%s: could not read type register, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ if (type[0] == QPNP_FTS2_TYPE && type[1] == QPNP_FTS2_SUBTYPE) {
+ vreg->regulator_type = QPNP_TYPE_FTS2;
+ } else if (type[0] == QPNP_FTS2p5_TYPE
+ && type[1] == QPNP_FTS2p5_SUBTYPE) {
+ vreg->regulator_type = QPNP_TYPE_FTS2p5;
+ } else if (type[0] == QPNP_ULT_HF_TYPE
+ && type[1] == QPNP_ULT_HF_SUBTYPE) {
+ vreg->regulator_type = QPNP_TYPE_ULT_HF;
+ } else if (type[0] == QPNP_HF_TYPE
+ && type[1] == QPNP_HF_SUBTYPE) {
+ vreg->regulator_type = QPNP_TYPE_HF;
+ } else {
+ dev_err(&vreg->spmi_dev->dev, "%s: invalid type=0x%02X, subtype=0x%02X register pair\n",
+ __func__, type[0], type[1]);
+ return -ENODEV;
+ }
+
+ return rc;
+}
+
+static int qpnp_smps_init_range(struct spm_vreg *vreg,
+ const struct voltage_range *range0, const struct voltage_range *range1)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_RANGE, &reg, 1);
+ if (rc) {
+ dev_err(&vreg->spmi_dev->dev, "%s: could not read voltage range register, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ if (reg == 0x00) {
+ vreg->range = range0;
+ } else if (reg == 0x01) {
+ vreg->range = range1;
+ } else {
+ dev_err(&vreg->spmi_dev->dev, "%s: voltage range=%d is invalid\n",
+ __func__, reg);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int qpnp_ult_hf_init_range(struct spm_vreg *vreg)
+{
+ int rc;
+ u8 reg = 0;
+
+ rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_VOLTAGE_SETPOINT, &reg, 1);
+ if (rc) {
+ dev_err(&vreg->spmi_dev->dev, "%s: could not read voltage range register, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ vreg->range = (reg < ULT_SMPS_RANGE_SPLIT) ? &ult_hf_range0 :
+ &ult_hf_range1;
+ return rc;
+}
+
+static int qpnp_smps_init_voltage(struct spm_vreg *vreg)
+{
+ int rc;
+
+ rc = qpnp_smps_read_voltage(vreg);
+ if (rc) {
+ pr_err("%s: voltage read failed, rc=%d\n", vreg->rdesc.name,
+ rc);
+ return rc;
+ }
+
+ vreg->vlevel = vreg->last_set_vlevel;
+ vreg->uV = vreg->last_set_uV;
+
+ /* Initialize SAW voltage control register */
+ if (!vreg->bypass_spm) {
+ rc = msm_spm_set_vdd(vreg->cpu_num, vreg->vlevel);
+ if (rc)
+ pr_err("%s: msm_spm_set_vdd failed, rc=%d\n",
+ vreg->rdesc.name, rc);
+ }
+
+ return 0;
+}
+
+static int qpnp_smps_init_mode(struct spm_vreg *vreg)
+{
+ const char *mode_name;
+ int rc;
+
+ rc = of_property_read_string(vreg->spmi_dev->dev.of_node, "qcom,mode",
+ &mode_name);
+ if (!rc) {
+ if (strcmp("pwm", mode_name) == 0) {
+ vreg->init_mode = QPNP_SMPS_MODE_PWM;
+ } else if ((strcmp("auto", mode_name) == 0) &&
+ (vreg->regulator_type != QPNP_TYPE_ULT_HF)) {
+ vreg->init_mode = QPNP_SMPS_MODE_AUTO;
+ } else {
+ dev_err(&vreg->spmi_dev->dev, "%s: unknown regulator mode: %s\n",
+ __func__, mode_name);
+ return -EINVAL;
+ }
+
+ rc = spmi_ext_register_writel(vreg->spmi_dev->ctrl,
+ vreg->spmi_dev->sid,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+ &vreg->init_mode, 1);
+ if (rc)
+ dev_err(&vreg->spmi_dev->dev, "%s: could not write mode register, rc=%d\n",
+ __func__, rc);
+ } else {
+ rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl,
+ vreg->spmi_dev->sid,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_MODE,
+ &vreg->init_mode, 1);
+ if (rc)
+ dev_err(&vreg->spmi_dev->dev, "%s: could not read mode register, rc=%d\n",
+ __func__, rc);
+ }
+
+ vreg->mode = vreg->init_mode;
+
+ return rc;
+}
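+
+/*
+ * The "qcom,mode" handling above accepts either "pwm" or, for non-ULT bucks,
+ * "auto".  A sketch of the corresponding device tree property (the node name
+ * and layout are illustrative only, not taken from the binding document):
+ *
+ *	regulator@1700 {
+ *		...
+ *		qcom,mode = "auto";
+ *	};
+ */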
+
+static int qpnp_smps_init_step_rate(struct spm_vreg *vreg)
+{
+ int rc;
+ u8 reg = 0;
+ int step = 0, delay;
+
+ rc = spmi_ext_register_readl(vreg->spmi_dev->ctrl, vreg->spmi_dev->sid,
+ vreg->spmi_base_addr + QPNP_SMPS_REG_STEP_CTRL, &reg, 1);
+ if (rc) {
+ dev_err(&vreg->spmi_dev->dev, "%s: could not read stepping control register, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ /* ULT buck does not support steps */
+ if (vreg->regulator_type != QPNP_TYPE_ULT_HF)
+ step = (reg & QPNP_SMPS_STEP_CTRL_STEP_MASK)
+ >> QPNP_SMPS_STEP_CTRL_STEP_SHIFT;
+
+ delay = (reg & QPNP_SMPS_STEP_CTRL_DELAY_MASK)
+ >> QPNP_SMPS_STEP_CTRL_DELAY_SHIFT;
+
+ /* step_rate has units of uV/us. */
+ vreg->step_rate = QPNP_SMPS_CLOCK_RATE * vreg->range->step_uV
+ * (1 << step);
+
+ if ((vreg->regulator_type == QPNP_TYPE_ULT_HF)
+ || (vreg->regulator_type == QPNP_TYPE_HF))
+ vreg->step_rate /= 1000 * (QPNP_HF_STEP_DELAY << delay);
+ else
+ vreg->step_rate /= 1000 * (QPNP_FTS2_STEP_DELAY << delay);
+
+ vreg->step_rate = vreg->step_rate * QPNP_FTS2_STEP_MARGIN_NUM
+ / QPNP_FTS2_STEP_MARGIN_DEN;
+
+ /* Ensure that the stepping rate is greater than 0. */
+ vreg->step_rate = max(vreg->step_rate, 1);
+
+ return rc;
+}
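+
+/*
+ * Worked example of the step rate calculation above (the register fields and
+ * constants here are hypothetical, not the real defaults): with a 19200 kHz
+ * clock, step_uV = 5000, a step field of 0 and a delay field of 1 on an FTS2
+ * buck with QPNP_FTS2_STEP_DELAY = 8, the raw rate is
+ * 19200 * 5000 * 1 / (1000 * (8 << 1)) = 6000 uV/us, which the step margin
+ * then reduces to 4800 uV/us, assuming QPNP_FTS2_STEP_MARGIN_NUM = 4 against
+ * the divisor of 5 defined above.
+ */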
+
+static bool spm_regulator_using_range0(struct spm_vreg *vreg)
+{
+ return vreg->range == &fts2_range0 || vreg->range == &fts2p5_range0
+ || vreg->range == &ult_hf_range0 || vreg->range == &hf_range0;
+}
+
+/* Register a regulator to enable/disable AVS and set AVS min/max limits. */
+static int spm_regulator_avs_register(struct spm_vreg *vreg,
+ struct device *dev, struct device_node *node)
+{
+ struct regulator_config reg_config = {};
+ struct device_node *avs_node = NULL;
+ struct device_node *child_node;
+ struct regulator_init_data *init_data;
+ int rc;
+
+ /*
+ * Find the first available child node (if any). It corresponds to an
+ * AVS limits regulator.
+ */
+ for_each_available_child_of_node(node, child_node) {
+ avs_node = child_node;
+ break;
+ }
+
+ if (!avs_node)
+ return 0;
+
+ init_data = of_get_regulator_init_data(dev, avs_node);
+ if (!init_data) {
+ dev_err(dev, "%s: unable to allocate memory\n", __func__);
+ return -ENOMEM;
+ }
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+ init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+ | REGULATOR_CHANGE_VOLTAGE;
+
+ if (!init_data->constraints.name) {
+ dev_err(dev, "%s: AVS node is missing regulator name\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ vreg->avs_rdesc.name = init_data->constraints.name;
+ vreg->avs_rdesc.type = REGULATOR_VOLTAGE;
+ vreg->avs_rdesc.owner = THIS_MODULE;
+ vreg->avs_rdesc.ops = &spm_regulator_avs_ops;
+ vreg->avs_rdesc.n_voltages
+ = (vreg->range->max_uV - vreg->range->set_point_min_uV)
+ / vreg->range->step_uV + 1;
+
+ reg_config.dev = dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = vreg;
+ reg_config.of_node = avs_node;
+
+ vreg->avs_rdev = regulator_register(&vreg->avs_rdesc, &reg_config);
+ if (IS_ERR(vreg->avs_rdev)) {
+ rc = PTR_ERR(vreg->avs_rdev);
+ dev_err(dev, "%s: AVS regulator_register failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ if (vreg->bypass_spm)
+ pr_debug("%s: SPM bypassed so AVS regulator calls are no-ops\n",
+ vreg->avs_rdesc.name);
+
+ return 0;
+}
+
+static int spm_regulator_probe(struct spmi_device *spmi)
+{
+ struct regulator_config reg_config = {};
+ struct device_node *node = spmi->dev.of_node;
+ struct regulator_init_data *init_data;
+ struct spm_vreg *vreg;
+ struct resource *res;
+ bool bypass_spm;
+ int rc;
+
+ if (!node) {
+ dev_err(&spmi->dev, "%s: device node missing\n", __func__);
+ return -ENODEV;
+ }
+
+ bypass_spm = of_property_read_bool(node, "qcom,bypass-spm");
+ if (!bypass_spm) {
+ rc = msm_spm_probe_done();
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ dev_err(&spmi->dev, "%s: spm unavailable, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+ }
+
+ vreg = devm_kzalloc(&spmi->dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ pr_err("allocation failed.\n");
+ return -ENOMEM;
+ }
+ vreg->spmi_dev = spmi;
+ vreg->bypass_spm = bypass_spm;
+
+ res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&spmi->dev, "%s: node is missing base address\n",
+ __func__);
+ return -EINVAL;
+ }
+ vreg->spmi_base_addr = res->start;
+
+ rc = qpnp_smps_check_type(vreg);
+ if (rc)
+ return rc;
+
+ /* Specify CPU 0 as default in order to handle shared regulator case. */
+ vreg->cpu_num = 0;
+ of_property_read_u32(vreg->spmi_dev->dev.of_node, "qcom,cpu-num",
+ &vreg->cpu_num);
+
+ of_property_read_u32(vreg->spmi_dev->dev.of_node, "qcom,recal-mask",
+ &vreg->recal_cluster_mask);
+
+ /*
+ * The regulator must be initialized to range 0 or range 1 during
+ * PMIC power on sequence. Once it is set, it cannot be changed
+ * dynamically.
+ */
+ if (vreg->regulator_type == QPNP_TYPE_FTS2)
+ rc = qpnp_smps_init_range(vreg, &fts2_range0, &fts2_range1);
+ else if (vreg->regulator_type == QPNP_TYPE_FTS2p5)
+ rc = qpnp_smps_init_range(vreg, &fts2p5_range0, &fts2p5_range1);
+ else if (vreg->regulator_type == QPNP_TYPE_HF)
+ rc = qpnp_smps_init_range(vreg, &hf_range0, &hf_range1);
+ else if (vreg->regulator_type == QPNP_TYPE_ULT_HF)
+ rc = qpnp_ult_hf_init_range(vreg);
+ if (rc)
+ return rc;
+
+ rc = qpnp_smps_init_voltage(vreg);
+ if (rc)
+ return rc;
+
+ rc = qpnp_smps_init_mode(vreg);
+ if (rc)
+ return rc;
+
+ rc = qpnp_smps_init_step_rate(vreg);
+ if (rc)
+ return rc;
+
+ init_data = of_get_regulator_init_data(&spmi->dev, node);
+ if (!init_data) {
+ dev_err(&spmi->dev, "%s: unable to allocate memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+ init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS
+ | REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE;
+ init_data->constraints.valid_modes_mask
+ = REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+
+ if (!init_data->constraints.name) {
+ dev_err(&spmi->dev, "%s: node is missing regulator name\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ vreg->rdesc.name = init_data->constraints.name;
+ vreg->rdesc.type = REGULATOR_VOLTAGE;
+ vreg->rdesc.owner = THIS_MODULE;
+ vreg->rdesc.ops = &spm_regulator_ops;
+ vreg->rdesc.n_voltages
+ = (vreg->range->max_uV - vreg->range->set_point_min_uV)
+ / vreg->range->step_uV + 1;
+
+ vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+ of_property_read_u32(vreg->spmi_dev->dev.of_node,
+ "qcom,max-voltage-step", &vreg->max_step_uV);
+
+ if (vreg->max_step_uV > SPM_REGULATOR_MAX_STEP_UV)
+ vreg->max_step_uV = SPM_REGULATOR_MAX_STEP_UV;
+
+ vreg->max_step_uV = rounddown(vreg->max_step_uV, vreg->range->step_uV);
+ pr_debug("%s: max single voltage step size=%u uV\n",
+ vreg->rdesc.name, vreg->max_step_uV);
+
+ reg_config.dev = &spmi->dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = vreg;
+ reg_config.of_node = node;
+ vreg->rdev = regulator_register(&vreg->rdesc, &reg_config);
+
+ if (IS_ERR(vreg->rdev)) {
+ rc = PTR_ERR(vreg->rdev);
+ dev_err(&spmi->dev, "%s: regulator_register failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ rc = spm_regulator_avs_register(vreg, &spmi->dev, node);
+ if (rc) {
+ regulator_unregister(vreg->rdev);
+ return rc;
+ }
+
+ dev_set_drvdata(&spmi->dev, vreg);
+
+ pr_info("name=%s, range=%s, voltage=%d uV, mode=%s, step rate=%d uV/us\n",
+ vreg->rdesc.name,
+ spm_regulator_using_range0(vreg) ? "LV" : "MV",
+ vreg->uV,
+ vreg->init_mode & QPNP_SMPS_MODE_PWM ? "PWM" :
+ (vreg->init_mode & QPNP_SMPS_MODE_AUTO ? "AUTO" : "PFM"),
+ vreg->step_rate);
+
+ return rc;
+}
+
+static int spm_regulator_remove(struct spmi_device *spmi)
+{
+ struct spm_vreg *vreg = dev_get_drvdata(&spmi->dev);
+
+ if (vreg->avs_rdev)
+ regulator_unregister(vreg->avs_rdev);
+ regulator_unregister(vreg->rdev);
+
+ return 0;
+}
+
+static struct of_device_id spm_regulator_match_table[] = {
+ { .compatible = SPM_REGULATOR_DRIVER_NAME, },
+ {}
+};
+
+static const struct spmi_device_id spm_regulator_id[] = {
+ { SPM_REGULATOR_DRIVER_NAME, 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spmi, spm_regulator_id);
+
+static struct spmi_driver spm_regulator_driver = {
+ .driver = {
+ .name = SPM_REGULATOR_DRIVER_NAME,
+ .of_match_table = spm_regulator_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = spm_regulator_probe,
+ .remove = spm_regulator_remove,
+ .id_table = spm_regulator_id,
+};
+
+/**
+ * spm_regulator_init() - register spmi driver for spm-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ *
+ * Returns 0 on success or errno on failure.
+ */
+int __init spm_regulator_init(void)
+{
+ static bool has_registered;
+
+ if (has_registered)
+ return 0;
+ else
+ has_registered = true;
+
+ return spmi_driver_register(&spm_regulator_driver);
+}
+EXPORT_SYMBOL(spm_regulator_init);
+
+static void __exit spm_regulator_exit(void)
+{
+ spmi_driver_unregister(&spm_regulator_driver);
+}
+
+arch_initcall(spm_regulator_init);
+module_exit(spm_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SPM regulator driver");
+MODULE_ALIAS("platform:spm-regulator");
diff --git a/drivers/regulator/stub-regulator.c b/drivers/regulator/stub-regulator.c
new file mode 100644
index 000000000000..04302f05f685
--- /dev/null
+++ b/drivers/regulator/stub-regulator.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/stub-regulator.h>
+
+#define STUB_REGULATOR_MAX_NAME 40
+
+struct regulator_stub {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ int voltage;
+ bool enabled;
+ int mode;
+ int hpm_min_load;
+ int system_uA;
+ char name[STUB_REGULATOR_MAX_NAME];
+};
+
+static int regulator_stub_set_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned *selector)
+{
+ struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+ vreg_priv->voltage = min_uV;
+ return 0;
+}
+
+static int regulator_stub_get_voltage(struct regulator_dev *rdev)
+{
+ struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+ return vreg_priv->voltage;
+}
+
+static int regulator_stub_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct regulation_constraints *constraints = rdev->constraints;
+
+ if (selector >= 2)
+ return -EINVAL;
+ else if (selector == 0)
+ return constraints->min_uV;
+ else
+ return constraints->max_uV;
+}
+
+static unsigned int regulator_stub_get_mode(struct regulator_dev *rdev)
+{
+ struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+ return vreg_priv->mode;
+}
+
+static int regulator_stub_set_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+ if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+ dev_err(&rdev->dev, "%s: invalid mode requested %u\n",
+ __func__, mode);
+ return -EINVAL;
+ }
+ vreg_priv->mode = mode;
+ return 0;
+}
+
+static unsigned int regulator_stub_get_optimum_mode(struct regulator_dev *rdev,
+ int input_uV, int output_uV, int load_uA)
+{
+ struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+ unsigned int mode;
+
+ if (load_uA + vreg_priv->system_uA >= vreg_priv->hpm_min_load)
+ mode = REGULATOR_MODE_NORMAL;
+ else
+ mode = REGULATOR_MODE_IDLE;
+
+ return mode;
+}
+
+static int regulator_stub_enable(struct regulator_dev *rdev)
+{
+ struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+ vreg_priv->enabled = true;
+ return 0;
+}
+
+static int regulator_stub_disable(struct regulator_dev *rdev)
+{
+ struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+ vreg_priv->enabled = false;
+ return 0;
+}
+
+static int regulator_stub_is_enabled(struct regulator_dev *rdev)
+{
+ struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+ return vreg_priv->enabled;
+}
+
+/* Real regulator operations. */
+static struct regulator_ops regulator_stub_ops = {
+ .enable = regulator_stub_enable,
+ .disable = regulator_stub_disable,
+ .is_enabled = regulator_stub_is_enabled,
+ .set_voltage = regulator_stub_set_voltage,
+ .get_voltage = regulator_stub_get_voltage,
+ .list_voltage = regulator_stub_list_voltage,
+ .set_mode = regulator_stub_set_mode,
+ .get_mode = regulator_stub_get_mode,
+ .get_optimum_mode = regulator_stub_get_optimum_mode,
+};
+
+static void regulator_stub_cleanup(struct regulator_stub *vreg_priv)
+{
+ if (vreg_priv && vreg_priv->rdev)
+ regulator_unregister(vreg_priv->rdev);
+ kfree(vreg_priv);
+}
+
+static int regulator_stub_probe(struct platform_device *pdev)
+{
+ struct regulator_config reg_config = {};
+ struct regulator_init_data *init_data = NULL;
+ struct device *dev = &pdev->dev;
+ struct stub_regulator_pdata *vreg_pdata;
+ struct regulator_desc *rdesc;
+ struct regulator_stub *vreg_priv;
+ int rc;
+
+ vreg_priv = kzalloc(sizeof(*vreg_priv), GFP_KERNEL);
+ if (!vreg_priv) {
+ dev_err(dev, "%s: Unable to allocate memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (dev->of_node) {
+ /* Use device tree. */
+ init_data = of_get_regulator_init_data(dev,
+ dev->of_node);
+ if (!init_data) {
+ dev_err(dev, "%s: unable to allocate memory\n",
+ __func__);
+ rc = -ENOMEM;
+ goto err_probe;
+ }
+
+ if (init_data->constraints.name == NULL) {
+ dev_err(dev, "%s: regulator name not specified\n",
+ __func__);
+ rc = -EINVAL;
+ goto err_probe;
+ }
+
+ if (of_get_property(dev->of_node, "parent-supply", NULL))
+ init_data->supply_regulator = "parent";
+
+ of_property_read_u32(dev->of_node, "qcom,system-load",
+ &vreg_priv->system_uA);
+ of_property_read_u32(dev->of_node, "qcom,hpm-min-load",
+ &vreg_priv->hpm_min_load);
+
+ init_data->constraints.input_uV = init_data->constraints.max_uV;
+
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_STATUS;
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_VOLTAGE;
+ init_data->constraints.valid_ops_mask
+ |= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+ init_data->constraints.valid_modes_mask
+ = REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+ } else {
+ /* Use platform data. */
+ vreg_pdata = dev->platform_data;
+ if (!vreg_pdata) {
+ dev_err(dev, "%s: no platform data\n", __func__);
+ rc = -EINVAL;
+ goto err_probe;
+ }
+ init_data = &vreg_pdata->init_data;
+
+ vreg_priv->system_uA = vreg_pdata->system_uA;
+ vreg_priv->hpm_min_load = vreg_pdata->hpm_min_load;
+ }
+
+ dev_set_drvdata(dev, vreg_priv);
+
+ rdesc = &vreg_priv->rdesc;
+ strlcpy(vreg_priv->name, init_data->constraints.name,
+ STUB_REGULATOR_MAX_NAME);
+ rdesc->name = vreg_priv->name;
+ rdesc->ops = &regulator_stub_ops;
+
+ /*
+ * Ensure that voltage set points are handled correctly for regulators
+ * which have a specified voltage constraint range, as well as those
+ * that do not.
+ */
+ if (init_data->constraints.min_uV == 0 &&
+ init_data->constraints.max_uV == 0)
+ rdesc->n_voltages = 0;
+ else
+ rdesc->n_voltages = 2;
+
+ rdesc->id = pdev->id;
+ rdesc->owner = THIS_MODULE;
+ rdesc->type = REGULATOR_VOLTAGE;
+ vreg_priv->voltage = init_data->constraints.min_uV;
+ if (vreg_priv->system_uA >= vreg_priv->hpm_min_load)
+ vreg_priv->mode = REGULATOR_MODE_NORMAL;
+ else
+ vreg_priv->mode = REGULATOR_MODE_IDLE;
+
+ reg_config.dev = dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = vreg_priv;
+ reg_config.of_node = dev->of_node;
+ vreg_priv->rdev = regulator_register(rdesc, &reg_config);
+
+ if (IS_ERR(vreg_priv->rdev)) {
+ rc = PTR_ERR(vreg_priv->rdev);
+ vreg_priv->rdev = NULL;
+ if (rc != -EPROBE_DEFER)
+ dev_err(dev, "%s: regulator_register failed\n",
+ __func__);
+ goto err_probe;
+ }
+
+ return 0;
+
+err_probe:
+ regulator_stub_cleanup(vreg_priv);
+ return rc;
+}
+
+static int regulator_stub_remove(struct platform_device *pdev)
+{
+ struct regulator_stub *vreg_priv = dev_get_drvdata(&pdev->dev);
+
+ regulator_stub_cleanup(vreg_priv);
+ return 0;
+}
+
+static struct of_device_id regulator_stub_match_table[] = {
+ { .compatible = "qcom," STUB_REGULATOR_DRIVER_NAME, },
+ {}
+};
+
+static struct platform_driver regulator_stub_driver = {
+ .probe = regulator_stub_probe,
+ .remove = regulator_stub_remove,
+ .driver = {
+ .name = STUB_REGULATOR_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = regulator_stub_match_table,
+ },
+};
+
+int __init regulator_stub_init(void)
+{
+ static int registered;
+
+ if (registered)
+ return 0;
+ else
+ registered = 1;
+ return platform_driver_register(&regulator_stub_driver);
+}
+postcore_initcall(regulator_stub_init);
+EXPORT_SYMBOL(regulator_stub_init);
+
+static void __exit regulator_stub_exit(void)
+{
+ platform_driver_unregister(&regulator_stub_driver);
+}
+module_exit(regulator_stub_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stub regulator driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform: " STUB_REGULATOR_DRIVER_NAME);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 2a524244afec..da25ffeb520d 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1591,6 +1591,15 @@ config RTC_DRV_MOXART
This driver can also be built as a module. If so, the module
will be called rtc-moxart
+config RTC_DRV_QPNP
+ tristate "Qualcomm QPNP PMIC RTC"
+ depends on (SPMI || MSM_SPMI) && OF_SPMI && MSM_QPNP_INT
+ help
+ Say Y here if you want to support the Qualcomm QPNP PMIC RTC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qpnp-rtc.
+
config RTC_DRV_MT6397
tristate "Mediatek Real Time Clock driver"
depends on MFD_MT6397 || COMPILE_TEST
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 231f76451615..f264c343e6e9 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -118,6 +118,7 @@ obj-$(CONFIG_RTC_DRV_PM8XXX) += rtc-pm8xxx.o
obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
+obj-$(CONFIG_RTC_DRV_QPNP) += qpnp-rtc.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RC5T583) += rtc-rc5t583.o
obj-$(CONFIG_RTC_DRV_RK808) += rtc-rk808.o
diff --git a/drivers/rtc/qpnp-rtc.c b/drivers/rtc/qpnp-rtc.c
new file mode 100644
index 000000000000..bb69b892d5e6
--- /dev/null
+++ b/drivers/rtc/qpnp-rtc.c
@@ -0,0 +1,714 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rtc.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/of_device.h>
+#include <linux/spmi.h>
+#include <linux/spinlock.h>
+#include <linux/spmi.h>
+#include <linux/alarmtimer.h>
+
+/* RTC/ALARM Register offsets */
+#define REG_OFFSET_ALARM_RW 0x40
+#define REG_OFFSET_ALARM_CTRL1 0x46
+#define REG_OFFSET_ALARM_CTRL2 0x48
+#define REG_OFFSET_RTC_WRITE 0x40
+#define REG_OFFSET_RTC_CTRL 0x46
+#define REG_OFFSET_RTC_READ 0x48
+#define REG_OFFSET_PERP_SUBTYPE 0x05
+
+/* RTC_CTRL register bit fields */
+#define BIT_RTC_ENABLE BIT(7)
+#define BIT_RTC_ALARM_ENABLE BIT(7)
+#define BIT_RTC_ABORT_ENABLE BIT(0)
+#define BIT_RTC_ALARM_CLEAR BIT(0)
+
+/* RTC/ALARM peripheral subtype values */
+#define RTC_PERPH_SUBTYPE 0x1
+#define ALARM_PERPH_SUBTYPE 0x3
+
+#define NUM_8_BIT_RTC_REGS 0x4
+
+#define TO_SECS(arr) (arr[0] | (arr[1] << 8) | (arr[2] << 16) | \
+ (arr[3] << 24))
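+
+/*
+ * TO_SECS() reassembles the 32 bit seconds counter from the four 8 bit
+ * registers, least significant byte first: for example, a register dump of
+ * {0x78, 0x56, 0x34, 0x12} yields 0x12345678 seconds.
+ */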
+
+/* Module parameter to control power-on-alarm */
+bool poweron_alarm;
+module_param(poweron_alarm, bool, 0644);
+MODULE_PARM_DESC(poweron_alarm, "Enable/Disable power-on alarm");
+EXPORT_SYMBOL(poweron_alarm);
+
+/* rtc driver internal structure */
+struct qpnp_rtc {
+ u8 rtc_ctrl_reg;
+ u8 alarm_ctrl_reg1;
+ u16 rtc_base;
+ u16 alarm_base;
+ u32 rtc_write_enable;
+ u32 rtc_alarm_powerup;
+ int rtc_alarm_irq;
+ struct device *rtc_dev;
+ struct rtc_device *rtc;
+ struct spmi_device *spmi;
+ spinlock_t alarm_ctrl_lock;
+};
+
+static int qpnp_read_wrapper(struct qpnp_rtc *rtc_dd, u8 *rtc_val,
+ u16 base, int count)
+{
+ int rc;
+ struct spmi_device *spmi = rtc_dd->spmi;
+
+ rc = spmi_ext_register_readl(spmi->ctrl, spmi->sid, base, rtc_val,
+ count);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev, "SPMI read failed\n");
+ return rc;
+ }
+ return 0;
+}
+
+static int qpnp_write_wrapper(struct qpnp_rtc *rtc_dd, u8 *rtc_val,
+ u16 base, int count)
+{
+ int rc;
+ struct spmi_device *spmi = rtc_dd->spmi;
+
+ rc = spmi_ext_register_writel(spmi->ctrl, spmi->sid, base, rtc_val,
+ count);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+qpnp_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ int rc;
+ unsigned long secs, irq_flags;
+ u8 value[4], reg = 0, alarm_enabled = 0, ctrl_reg;
+ u8 rtc_disabled = 0, rtc_ctrl_reg;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rtc_tm_to_time(tm, &secs);
+
+ value[0] = secs & 0xFF;
+ value[1] = (secs >> 8) & 0xFF;
+ value[2] = (secs >> 16) & 0xFF;
+ value[3] = (secs >> 24) & 0xFF;
+
+ dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+
+ if (ctrl_reg & BIT_RTC_ALARM_ENABLE) {
+ alarm_enabled = 1;
+ ctrl_reg &= ~BIT_RTC_ALARM_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM ctrl reg failed\n");
+ goto rtc_rw_fail;
+ }
+ } else
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ /*
+ * The 32 bit seconds value is converted to four 8 bit values
+ * |<------ 32 bit time value in seconds ------>|
+ * <- 8 bit ->|<- 8 bit ->|<- 8 bit ->|<- 8 bit ->|
+ * ----------------------------------------------
+ * | BYTE[3] | BYTE[2] | BYTE[1] | BYTE[0] |
+ * ----------------------------------------------
+ *
+ * The RTC has four 8 bit registers for writing the time in seconds:
+ * WDATA[3], WDATA[2], WDATA[1], WDATA[0]
+ *
+ * Writes to the RTC registers should be done in the following order:
+ * Clear WDATA[0] register
+ *
+ * Write BYTE[1], BYTE[2] and BYTE[3] of time to
+ * RTC WDATA[3], WDATA[2], WDATA[1] registers
+ *
+ * Write BYTE[0] of time to RTC WDATA[0] register
+ *
+ * Clearing BYTE[0] first and writing it last prevents any
+ * unintentional overflow from WDATA[0] into the higher bytes during the
+ * write operation.
+ */
+
+ /* Disable RTC H/W before writing to the RTC registers */
+ rtc_ctrl_reg = rtc_dd->rtc_ctrl_reg;
+ if (rtc_ctrl_reg & BIT_RTC_ENABLE) {
+ rtc_disabled = 1;
+ rtc_ctrl_reg &= ~BIT_RTC_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &rtc_ctrl_reg,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_CTRL, 1);
+ if (rc) {
+ dev_err(dev,
+ "Disabling of RTC control reg failed"
+ " with error:%d\n", rc);
+ goto rtc_rw_fail;
+ }
+ rtc_dd->rtc_ctrl_reg = rtc_ctrl_reg;
+ }
+
+ /* Clear WDATA[0] */
+ reg = 0x0;
+ rc = qpnp_write_wrapper(rtc_dd, &reg,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE, 1);
+ if (rc) {
+ dev_err(dev, "Write to RTC reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ /* Write to WDATA[3], WDATA[2] and WDATA[1] */
+ rc = qpnp_write_wrapper(rtc_dd, &value[1],
+ rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE + 1, 3);
+ if (rc) {
+ dev_err(dev, "Write to RTC reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ /* Write to WDATA[0] */
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_WRITE, 1);
+ if (rc) {
+ dev_err(dev, "Write to RTC reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ /* Re-enable RTC H/W after writing to the RTC registers */
+ if (rtc_disabled) {
+ rtc_ctrl_reg |= BIT_RTC_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &rtc_ctrl_reg,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_CTRL, 1);
+ if (rc) {
+ dev_err(dev,
+ "Enabling of RTC control reg failed"
+ " with error:%d\n", rc);
+ goto rtc_rw_fail;
+ }
+ rtc_dd->rtc_ctrl_reg = rtc_ctrl_reg;
+ }
+
+ if (alarm_enabled) {
+ ctrl_reg |= BIT_RTC_ALARM_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM ctrl reg failed\n");
+ goto rtc_rw_fail;
+ }
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+rtc_rw_fail:
+ if (alarm_enabled)
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ return rc;
+}
+
+static int
+qpnp_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ int rc;
+ u8 value[4], reg;
+ unsigned long secs;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rc = qpnp_read_wrapper(rtc_dd, value,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_READ,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Read from RTC reg failed\n");
+ return rc;
+ }
+
+ /*
+ * Read the LSB again and check if there has been a carry over
+ * If there is, redo the read operation
+ */
+ rc = qpnp_read_wrapper(rtc_dd, &reg,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_READ, 1);
+ if (rc) {
+ dev_err(dev, "Read from RTC reg failed\n");
+ return rc;
+ }
+
+ if (reg < value[0]) {
+ rc = qpnp_read_wrapper(rtc_dd, value,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_READ,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Read from RTC reg failed\n");
+ return rc;
+ }
+ }
+
+ secs = TO_SECS(value);
+
+ rtc_time_to_tm(secs, tm);
+
+ rc = rtc_valid_tm(tm);
+ if (rc) {
+ dev_err(dev, "Invalid time read from RTC\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "secs = %lu, h:m:s == %d:%d:%d, d/m/y = %d/%d/%d\n",
+ secs, tm->tm_hour, tm->tm_min, tm->tm_sec,
+ tm->tm_mday, tm->tm_mon, tm->tm_year);
+
+ return 0;
+}
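+
+/*
+ * Example of the carry-over check above: if the first burst read returns
+ * value[0] = 0xFF and the follow-up read of the LSB returns 0x00, the
+ * counter rolled over between the two reads and the higher bytes of the
+ * first read may be stale, so the full four byte read is repeated.
+ */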
+
+static int
+qpnp_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ int rc;
+ u8 value[4], ctrl_reg;
+ unsigned long secs, secs_rtc, irq_flags;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+ struct rtc_time rtc_tm;
+
+ rtc_tm_to_time(&alarm->time, &secs);
+
+ /*
+ * Read the current RTC time and verify if the alarm time is in the
+ * past. If yes, return invalid
+ */
+ rc = qpnp_rtc_read_time(dev, &rtc_tm);
+ if (rc) {
+ dev_err(dev, "Unable to read RTC time\n");
+ return -EINVAL;
+ }
+
+ rtc_tm_to_time(&rtc_tm, &secs_rtc);
+ if (secs < secs_rtc) {
+ dev_err(dev, "Trying to set alarm in the past\n");
+ return -EINVAL;
+ }
+
+ value[0] = secs & 0xFF;
+ value[1] = (secs >> 8) & 0xFF;
+ value[2] = (secs >> 16) & 0xFF;
+ value[3] = (secs >> 24) & 0xFF;
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Write to ALARM reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ ctrl_reg = (alarm->enabled) ?
+ (rtc_dd->alarm_ctrl_reg1 | BIT_RTC_ALARM_ENABLE) :
+ (rtc_dd->alarm_ctrl_reg1 & ~BIT_RTC_ALARM_ENABLE);
+
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM cntrol reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+ dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
+rtc_rw_fail:
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ return rc;
+}
+
+static int
+qpnp_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+ int rc;
+ u8 value[4];
+ unsigned long secs;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+
+ rc = qpnp_read_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc) {
+ dev_err(dev, "Read from ALARM reg failed\n");
+ return rc;
+ }
+
+ secs = TO_SECS(value);
+ rtc_time_to_tm(secs, &alarm->time);
+
+ rc = rtc_valid_tm(&alarm->time);
+ if (rc) {
+ dev_err(dev, "Invalid time read from RTC\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "Alarm set for - h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
+ alarm->time.tm_hour, alarm->time.tm_min,
+ alarm->time.tm_sec, alarm->time.tm_mday,
+ alarm->time.tm_mon, alarm->time.tm_year);
+
+ return 0;
+}
+
+
+static int
+qpnp_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ int rc;
+ unsigned long irq_flags;
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(dev);
+ u8 ctrl_reg;
+ u8 value[4] = {0};
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+ ctrl_reg = enabled ? (ctrl_reg | BIT_RTC_ALARM_ENABLE) :
+ (ctrl_reg & ~BIT_RTC_ALARM_ENABLE);
+
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(dev, "Write to ALARM control reg failed\n");
+ goto rtc_rw_fail;
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+
+ /* Clear Alarm register */
+ if (!enabled) {
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc)
+ dev_err(dev, "Clear ALARM value reg failed\n");
+ }
+
+rtc_rw_fail:
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ return rc;
+}
+
+static struct rtc_class_ops qpnp_rtc_ops = {
+ .read_time = qpnp_rtc_read_time,
+ .set_alarm = qpnp_rtc_set_alarm,
+ .read_alarm = qpnp_rtc_read_alarm,
+ .alarm_irq_enable = qpnp_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t qpnp_alarm_trigger(int irq, void *dev_id)
+{
+ struct qpnp_rtc *rtc_dd = dev_id;
+ u8 ctrl_reg;
+ int rc;
+ unsigned long irq_flags;
+
+ rtc_update_irq(rtc_dd->rtc, 1, RTC_IRQF | RTC_AF);
+
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ /* Clear the alarm enable bit */
+ ctrl_reg = rtc_dd->alarm_ctrl_reg1;
+ ctrl_reg &= ~BIT_RTC_ALARM_ENABLE;
+
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ dev_err(rtc_dd->rtc_dev,
+ "Write to ALARM control reg failed\n");
+ goto rtc_alarm_handled;
+ }
+
+ rtc_dd->alarm_ctrl_reg1 = ctrl_reg;
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+
+ /* Set ALARM_CLR bit */
+ ctrl_reg = 0x1;
+ rc = qpnp_write_wrapper(rtc_dd, &ctrl_reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL2, 1);
+ if (rc)
+ dev_err(rtc_dd->rtc_dev,
+ "Write to ALARM control reg failed\n");
+
+rtc_alarm_handled:
+ return IRQ_HANDLED;
+}
+
+static int qpnp_rtc_probe(struct spmi_device *spmi)
+{
+ int rc;
+ u8 subtype;
+ struct qpnp_rtc *rtc_dd;
+ struct resource *resource;
+ struct spmi_resource *spmi_resource;
+
+ rtc_dd = devm_kzalloc(&spmi->dev, sizeof(*rtc_dd), GFP_KERNEL);
+ if (rtc_dd == NULL) {
+ dev_err(&spmi->dev, "Unable to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ /* Get the rtc write property */
+ rc = of_property_read_u32(spmi->dev.of_node, "qcom,qpnp-rtc-write",
+ &rtc_dd->rtc_write_enable);
+ if (rc && rc != -EINVAL) {
+ dev_err(&spmi->dev,
+ "Error reading rtc_write_enable property %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(spmi->dev.of_node,
+ "qcom,qpnp-rtc-alarm-pwrup",
+ &rtc_dd->rtc_alarm_powerup);
+ if (rc && rc != -EINVAL) {
+ dev_err(&spmi->dev,
+ "Error reading rtc_alarm_powerup property %d\n", rc);
+ return rc;
+ }
+
+ /* Initialise spinlock to protect RTC control register */
+ spin_lock_init(&rtc_dd->alarm_ctrl_lock);
+
+ rtc_dd->rtc_dev = &(spmi->dev);
+ rtc_dd->spmi = spmi;
+
+ /* Get RTC/ALARM resources */
+ spmi_for_each_container_dev(spmi_resource, spmi) {
+ if (!spmi_resource) {
+ dev_err(&spmi->dev,
+ "%s: rtc_alarm: spmi resource absent!\n",
+ __func__);
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+
+ resource = spmi_get_resource(spmi, spmi_resource,
+ IORESOURCE_MEM, 0);
+ if (!(resource && resource->start)) {
+ dev_err(&spmi->dev,
+ "%s: node %s IO resource absent!\n",
+ __func__, spmi->dev.of_node->full_name);
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+
+ rc = qpnp_read_wrapper(rtc_dd, &subtype,
+ resource->start + REG_OFFSET_PERP_SUBTYPE, 1);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Peripheral subtype read failed\n");
+ goto fail_rtc_enable;
+ }
+
+ switch (subtype) {
+ case RTC_PERPH_SUBTYPE:
+ rtc_dd->rtc_base = resource->start;
+ break;
+ case ALARM_PERPH_SUBTYPE:
+ rtc_dd->alarm_base = resource->start;
+ rtc_dd->rtc_alarm_irq =
+ spmi_get_irq(spmi, spmi_resource, 0);
+ if (rtc_dd->rtc_alarm_irq < 0) {
+ dev_err(&spmi->dev, "ALARM IRQ absent\n");
+ rc = -ENXIO;
+ goto fail_rtc_enable;
+ }
+ break;
+ default:
+ dev_err(&spmi->dev, "Invalid peripheral subtype\n");
+ rc = -EINVAL;
+ goto fail_rtc_enable;
+ }
+ }
+
+ rc = qpnp_read_wrapper(rtc_dd, &rtc_dd->rtc_ctrl_reg,
+ rtc_dd->rtc_base + REG_OFFSET_RTC_CTRL, 1);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Read from RTC control reg failed\n");
+ goto fail_rtc_enable;
+ }
+
+ if (!(rtc_dd->rtc_ctrl_reg & BIT_RTC_ENABLE)) {
+ dev_err(&spmi->dev,
+ "RTC h/w disabled, rtc not registered\n");
+ rc = -ENODEV;
+ goto fail_rtc_enable;
+ }
+
+ rc = qpnp_read_wrapper(rtc_dd, &rtc_dd->alarm_ctrl_reg1,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(&spmi->dev,
+ "Read from Alarm control reg failed\n");
+ goto fail_rtc_enable;
+ }
+ /* Enable abort enable feature */
+ rtc_dd->alarm_ctrl_reg1 |= BIT_RTC_ABORT_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &rtc_dd->alarm_ctrl_reg1,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(&spmi->dev, "SPMI write failed!\n");
+ goto fail_rtc_enable;
+ }
+
+ if (rtc_dd->rtc_write_enable)
+ qpnp_rtc_ops.set_time = qpnp_rtc_set_time;
+
+ dev_set_drvdata(&spmi->dev, rtc_dd);
+
+ /* Register the RTC device */
+ rtc_dd->rtc = rtc_device_register("qpnp_rtc", &spmi->dev,
+ &qpnp_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc_dd->rtc)) {
+ dev_err(&spmi->dev, "%s: RTC registration failed (%ld)\n",
+ __func__, PTR_ERR(rtc_dd->rtc));
+ rc = PTR_ERR(rtc_dd->rtc);
+ goto fail_rtc_enable;
+ }
+
+ /* Init power_on_alarm after adding rtc device */
+ power_on_alarm_init();
+
+ /* Request the alarm IRQ */
+ rc = request_any_context_irq(rtc_dd->rtc_alarm_irq,
+ qpnp_alarm_trigger, IRQF_TRIGGER_RISING,
+ "qpnp_rtc_alarm", rtc_dd);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "Request IRQ failed (%d)\n", rc);
+ goto fail_req_irq;
+ }
+
+ device_init_wakeup(&spmi->dev, 1);
+ enable_irq_wake(rtc_dd->rtc_alarm_irq);
+
+ dev_dbg(&spmi->dev, "Probe success !!\n");
+
+ return 0;
+
+fail_req_irq:
+ rtc_device_unregister(rtc_dd->rtc);
+fail_rtc_enable:
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return rc;
+}
+
+static int qpnp_rtc_remove(struct spmi_device *spmi)
+{
+ struct qpnp_rtc *rtc_dd = dev_get_drvdata(&spmi->dev);
+
+ device_init_wakeup(&spmi->dev, 0);
+ free_irq(rtc_dd->rtc_alarm_irq, rtc_dd);
+ rtc_device_unregister(rtc_dd->rtc);
+ dev_set_drvdata(&spmi->dev, NULL);
+
+ return 0;
+}
+
+static void qpnp_rtc_shutdown(struct spmi_device *spmi)
+{
+ u8 value[4] = {0};
+ u8 reg;
+ int rc;
+ unsigned long irq_flags;
+ struct qpnp_rtc *rtc_dd;
+ bool rtc_alarm_powerup;
+
+ if (!spmi) {
+ pr_err("qpnp-rtc: spmi device not found\n");
+ return;
+ }
+ rtc_dd = dev_get_drvdata(&spmi->dev);
+ if (!rtc_dd) {
+ pr_err("qpnp-rtc: rtc driver data not found\n");
+ return;
+ }
+ rtc_alarm_powerup = rtc_dd->rtc_alarm_powerup;
+ if (!rtc_alarm_powerup && !poweron_alarm) {
+ spin_lock_irqsave(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ dev_dbg(&spmi->dev, "Disabling alarm interrupts\n");
+
+ /* Disable RTC alarms */
+ reg = rtc_dd->alarm_ctrl_reg1;
+ reg &= ~BIT_RTC_ALARM_ENABLE;
+ rc = qpnp_write_wrapper(rtc_dd, &reg,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_CTRL1, 1);
+ if (rc) {
+ dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+ goto fail_alarm_disable;
+ }
+
+ /* Clear Alarm register */
+ rc = qpnp_write_wrapper(rtc_dd, value,
+ rtc_dd->alarm_base + REG_OFFSET_ALARM_RW,
+ NUM_8_BIT_RTC_REGS);
+ if (rc)
+ dev_err(rtc_dd->rtc_dev, "SPMI write failed\n");
+
+fail_alarm_disable:
+ spin_unlock_irqrestore(&rtc_dd->alarm_ctrl_lock, irq_flags);
+ }
+}
+
+static struct of_device_id spmi_match_table[] = {
+ {
+ .compatible = "qcom,qpnp-rtc",
+ },
+ {}
+};
+
+static struct spmi_driver qpnp_rtc_driver = {
+ .probe = qpnp_rtc_probe,
+ .remove = qpnp_rtc_remove,
+ .shutdown = qpnp_rtc_shutdown,
+ .driver = {
+ .name = "qcom,qpnp-rtc",
+ .owner = THIS_MODULE,
+ .of_match_table = spmi_match_table,
+ },
+};
+
+static int __init qpnp_rtc_init(void)
+{
+ return spmi_driver_register(&qpnp_rtc_driver);
+}
+module_init(qpnp_rtc_init);
+
+static void __exit qpnp_rtc_exit(void)
+{
+ spmi_driver_unregister(&qpnp_rtc_driver);
+}
+module_exit(qpnp_rtc_exit);
+
+MODULE_DESCRIPTION("SMPI PMIC RTC driver");
+MODULE_LICENSE("GPL V2");
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 8cc4ac64a91c..5cfce4246b7a 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -365,6 +365,33 @@ config INTEL_PCH_THERMAL
Thermal reporting device will provide temperature reading,
programmable trip points and other information.
+config THERMAL_QPNP
+ tristate "Qualcomm Plug-and-Play PMIC Temperature Alarm"
+ depends on THERMAL
+ depends on OF
+ depends on SPMI || MSM_SPMI
+ depends on OF_SPMI
+ help
+ This enables a thermal Sysfs driver for Qualcomm plug-and-play (QPNP)
+ PMIC devices. It shows up in Sysfs as a thermal zone with multiple
+ trip points. The temperature reported by the thermal zone reflects the
+ real-time die temperature if an ADC is present, or an estimate based on
+ the over-temperature stage value if no ADC is available. If allowed via
+ compile-time configuration, enabling the thermal zone device via the
+ mode file shifts PMIC over-temperature shutdown control from hardware
+ to software.
+
+config THERMAL_QPNP_ADC_TM
+ tristate "Qualcomm 8974 Thermal Monitor ADC Driver"
+ depends on THERMAL
+ depends on SPMI || MSM_SPMI
+ help
+ This enables the thermal Sysfs driver for the ADC thermal monitoring
+ device. It shows up in Sysfs as a thermal zone with multiple trip points.
+ Disabling the thermal zone device via the mode file results in disabling
+ the sensor. The driver can also set threshold temperatures for both hot
+ and cold and send a notification when a threshold is crossed.
+
menu "Texas Instruments thermal drivers"
depends on ARCH_HAS_BANDGAP || COMPILE_TEST
source "drivers/thermal/ti-soc-thermal/Kconfig"
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index cfae6a654793..950d43f3bcc8 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -48,3 +48,5 @@ obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
+obj-$(CONFIG_THERMAL_QPNP) += qpnp-temp-alarm.o
+obj-$(CONFIG_THERMAL_QPNP_ADC_TM) += qpnp-adc-tm.o
diff --git a/drivers/thermal/qpnp-temp-alarm.c b/drivers/thermal/qpnp-temp-alarm.c
new file mode 100644
index 000000000000..19b599c0f1ff
--- /dev/null
+++ b/drivers/thermal/qpnp-temp-alarm.c
@@ -0,0 +1,725 @@
+/*
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/thermal.h>
+#include <linux/qpnp/qpnp-adc.h>
+
+#define QPNP_TM_DRIVER_NAME "qcom,qpnp-temp-alarm"
+
+enum qpnp_tm_registers {
+ QPNP_TM_REG_TYPE = 0x04,
+ QPNP_TM_REG_SUBTYPE = 0x05,
+ QPNP_TM_REG_STATUS = 0x08,
+ QPNP_TM_REG_SHUTDOWN_CTRL1 = 0x40,
+ QPNP_TM_REG_SHUTDOWN_CTRL2 = 0x42,
+ QPNP_TM_REG_ALARM_CTRL = 0x46,
+};
+
+#define QPNP_TM_TYPE 0x09
+#define QPNP_TM_SUBTYPE 0x08
+
+#define STATUS_STAGE_MASK 0x03
+
+#define SHUTDOWN_CTRL1_OVERRIDE_STAGE3 0x80
+#define SHUTDOWN_CTRL1_OVERRIDE_STAGE2 0x40
+#define SHUTDOWN_CTRL1_THRESHOLD_MASK 0x03
+
+#define SHUTDOWN_CTRL2_CLEAR_STAGE3 0x80
+#define SHUTDOWN_CTRL2_CLEAR_STAGE2 0x40
+
+#define ALARM_CTRL_FORCE_ENABLE 0x80
+#define ALARM_CTRL_FOLLOW_HW_ENABLE 0x01
+
+#define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */
+#define TEMP_STAGE_HYSTERESIS 2000
+
+#define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */
+#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
+
+#define THRESH_MIN 0
+#define THRESH_MAX 3
+
+/* Trip points from most critical to least critical */
+#define TRIP_STAGE3 0
+#define TRIP_STAGE2 1
+#define TRIP_STAGE1 2
+#define TRIP_NUM 3
+
+enum qpnp_tm_adc_type {
+ QPNP_TM_ADC_NONE, /* Estimates temp based on overload level. */
+ QPNP_TM_ADC_QPNP_ADC,
+};
+
+/*
+ * Temperature in millicelsius reported during stage 0 if no ADC is present and
+ * no value has been specified via device tree.
+ */
+#define DEFAULT_NO_ADC_TEMP 37000
+
+struct qpnp_tm_chip {
+ struct delayed_work irq_work;
+ struct spmi_device *spmi_dev;
+ struct thermal_zone_device *tz_dev;
+ const char *tm_name;
+ enum qpnp_tm_adc_type adc_type;
+ unsigned long temperature;
+ enum thermal_device_mode mode;
+ unsigned int thresh;
+ unsigned int stage;
+ unsigned int prev_stage;
+ int irq;
+ enum qpnp_vadc_channels adc_channel;
+ u16 base_addr;
+ bool allow_software_override;
+ struct qpnp_vadc_chip *vadc_dev;
+};
+
+/* Delay between TEMP_STAT IRQ going high and status value changing in ms. */
+#define STATUS_REGISTER_DELAY_MS 40
+
+enum pmic_thermal_override_mode {
+ SOFTWARE_OVERRIDE_DISABLED = 0,
+ SOFTWARE_OVERRIDE_ENABLED,
+};
+
+static inline int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *buf,
+ int len)
+{
+ int rc;
+
+ rc = spmi_ext_register_readl(chip->spmi_dev->ctrl,
+ chip->spmi_dev->sid, chip->base_addr + addr, buf, len);
+
+ if (rc)
+ dev_err(&chip->spmi_dev->dev, "%s: spmi_ext_register_readl() failed. sid=%d, addr=%04X, len=%d, rc=%d\n",
+ __func__, chip->spmi_dev->sid, chip->base_addr + addr,
+ len, rc);
+
+ return rc;
+}
+
+static inline int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 *buf,
+ int len)
+{
+ int rc;
+
+ rc = spmi_ext_register_writel(chip->spmi_dev->ctrl,
+ chip->spmi_dev->sid, chip->base_addr + addr, buf, len);
+
+ if (rc)
+ dev_err(&chip->spmi_dev->dev, "%s: spmi_ext_register_writel() failed. sid=%d, addr=%04X, len=%d, rc=%d\n",
+ __func__, chip->spmi_dev->sid, chip->base_addr + addr,
+ len, rc);
+
+ return rc;
+}
+
+
+static inline int qpnp_tm_shutdown_override(struct qpnp_tm_chip *chip,
+ enum pmic_thermal_override_mode mode)
+{
+ int rc = 0;
+ u8 reg;
+
+ if (chip->allow_software_override) {
+ reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+
+ if (mode == SOFTWARE_OVERRIDE_ENABLED)
+ reg |= SHUTDOWN_CTRL1_OVERRIDE_STAGE2
+ | SHUTDOWN_CTRL1_OVERRIDE_STAGE3;
+
+ rc = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg, 1);
+ }
+
+ return rc;
+}
+
+static int qpnp_tm_update_temp(struct qpnp_tm_chip *chip)
+{
+ struct qpnp_vadc_result adc_result;
+ int rc;
+
+ rc = qpnp_vadc_read(chip->vadc_dev, chip->adc_channel, &adc_result);
+ if (!rc)
+ chip->temperature = adc_result.physical;
+ else
+ dev_err(&chip->spmi_dev->dev, "%s: qpnp_vadc_read(%d) failed, rc=%d\n",
+ __func__, chip->adc_channel, rc);
+
+ return rc;
+}
+
+/*
+ * This function initializes the internal temperature value based on only the
+ * current thermal stage and threshold.
+ */
+static int qpnp_tm_init_temp_no_adc(struct qpnp_tm_chip *chip)
+{
+ int rc;
+ u8 reg;
+
+ rc = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg, 1);
+ if (rc < 0)
+ return rc;
+
+ chip->stage = reg & STATUS_STAGE_MASK;
+
+ if (chip->stage)
+ chip->temperature = chip->thresh * TEMP_THRESH_STEP +
+ (chip->stage - 1) * TEMP_STAGE_STEP +
+ TEMP_THRESH_MIN;
+
+ return 0;
+}
+
+/*
+ * This function updates the internal temperature value based on the
+ * current thermal stage and threshold as well as the previous stage
+ */
+static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
+{
+ unsigned int stage;
+ int rc;
+ u8 reg;
+
+ rc = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg, 1);
+ if (rc < 0)
+ return rc;
+
+ stage = reg & STATUS_STAGE_MASK;
+
+ if (stage > chip->stage) {
+ /* increasing stage, use lower bound */
+ chip->temperature = (stage - 1) * TEMP_STAGE_STEP
+ + chip->thresh * TEMP_THRESH_STEP
+ + TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+ } else if (stage < chip->stage) {
+ /* decreasing stage, use upper bound */
+ chip->temperature = stage * TEMP_STAGE_STEP
+ + chip->thresh * TEMP_THRESH_STEP
+ - TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN;
+ }
+
+ chip->stage = stage;
+
+ return 0;
+}
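+
+/*
+ * Worked example (illustrative only): with the lowest threshold setting
+ * (thresh = 0, i.e. 105 C), a transition from stage 1 to stage 2 yields
+ * (2 - 1) * 20000 + 0 * 5000 + 2000 + 105000 = 127000 mC, while dropping
+ * back to stage 1 afterwards yields 1 * 20000 + 0 * 5000 - 2000 + 105000
+ * = 123000 mC.
+ */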
+
+static int qpnp_tz_get_temp_no_adc(struct thermal_zone_device *thermal,
+ unsigned long *temperature)
+{
+ struct qpnp_tm_chip *chip = thermal->devdata;
+ int rc;
+
+ if (!temperature)
+ return -EINVAL;
+
+ rc = qpnp_tm_update_temp_no_adc(chip);
+ if (rc < 0)
+ return rc;
+
+ *temperature = chip->temperature;
+
+ return 0;
+}
+
+static int qpnp_tz_get_temp_qpnp_adc(struct thermal_zone_device *thermal,
+ unsigned long *temperature)
+{
+ struct qpnp_tm_chip *chip = thermal->devdata;
+ int rc;
+
+ if (!temperature)
+ return -EINVAL;
+
+ rc = qpnp_tm_update_temp(chip);
+ if (rc < 0) {
+ dev_err(&chip->spmi_dev->dev, "%s: %s: adc read failed, rc = %d\n",
+ __func__, chip->tm_name, rc);
+ return rc;
+ }
+
+ *temperature = chip->temperature;
+
+ return 0;
+}
+
+static int qpnp_tz_get_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode *mode)
+{
+ struct qpnp_tm_chip *chip = thermal->devdata;
+
+ if (!mode)
+ return -EINVAL;
+
+ *mode = chip->mode;
+
+ return 0;
+}
+
+static int qpnp_tz_set_mode(struct thermal_zone_device *thermal,
+ enum thermal_device_mode mode)
+{
+ struct qpnp_tm_chip *chip = thermal->devdata;
+ int rc = 0;
+
+ if (mode != chip->mode) {
+ if (mode == THERMAL_DEVICE_ENABLED)
+ rc = qpnp_tm_shutdown_override(chip,
+ SOFTWARE_OVERRIDE_ENABLED);
+ else
+ rc = qpnp_tm_shutdown_override(chip,
+ SOFTWARE_OVERRIDE_DISABLED);
+
+ chip->mode = mode;
+ }
+
+ return rc;
+}
+
+static int qpnp_tz_get_trip_type(struct thermal_zone_device *thermal,
+ int trip, enum thermal_trip_type *type)
+{
+ if (trip < 0 || !type)
+ return -EINVAL;
+
+ switch (trip) {
+ case TRIP_STAGE3:
+ *type = THERMAL_TRIP_CRITICAL;
+ break;
+ case TRIP_STAGE2:
+ *type = THERMAL_TRIP_HOT;
+ break;
+ case TRIP_STAGE1:
+ *type = THERMAL_TRIP_HOT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qpnp_tz_get_trip_temp(struct thermal_zone_device *thermal,
+ int trip, unsigned long *temperature)
+{
+ struct qpnp_tm_chip *chip = thermal->devdata;
+ int thresh_temperature;
+
+ if (trip < 0 || !temperature)
+ return -EINVAL;
+
+ thresh_temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN;
+
+ switch (trip) {
+ case TRIP_STAGE3:
+ thresh_temperature += 2 * TEMP_STAGE_STEP;
+ break;
+ case TRIP_STAGE2:
+ thresh_temperature += TEMP_STAGE_STEP;
+ break;
+ case TRIP_STAGE1:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *temperature = thresh_temperature;
+
+ return 0;
+}
+
+static int qpnp_tz_get_crit_temp(struct thermal_zone_device *thermal,
+ unsigned long *temperature)
+{
+ struct qpnp_tm_chip *chip = thermal->devdata;
+
+ if (!temperature)
+ return -EINVAL;
+
+ *temperature = chip->thresh * TEMP_THRESH_STEP + TEMP_THRESH_MIN +
+ 2 * TEMP_STAGE_STEP;
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops qpnp_thermal_zone_ops_no_adc = {
+ .get_temp = qpnp_tz_get_temp_no_adc,
+ .get_mode = qpnp_tz_get_mode,
+ .set_mode = qpnp_tz_set_mode,
+ .get_trip_type = qpnp_tz_get_trip_type,
+ .get_trip_temp = qpnp_tz_get_trip_temp,
+ .get_crit_temp = qpnp_tz_get_crit_temp,
+};
+
+static struct thermal_zone_device_ops qpnp_thermal_zone_ops_qpnp_adc = {
+ .get_temp = qpnp_tz_get_temp_qpnp_adc,
+ .get_mode = qpnp_tz_get_mode,
+ .set_mode = qpnp_tz_set_mode,
+ .get_trip_type = qpnp_tz_get_trip_type,
+ .get_trip_temp = qpnp_tz_get_trip_temp,
+ .get_crit_temp = qpnp_tz_get_crit_temp,
+};
+
+static void qpnp_tm_work(struct work_struct *work)
+{
+ struct delayed_work *dwork
+ = container_of(work, struct delayed_work, work);
+ struct qpnp_tm_chip *chip
+ = container_of(dwork, struct qpnp_tm_chip, irq_work);
+ int rc;
+ u8 reg;
+
+ if (chip->adc_type == QPNP_TM_ADC_NONE) {
+ rc = qpnp_tm_update_temp_no_adc(chip);
+ if (rc < 0)
+ goto bail;
+ } else {
+ rc = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg, 1);
+ if (rc < 0)
+ goto bail;
+
+ chip->stage = reg & STATUS_STAGE_MASK;
+
+ rc = qpnp_tm_update_temp(chip);
+ if (rc < 0)
+ goto bail;
+ }
+
+ if (chip->stage != chip->prev_stage) {
+ chip->prev_stage = chip->stage;
+
+ pr_crit("%s: PMIC Temp Alarm - stage=%u, threshold=%u, temperature=%lu mC\n",
+ chip->tm_name, chip->stage, chip->thresh,
+ chip->temperature);
+
+ thermal_zone_device_update(chip->tz_dev);
+
+ /* Notify user space */
+ sysfs_notify(&chip->tz_dev->device.kobj, NULL, "type");
+ }
+
+bail:
+ return;
+}
+
+static irqreturn_t qpnp_tm_isr(int irq, void *data)
+{
+ struct qpnp_tm_chip *chip = data;
+
+ schedule_delayed_work(&chip->irq_work,
+ msecs_to_jiffies(STATUS_REGISTER_DELAY_MS) + 1);
+
+ return IRQ_HANDLED;
+}
+
+static int qpnp_tm_init_reg(struct qpnp_tm_chip *chip)
+{
+ int rc = 0;
+ u8 reg;
+
+ if (chip->thresh < THRESH_MIN || chip->thresh > THRESH_MAX) {
+ /* Read hardware threshold value if configuration is invalid. */
+ rc = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg, 1);
+ if (rc < 0)
+ return rc;
+ chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+ }
+
+ /*
+ * Set threshold and disable software override of stage 2 and 3
+ * shutdowns.
+ */
+ reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
+ rc = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg, 1);
+ if (rc < 0)
+ return rc;
+
+ /* Enable the thermal alarm PMIC module in always-on mode. */
+ reg = ALARM_CTRL_FORCE_ENABLE;
+ rc = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, &reg, 1);
+
+ return rc;
+}
+
+static int qpnp_tm_probe(struct spmi_device *spmi)
+{
+ struct device_node *node;
+ struct resource *res;
+ struct qpnp_tm_chip *chip;
+ struct thermal_zone_device_ops *tz_ops;
+ char *tm_name;
+ u32 default_temperature;
+ int rc = 0;
+ u8 raw_type[2], type, subtype;
+
+	if (!spmi || !spmi->dev.of_node) {
+ dev_err(&spmi->dev, "%s: device tree node not found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ node = spmi->dev.of_node;
+
+ chip = kzalloc(sizeof(struct qpnp_tm_chip), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&spmi->dev, "%s: Can't allocate qpnp_tm_chip\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(&spmi->dev, chip);
+
+ res = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&spmi->dev, "%s: node is missing base address\n",
+ __func__);
+ rc = -EINVAL;
+ goto free_chip;
+ }
+ chip->base_addr = res->start;
+ chip->spmi_dev = spmi;
+
+ chip->irq = spmi_get_irq(spmi, NULL, 0);
+ if (chip->irq < 0) {
+ rc = chip->irq;
+ dev_err(&spmi->dev, "%s: node is missing irq, rc=%d\n",
+ __func__, rc);
+ goto free_chip;
+ }
+
+ chip->tm_name = of_get_property(node, "label", NULL);
+ if (chip->tm_name == NULL) {
+ dev_err(&spmi->dev, "%s: node is missing label\n",
+ __func__);
+ rc = -EINVAL;
+ goto free_chip;
+ }
+
+ tm_name = kstrdup(chip->tm_name, GFP_KERNEL);
+ if (tm_name == NULL) {
+ dev_err(&spmi->dev, "%s: could not allocate memory for label\n",
+ __func__);
+ rc = -ENOMEM;
+ goto free_chip;
+ }
+ chip->tm_name = tm_name;
+
+ INIT_DELAYED_WORK(&chip->irq_work, qpnp_tm_work);
+
+ /* These bindings are optional, so it is okay if they are not found. */
+ chip->thresh = THRESH_MAX + 1;
+ rc = of_property_read_u32(node, "qcom,threshold-set", &chip->thresh);
+ if (!rc && (chip->thresh < THRESH_MIN || chip->thresh > THRESH_MAX))
+ dev_err(&spmi->dev, "%s: invalid qcom,threshold-set=%u specified\n",
+ __func__, chip->thresh);
+
+ chip->adc_type = QPNP_TM_ADC_NONE;
+ rc = of_property_read_u32(node, "qcom,channel-num", &chip->adc_channel);
+ if (!rc) {
+ if (chip->adc_channel < 0 || chip->adc_channel >= ADC_MAX_NUM) {
+ dev_err(&spmi->dev, "%s: invalid qcom,channel-num=%d specified\n",
+ __func__, chip->adc_channel);
+ } else {
+ chip->adc_type = QPNP_TM_ADC_QPNP_ADC;
+ chip->vadc_dev = qpnp_get_vadc(&spmi->dev,
+ "temp_alarm");
+ if (IS_ERR(chip->vadc_dev)) {
+ rc = PTR_ERR(chip->vadc_dev);
+ if (rc != -EPROBE_DEFER)
+ pr_err("vadc property missing\n");
+ goto err_cancel_work;
+ }
+ }
+ }
+
+ if (chip->adc_type == QPNP_TM_ADC_QPNP_ADC)
+ tz_ops = &qpnp_thermal_zone_ops_qpnp_adc;
+ else
+ tz_ops = &qpnp_thermal_zone_ops_no_adc;
+
+ chip->allow_software_override
+ = of_property_read_bool(node, "qcom,allow-override");
+
+ default_temperature = DEFAULT_NO_ADC_TEMP;
+ rc = of_property_read_u32(node, "qcom,default-temp",
+ &default_temperature);
+ chip->temperature = default_temperature;
+
+ rc = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, raw_type, 2);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: could not read type register, rc=%d\n",
+ __func__, rc);
+ goto err_cancel_work;
+ }
+ type = raw_type[0];
+ subtype = raw_type[1];
+
+ if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) {
+ dev_err(&spmi->dev, "%s: invalid type=%02X or subtype=%02X register value\n",
+ __func__, type, subtype);
+ rc = -ENODEV;
+ goto err_cancel_work;
+ }
+
+ rc = qpnp_tm_init_reg(chip);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: qpnp_tm_init_reg() failed, rc=%d\n",
+ __func__, rc);
+ goto err_cancel_work;
+ }
+
+ if (chip->adc_type == QPNP_TM_ADC_NONE) {
+ rc = qpnp_tm_init_temp_no_adc(chip);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: qpnp_tm_init_temp_no_adc() failed, rc=%d\n",
+ __func__, rc);
+ goto err_cancel_work;
+ }
+ }
+
+ /* Start in HW control; switch to SW control when user changes mode. */
+ chip->mode = THERMAL_DEVICE_DISABLED;
+ rc = qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
+ if (rc) {
+ dev_err(&spmi->dev, "%s: qpnp_tm_shutdown_override() failed, rc=%d\n",
+ __func__, rc);
+ goto err_cancel_work;
+ }
+
+ chip->tz_dev = thermal_zone_device_register(tm_name, TRIP_NUM, 0, chip,
+ tz_ops, NULL, 0, 0);
+	if (IS_ERR(chip->tz_dev)) {
+		dev_err(&spmi->dev, "%s: thermal_zone_device_register() failed.\n",
+			__func__);
+		rc = PTR_ERR(chip->tz_dev);
+		goto err_cancel_work;
+	}
+
+ rc = request_irq(chip->irq, qpnp_tm_isr, IRQF_TRIGGER_RISING, tm_name,
+ chip);
+ if (rc < 0) {
+ dev_err(&spmi->dev, "%s: request_irq(%d) failed: %d\n",
+ __func__, chip->irq, rc);
+ goto err_free_tz;
+ }
+
+ return 0;
+
+err_free_tz:
+ thermal_zone_device_unregister(chip->tz_dev);
+err_cancel_work:
+ cancel_delayed_work_sync(&chip->irq_work);
+ kfree(chip->tm_name);
+free_chip:
+ dev_set_drvdata(&spmi->dev, NULL);
+ kfree(chip);
+ return rc;
+}
+
+static int qpnp_tm_remove(struct spmi_device *spmi)
+{
+ struct qpnp_tm_chip *chip = dev_get_drvdata(&spmi->dev);
+
+ dev_set_drvdata(&spmi->dev, NULL);
+ thermal_zone_device_unregister(chip->tz_dev);
+ kfree(chip->tm_name);
+ qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
+ free_irq(chip->irq, chip);
+ cancel_delayed_work_sync(&chip->irq_work);
+ kfree(chip);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int qpnp_tm_suspend(struct device *dev)
+{
+ struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
+
+ /* Clear override bits in suspend to allow hardware control */
+ qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_DISABLED);
+
+ return 0;
+}
+
+static int qpnp_tm_resume(struct device *dev)
+{
+ struct qpnp_tm_chip *chip = dev_get_drvdata(dev);
+
+ /* Override hardware actions so software can control */
+ if (chip->mode == THERMAL_DEVICE_ENABLED)
+ qpnp_tm_shutdown_override(chip, SOFTWARE_OVERRIDE_ENABLED);
+
+ return 0;
+}
+
+static const struct dev_pm_ops qpnp_tm_pm_ops = {
+ .suspend = qpnp_tm_suspend,
+ .resume = qpnp_tm_resume,
+};
+
+#define QPNP_TM_PM_OPS (&qpnp_tm_pm_ops)
+#else
+#define QPNP_TM_PM_OPS NULL
+#endif
+
+static struct of_device_id qpnp_tm_match_table[] = {
+ { .compatible = QPNP_TM_DRIVER_NAME, },
+ {}
+};
+
+static const struct spmi_device_id qpnp_tm_id[] = {
+ { QPNP_TM_DRIVER_NAME, 0 },
+ {}
+};
+
+static struct spmi_driver qpnp_tm_driver = {
+ .driver = {
+ .name = QPNP_TM_DRIVER_NAME,
+ .of_match_table = qpnp_tm_match_table,
+ .owner = THIS_MODULE,
+ .pm = QPNP_TM_PM_OPS,
+ },
+ .probe = qpnp_tm_probe,
+ .remove = qpnp_tm_remove,
+ .id_table = qpnp_tm_id,
+};
+
+static int __init qpnp_tm_init(void)
+{
+ return spmi_driver_register(&qpnp_tm_driver);
+}
+
+static void __exit qpnp_tm_exit(void)
+{
+ spmi_driver_unregister(&qpnp_tm_driver);
+}
+
+module_init(qpnp_tm_init);
+module_exit(qpnp_tm_exit);
+
+MODULE_DESCRIPTION("QPNP PMIC Temperature Alarm driver");
+MODULE_LICENSE("GPL v2");
diff --git a/include/linux/batterydata-lib.h b/include/linux/batterydata-lib.h
new file mode 100644
index 000000000000..39517f83c875
--- /dev/null
+++ b/include/linux/batterydata-lib.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BMS_BATTERYDATA_H
+#define __BMS_BATTERYDATA_H
+
+#include <linux/errno.h>
+
+#define FCC_CC_COLS 5
+#define FCC_TEMP_COLS 8
+
+#define PC_CC_ROWS 31
+#define PC_CC_COLS 13
+
+#define PC_TEMP_ROWS 31
+#define PC_TEMP_COLS 8
+
+#define ACC_IBAT_ROWS 4
+#define ACC_TEMP_COLS 3
+
+#define MAX_SINGLE_LUT_COLS 20
+
+#define MAX_BATT_ID_NUM 4
+#define DEGC_SCALE 10
+
+struct single_row_lut {
+ int x[MAX_SINGLE_LUT_COLS];
+ int y[MAX_SINGLE_LUT_COLS];
+ int cols;
+};
+
+/**
+ * struct sf_lut -
+ * @rows: number of percent charge entries should be <= PC_CC_ROWS
+ * @cols: number of charge cycle entries should be <= PC_CC_COLS
+ * @row_entries: the charge cycles/temperature at which sf data
+ * is available in the table.
+ * The charge cycles must be in increasing order from 0 to rows.
+ * @percent: the percent charge at which sf data is available in the table
+ *		The percent charge must be in decreasing order from 0 to cols.
+ * @sf: the scaling factor data
+ */
+struct sf_lut {
+ int rows;
+ int cols;
+ int row_entries[PC_CC_COLS];
+ int percent[PC_CC_ROWS];
+ int sf[PC_CC_ROWS][PC_CC_COLS];
+};
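+
+/*
+ * Illustrative example (values made up, not part of the snapshot): a
+ * 2 x 3 scaling-factor table with two percent-charge rows (100% and 50%)
+ * and three charge-cycle columns (0, 100 and 200 cycles), where
+ * sf[row][col] is the scaling factor at that (percent, cycles) point:
+ *
+ *	static struct sf_lut example_sf = {
+ *		.rows		= 2,
+ *		.cols		= 3,
+ *		.row_entries	= {0, 100, 200},
+ *		.percent	= {100, 50},
+ *		.sf		= {
+ *			{100, 97, 95},
+ *			{100, 96, 93},
+ *		},
+ *	};
+ */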
+
+/**
+ * struct pc_temp_ocv_lut -
+ * @rows: number of percent charge entries should be <= PC_TEMP_ROWS
+ * @cols: number of temperature entries should be <= PC_TEMP_COLS
+ * @temp: the temperatures at which ocv data is available in the table
+ * The temperatures must be in increasing order from 0 to rows.
+ * @percent: the percent charge at which ocv data is available in the table
+ *		The percent charge must be in decreasing order from 0 to cols.
+ * @ocv: the open circuit voltage
+ */
+struct pc_temp_ocv_lut {
+ int rows;
+ int cols;
+ int temp[PC_TEMP_COLS];
+ int percent[PC_TEMP_ROWS];
+ int ocv[PC_TEMP_ROWS][PC_TEMP_COLS];
+};
+
+struct ibat_temp_acc_lut {
+ int rows;
+ int cols;
+ int temp[ACC_TEMP_COLS];
+ int ibat[ACC_IBAT_ROWS];
+ int acc[ACC_IBAT_ROWS][ACC_TEMP_COLS];
+};
+
+struct batt_ids {
+ int kohm[MAX_BATT_ID_NUM];
+ int num;
+};
+
+enum battery_type {
+ BATT_UNKNOWN = 0,
+ BATT_PALLADIUM,
+ BATT_DESAY,
+ BATT_OEM,
+ BATT_QRD_4V35_2000MAH,
+ BATT_QRD_4V2_1300MAH,
+};
+
+/**
+ * struct bms_battery_data -
+ * @fcc: full charge capacity (mAmpHour)
+ * @fcc_temp_lut: table to get fcc at a given temp
+ * @pc_temp_ocv_lut: table to get percent charge given batt temp and cycles
+ * @pc_sf_lut: table to get percent charge scaling factor given cycles
+ * and percent charge
+ * @rbatt_sf_lut: table to get battery resistance scaling factor given
+ * temperature and percent charge
+ * @default_rbatt_mohm: the default value of battery resistance to use when
+ * readings from bms are not available.
+ * @delta_rbatt_mohm: the resistance to be added towards lower soc to
+ * compensate for battery capacitance.
+ * @rbatt_capacitive_mohm:	the resistance to be added to compensate for
+ * battery capacitance
+ * @flat_ocv_threshold_uv: the voltage where the battery's discharge curve
+ * starts flattening out.
+ * @max_voltage_uv: max voltage of the battery
+ * @cutoff_uv: cutoff voltage of the battery
+ * @iterm_ua: termination current of the battery when charging
+ * to 100%
+ * @batt_id_kohm: the best matched battery id resistor value
+ * @fastchg_current_ma: maximum fast charge current
+ * @fg_cc_cv_threshold_mv:	CC to CV threshold voltage
+ */
+
+struct bms_battery_data {
+ unsigned int fcc;
+ struct single_row_lut *fcc_temp_lut;
+ struct single_row_lut *fcc_sf_lut;
+ struct pc_temp_ocv_lut *pc_temp_ocv_lut;
+ struct ibat_temp_acc_lut *ibat_acc_lut;
+ struct sf_lut *pc_sf_lut;
+ struct sf_lut *rbatt_sf_lut;
+ int default_rbatt_mohm;
+ int delta_rbatt_mohm;
+ int rbatt_capacitive_mohm;
+ int flat_ocv_threshold_uv;
+ int max_voltage_uv;
+ int cutoff_uv;
+ int iterm_ua;
+ int batt_id_kohm;
+ int fastchg_current_ma;
+ int fg_cc_cv_threshold_mv;
+ const char *battery_type;
+};
+
+#define is_between(left, right, value) \
+ (((left) >= (right) && (left) >= (value) \
+ && (value) >= (right)) \
+ || ((left) <= (right) && (left) <= (value) \
+ && (value) <= (right)))
+
+#if defined(CONFIG_PM8921_BMS) || \
+ defined(CONFIG_PM8921_BMS_MODULE) || \
+ defined(CONFIG_QPNP_BMS) || \
+ defined(CONFIG_QPNP_VM_BMS)
+extern struct bms_battery_data palladium_1500_data;
+extern struct bms_battery_data desay_5200_data;
+extern struct bms_battery_data oem_batt_data;
+extern struct bms_battery_data QRD_4v35_2000mAh_data;
+extern struct bms_battery_data qrd_4v2_1300mah_data;
+
+int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp);
+int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc);
+int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut,
+ int cycles);
+int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp_degc, int ocv);
+int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp_degc, int pc);
+int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp, int pc);
+int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+ int batt_temp, int ibat);
+int linear_interpolate(int y0, int x0, int y1, int x1, int x);
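+
+/*
+ * For instance, assuming the obvious (x0, y0) -> (x1, y1) interpolation,
+ * linear_interpolate(0, 0, 100, 10, 5) evaluates the line through (0, 0)
+ * and (10, 100) at x = 5 and returns 50.
+ */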
+#else
+static inline int interpolate_fcc(struct single_row_lut *fcc_temp_lut,
+ int batt_temp)
+{
+ return -EINVAL;
+}
+static inline int interpolate_scalingfactor(struct sf_lut *sf_lut,
+ int row_entry, int pc)
+{
+ return -EINVAL;
+}
+static inline int interpolate_scalingfactor_fcc(
+ struct single_row_lut *fcc_sf_lut, int cycles)
+{
+ return -EINVAL;
+}
+static inline int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp_degc, int ocv)
+{
+ return -EINVAL;
+}
+static inline int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp_degc, int pc)
+{
+ return -EINVAL;
+}
+static inline int interpolate_slope(struct pc_temp_ocv_lut *pc_temp_ocv,
+ int batt_temp, int pc)
+{
+ return -EINVAL;
+}
+static inline int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+{
+ return -EINVAL;
+}
+static inline int interpolate_acc(struct ibat_temp_acc_lut *ibat_acc_lut,
+ int batt_temp, int ibat)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif
diff --git a/include/linux/leds-qpnp-wled.h b/include/linux/leds-qpnp-wled.h
new file mode 100644
index 000000000000..6880bc41394c
--- /dev/null
+++ b/include/linux/leds-qpnp-wled.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __LEDS_QPNP_WLED_H
+#define __LEDS_QPNP_WLED_H
+
+#ifdef CONFIG_LEDS_QPNP_WLED
+int qpnp_ibb_enable(bool state);
+#else
+static inline int qpnp_ibb_enable(bool state)
+{
+ return 0;
+}
+#endif
+#endif
diff --git a/include/linux/msm_bcl.h b/include/linux/msm_bcl.h
new file mode 100644
index 000000000000..3b84f37ed956
--- /dev/null
+++ b/include/linux/msm_bcl.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BCL_H
+#define __MSM_BCL_H
+
+#define BCL_NAME_MAX_LEN 20
+
+enum bcl_trip_type {
+ BCL_HIGH_TRIP,
+ BCL_LOW_TRIP,
+ BCL_TRIP_MAX,
+};
+
+enum bcl_param {
+ BCL_PARAM_VOLTAGE,
+ BCL_PARAM_CURRENT,
+ BCL_PARAM_MAX,
+};
+
+struct bcl_threshold {
+ int trip_value;
+ enum bcl_trip_type type;
+ void *trip_data;
+ void (*trip_notify) (enum bcl_trip_type, int, void *);
+};
+struct bcl_param_data;
+struct bcl_driver_ops {
+ int (*read) (int *);
+ int (*set_high_trip) (int);
+ int (*get_high_trip) (int *);
+ int (*set_low_trip) (int);
+ int (*get_low_trip) (int *);
+ int (*disable) (void);
+ int (*enable) (void);
+ int (*notify) (struct bcl_param_data *, int,
+ enum bcl_trip_type);
+};
+
+struct bcl_param_data {
+ char name[BCL_NAME_MAX_LEN];
+ struct device device;
+ struct bcl_driver_ops *ops;
+ int high_trip;
+ int low_trip;
+ int last_read_val;
+ bool registered;
+ struct kobj_attribute val_attr;
+ struct kobj_attribute high_trip_attr;
+ struct kobj_attribute low_trip_attr;
+ struct attribute_group bcl_attr_gp;
+ struct bcl_threshold *thresh[BCL_TRIP_MAX];
+};
+
+#ifdef CONFIG_MSM_BCL_CTL
+struct bcl_param_data *msm_bcl_register_param(enum bcl_param,
+ struct bcl_driver_ops *, char *);
+int msm_bcl_unregister_param(struct bcl_param_data *);
+int msm_bcl_enable(void);
+int msm_bcl_disable(void);
+int msm_bcl_set_threshold(enum bcl_param, enum bcl_trip_type,
+ struct bcl_threshold *);
+int msm_bcl_read(enum bcl_param, int *);
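+
+/*
+ * Illustrative sketch only (the callback and trip value below are made
+ * up): a client monitoring battery voltage registers a low trip and is
+ * called back through bcl_threshold.trip_notify when it fires.
+ *
+ *	static void vbat_low_cb(enum bcl_trip_type type, int val, void *data)
+ *	{
+ *		pr_info("vbat low trip: %d\n", val);
+ *	}
+ *
+ *	static struct bcl_threshold vbat_thresh = {
+ *		.trip_value	= 3400,
+ *		.type		= BCL_LOW_TRIP,
+ *		.trip_notify	= vbat_low_cb,
+ *	};
+ *
+ *	msm_bcl_set_threshold(BCL_PARAM_VOLTAGE, BCL_LOW_TRIP, &vbat_thresh);
+ *	msm_bcl_enable();
+ */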
+#else
+static inline struct bcl_param_data *msm_bcl_register_param(
+ enum bcl_param param_type, struct bcl_driver_ops *ops, char *name)
+{
+ return NULL;
+}
+static inline int msm_bcl_unregister_param(struct bcl_param_data *data)
+{
+ return -ENOSYS;
+}
+static inline int msm_bcl_enable(void)
+{
+ return -ENOSYS;
+}
+static inline int msm_bcl_disable(void)
+{
+ return -ENOSYS;
+}
+static inline int msm_bcl_set_threshold(enum bcl_param param_type,
+ enum bcl_trip_type type,
+ struct bcl_threshold *inp_thresh)
+{
+ return -ENOSYS;
+}
+static inline int msm_bcl_read(enum bcl_param param_type, int *vbat_value)
+{
+ return -ENOSYS;
+}
+#endif
+
+#endif /*__MSM_BCL_H*/
diff --git a/include/linux/of_batterydata.h b/include/linux/of_batterydata.h
new file mode 100644
index 000000000000..fe2c996de264
--- /dev/null
+++ b/include/linux/of_batterydata.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/batterydata-lib.h>
+
+#ifdef CONFIG_OF_BATTERYDATA
+/**
+ * of_batterydata_read_data() - Populate battery data from the device tree
+ * @container_node: pointer to the battery-data container device node
+ * containing the profile nodes.
+ * @batt_data: pointer to an allocated bms_battery_data structure that the
+ * loaded profile will be written to.
+ * @batt_id_uv: ADC voltage of the battery id line used to differentiate
+ * between different battery profiles. If there are multiple
+ * battery data in the device tree, the one with the closest
+ * battery id resistance will be automatically loaded.
+ *
+ * This routine loads the closest match battery data from device tree based on
+ * the battery id reading. Then, it will try to load all the relevant data from
+ * the device tree battery data profile.
+ *
+ * If any of the lookup table pointers are NULL, this routine will skip trying
+ * to read them.
+ */
+int of_batterydata_read_data(struct device_node *container_node,
+ struct bms_battery_data *batt_data,
+ int batt_id_uv);
+/**
+ * of_batterydata_get_best_profile() - Find matching battery data device node
+ * @batterydata_container_node: pointer to the battery-data container device
+ * node containing the profile nodes.
+ * @psy_name: Name of the power supply which holds the
+ * POWER_SUPPLY_RESISTANCE_ID value to be used to match
+ * against the id resistances specified in the corresponding
+ * battery data profiles.
+ * @batt_type: Battery type which we want to force load the profile.
+ *
+ * This routine returns a device_node pointer to the closest match battery data
+ * from device tree based on the battery id reading.
+ */
+struct device_node *of_batterydata_get_best_profile(
+ struct device_node *batterydata_container_node,
+ const char *psy_name, const char *batt_type);
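+
+/*
+ * Typical usage (illustrative sketch; the node name, power supply name
+ * and battery id value are assumptions, not defined by this header):
+ *
+ *	struct device_node *node, *profile;
+ *	struct bms_battery_data data = { 0 };
+ *	int rc, batt_id_uv = 100000;
+ *
+ *	node = of_find_node_by_name(NULL, "qcom,battery-data");
+ *	profile = of_batterydata_get_best_profile(node, "bms", NULL);
+ *	rc = of_batterydata_read_data(node, &data, batt_id_uv);
+ */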
+#else
+static inline int of_batterydata_read_data(struct device_node *container_node,
+ struct bms_battery_data *batt_data,
+ int batt_id_uv)
+{
+ return -ENXIO;
+}
+static inline struct device_node *of_batterydata_get_best_profile(
+		struct device_node *batterydata_container_node,
+		const char *psy_name, const char *batt_type)
+{
+	return ERR_PTR(-ENXIO);
+}
+#endif /* CONFIG_OF_BATTERYDATA */
diff --git a/include/linux/power/qcom/apm.h b/include/linux/power/qcom/apm.h
new file mode 100644
index 000000000000..c71f9547f5d9
--- /dev/null
+++ b/include/linux/power/qcom/apm.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_POWER_QCOM_APM_H__
+#define __LINUX_POWER_QCOM_APM_H__
+
+#include <linux/device.h>
+#include <linux/err.h>
+
+/**
+ * enum msm_apm_supply - supported power rails to supply memory arrays
+ * %MSM_APM_SUPPLY_APCC: to enable selection of VDD_APCC rail as supply
+ * %MSM_APM_SUPPLY_MX: to enable selection of VDD_MX rail as supply
+ */
+enum msm_apm_supply {
+ MSM_APM_SUPPLY_APCC,
+ MSM_APM_SUPPLY_MX,
+};
+
+/* Handle used to identify an APM controller device */
+struct msm_apm_ctrl_dev;
+
+#ifdef CONFIG_MSM_APM
+struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev);
+int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev,
+ enum msm_apm_supply supply);
+int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev);
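+
+/*
+ * Illustrative sketch only: a consumer obtains a handle to its APM
+ * controller and switches the memory-array supply to VDD_MX.
+ *
+ *	struct msm_apm_ctrl_dev *apm = msm_apm_ctrl_dev_get(dev);
+ *
+ *	if (!IS_ERR(apm))
+ *		rc = msm_apm_set_supply(apm, MSM_APM_SUPPLY_MX);
+ */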
+
+#else
+static inline struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev)
+{ return ERR_PTR(-EPERM); }
+static inline int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev,
+ enum msm_apm_supply supply)
+{ return -EPERM; }
+static inline int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev)
+{ return -EPERM; }
+#endif
+#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 1c075892c6fd..647d84dd60bb 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -46,6 +46,7 @@ enum {
POWER_SUPPLY_CHARGE_TYPE_NONE,
POWER_SUPPLY_CHARGE_TYPE_TRICKLE,
POWER_SUPPLY_CHARGE_TYPE_FAST,
+ POWER_SUPPLY_CHARGE_TYPE_TAPER,
};
enum {
@@ -58,6 +59,8 @@ enum {
POWER_SUPPLY_HEALTH_COLD,
POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE,
POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE,
+ POWER_SUPPLY_HEALTH_WARM,
+ POWER_SUPPLY_HEALTH_COOL,
};
enum {
@@ -85,6 +88,22 @@ enum {
POWER_SUPPLY_SCOPE_DEVICE,
};
+enum {
+ POWER_SUPPLY_DP_DM_UNKNOWN = 0,
+ POWER_SUPPLY_DP_DM_PREPARE = 1,
+ POWER_SUPPLY_DP_DM_UNPREPARE = 2,
+ POWER_SUPPLY_DP_DM_CONFIRMED_HVDCP3 = 3,
+ POWER_SUPPLY_DP_DM_DP_PULSE = 4,
+ POWER_SUPPLY_DP_DM_DM_PULSE = 5,
+ POWER_SUPPLY_DP_DM_DP0P6_DMF = 6,
+ POWER_SUPPLY_DP_DM_DP0P6_DM3P3 = 7,
+ POWER_SUPPLY_DP_DM_DPF_DMF = 8,
+ POWER_SUPPLY_DP_DM_DPR_DMR = 9,
+ POWER_SUPPLY_DP_DM_HVDCP3_SUPPORTED = 10,
+ POWER_SUPPLY_DP_DM_ICL_DOWN = 11,
+ POWER_SUPPLY_DP_DM_ICL_UP = 12,
+};
+
enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
@@ -114,6 +133,8 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_EMPTY,
POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW_ERROR,
POWER_SUPPLY_PROP_CHARGE_AVG,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
@@ -133,6 +154,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */
POWER_SUPPLY_PROP_CAPACITY_LEVEL,
+ POWER_SUPPLY_PROP_CAPACITY_RAW,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_MAX,
POWER_SUPPLY_PROP_TEMP_MIN,
@@ -153,12 +175,47 @@ enum power_supply_property {
POWER_SUPPLY_PROP_USB_HC,
POWER_SUPPLY_PROP_USB_OTG,
POWER_SUPPLY_PROP_CHARGE_ENABLED,
+ POWER_SUPPLY_PROP_BATTERY_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_REGULATION,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_TRIM,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_VCHG_LOOP_DBC_BYPASS,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
+ POWER_SUPPLY_PROP_HI_POWER,
+ POWER_SUPPLY_PROP_LOW_POWER,
+ POWER_SUPPLY_PROP_COOL_TEMP,
+ POWER_SUPPLY_PROP_WARM_TEMP,
+ POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL,
+ POWER_SUPPLY_PROP_RESISTANCE,
+ POWER_SUPPLY_PROP_RESISTANCE_CAPACITIVE,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, /* in Ohms */
+ POWER_SUPPLY_PROP_RESISTANCE_NOW,
+ POWER_SUPPLY_PROP_FLASH_CURRENT_MAX,
+ POWER_SUPPLY_PROP_UPDATE_NOW,
+ POWER_SUPPLY_PROP_ESR_COUNT,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_ENABLE,
+ POWER_SUPPLY_PROP_CHARGE_DONE,
+ POWER_SUPPLY_PROP_FLASH_ACTIVE,
+ POWER_SUPPLY_PROP_FLASH_TRIGGER,
+ POWER_SUPPLY_PROP_FORCE_TLIM,
+ POWER_SUPPLY_PROP_DP_DM,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_RERUN_AICL,
+ POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_SAFETY_TIMER_EXPIRED,
+ POWER_SUPPLY_PROP_RESTRICTED_CHARGING,
+ POWER_SUPPLY_PROP_CURRENT_CAPABILITY,
+ POWER_SUPPLY_PROP_TYPEC_MODE,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_MANUFACTURER,
POWER_SUPPLY_PROP_SERIAL_NUMBER,
+ POWER_SUPPLY_PROP_BATTERY_TYPE,
};
enum power_supply_type {
@@ -170,6 +227,15 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_USB_DCP, /* Dedicated Charging Port */
POWER_SUPPLY_TYPE_USB_CDP, /* Charging Downstream Port */
POWER_SUPPLY_TYPE_USB_ACA, /* Accessory Charger Adapters */
+ POWER_SUPPLY_TYPE_USB_HVDCP, /* High Voltage DCP */
+ POWER_SUPPLY_TYPE_USB_HVDCP_3, /* Efficient High Voltage DCP */
+	POWER_SUPPLY_TYPE_WIRELESS,		/* Wireless charger */
+ POWER_SUPPLY_TYPE_BMS, /* Battery Monitor System */
+ POWER_SUPPLY_TYPE_USB_PARALLEL, /* USB Parallel Path */
+ POWER_SUPPLY_TYPE_WIPOWER, /* Wipower */
+	POWER_SUPPLY_TYPE_TYPEC,		/* Type-C */
+	POWER_SUPPLY_TYPE_UFP,		/* Type-C UFP */
+	POWER_SUPPLY_TYPE_DFP,		/* Type-C DFP */
};
enum power_supply_notifier_events {
@@ -369,6 +435,9 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp)
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_CURRENT_AVG:
case POWER_SUPPLY_PROP_CURRENT_BOOT:
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_FLASH_CURRENT_MAX:
return 1;
default:
break;
diff --git a/include/linux/qpnp/pin.h b/include/linux/qpnp/pin.h
new file mode 100644
index 000000000000..7fb57aa7a778
--- /dev/null
+++ b/include/linux/qpnp/pin.h
@@ -0,0 +1,226 @@
+/* Copyright (c) 2012, 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* Mode select */
+#define QPNP_PIN_MODE_DIG_IN 0
+#define QPNP_PIN_MODE_DIG_OUT 1
+#define QPNP_PIN_MODE_DIG_IN_OUT 2
+#define QPNP_PIN_MODE_ANA_PASS_THRU 3
+#define QPNP_PIN_MODE_BIDIR 3
+#define QPNP_PIN_MODE_AIN 4
+#define QPNP_PIN_MODE_AOUT 5
+#define QPNP_PIN_MODE_SINK 6
+
+/* Invert source select (GPIO, MPP) */
+#define QPNP_PIN_INVERT_DISABLE 0
+#define QPNP_PIN_INVERT_ENABLE 1
+
+/* Output type (GPIO) */
+#define QPNP_PIN_OUT_BUF_CMOS 0
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_NMOS 1
+#define QPNP_PIN_OUT_BUF_OPEN_DRAIN_PMOS 2
+#define QPNP_PIN_OUT_BUF_NO_DRIVE 3
+
+/* Voltage select (GPIO, MPP) */
+#define QPNP_PIN_VIN0 0
+#define QPNP_PIN_VIN1 1
+#define QPNP_PIN_VIN2 2
+#define QPNP_PIN_VIN3 3
+#define QPNP_PIN_VIN4 4
+#define QPNP_PIN_VIN5 5
+#define QPNP_PIN_VIN6 6
+#define QPNP_PIN_VIN7 7
+
+/* Pull Up Values (GPIO) */
+#define QPNP_PIN_GPIO_PULL_UP_30 0
+#define QPNP_PIN_GPIO_PULL_UP_1P5 1
+#define QPNP_PIN_GPIO_PULL_UP_31P5 2
+#define QPNP_PIN_GPIO_PULL_UP_1P5_30 3
+#define QPNP_PIN_GPIO_PULL_DN 4
+#define QPNP_PIN_GPIO_PULL_NO 5
+
+/* Pull Up Values (MPP) */
+#define QPNP_PIN_MPP_PULL_UP_0P6KOHM 0
+#define QPNP_PIN_MPP_PULL_UP_OPEN 1
+#define QPNP_PIN_MPP_PULL_UP_10KOHM 2
+#define QPNP_PIN_MPP_PULL_UP_30KOHM 3
+
+/* Out Strength (GPIO) */
+#define QPNP_PIN_OUT_STRENGTH_LOW 1
+#define QPNP_PIN_OUT_STRENGTH_MED 2
+#define QPNP_PIN_OUT_STRENGTH_HIGH 3
+
+/* Digital-in CTL (GPIO/MPP) */
+#define QPNP_PIN_DIG_IN_CTL_DTEST1 1
+#define QPNP_PIN_DIG_IN_CTL_DTEST2 2
+#define QPNP_PIN_DIG_IN_CTL_DTEST3 3
+#define QPNP_PIN_DIG_IN_CTL_DTEST4 4
+
+/* Source Select (GPIO) / Enable Select (MPP) */
+#define QPNP_PIN_SEL_FUNC_CONSTANT 0
+#define QPNP_PIN_SEL_FUNC_PAIRED 1
+#define QPNP_PIN_SEL_FUNC_1 2
+#define QPNP_PIN_SEL_FUNC_2 3
+#define QPNP_PIN_SEL_DTEST1 4
+#define QPNP_PIN_SEL_DTEST2 5
+#define QPNP_PIN_SEL_DTEST3 6
+#define QPNP_PIN_SEL_DTEST4 7
+
+/* Source Select for GPIO_LV/GPIO_MV only */
+#define QPNP_PIN_LV_MV_SEL_FUNC_CONSTANT 0
+#define QPNP_PIN_LV_MV_SEL_FUNC_PAIRED 1
+#define QPNP_PIN_LV_MV_SEL_FUNC_1 2
+#define QPNP_PIN_LV_MV_SEL_FUNC_2 3
+#define QPNP_PIN_LV_MV_SEL_FUNC_3 4
+#define QPNP_PIN_LV_MV_SEL_FUNC_4 5
+#define QPNP_PIN_LV_MV_SEL_DTEST1 6
+#define QPNP_PIN_LV_MV_SEL_DTEST2 7
+#define QPNP_PIN_LV_MV_SEL_DTEST3 8
+#define QPNP_PIN_LV_MV_SEL_DTEST4 9
+
+/* Master enable (GPIO, MPP) */
+#define QPNP_PIN_MASTER_DISABLE 0
+#define QPNP_PIN_MASTER_ENABLE 1
+
+/* Analog Output (MPP) */
+#define QPNP_PIN_AOUT_1V25 0
+#define QPNP_PIN_AOUT_0V625 1
+#define QPNP_PIN_AOUT_0V3125 2
+#define QPNP_PIN_AOUT_MPP 3
+#define QPNP_PIN_AOUT_ABUS1 4
+#define QPNP_PIN_AOUT_ABUS2 5
+#define QPNP_PIN_AOUT_ABUS3 6
+#define QPNP_PIN_AOUT_ABUS4 7
+
+/* Analog Input (MPP) */
+#define QPNP_PIN_AIN_AMUX_CH5 0
+#define QPNP_PIN_AIN_AMUX_CH6 1
+#define QPNP_PIN_AIN_AMUX_CH7 2
+#define QPNP_PIN_AIN_AMUX_CH8 3
+#define QPNP_PIN_AIN_AMUX_ABUS1 4
+#define QPNP_PIN_AIN_AMUX_ABUS2 5
+#define QPNP_PIN_AIN_AMUX_ABUS3 6
+#define QPNP_PIN_AIN_AMUX_ABUS4 7
+
+/* Current Sink (MPP) */
+#define QPNP_PIN_CS_OUT_5MA 0
+#define QPNP_PIN_CS_OUT_10MA 1
+#define QPNP_PIN_CS_OUT_15MA 2
+#define QPNP_PIN_CS_OUT_20MA 3
+#define QPNP_PIN_CS_OUT_25MA 4
+#define QPNP_PIN_CS_OUT_30MA 5
+#define QPNP_PIN_CS_OUT_35MA 6
+#define QPNP_PIN_CS_OUT_40MA 7
+
+/* ANALOG PASS SEL (GPIO LV/MV) */
+#define QPNP_PIN_APASS_SEL_ATEST1 0
+#define QPNP_PIN_APASS_SEL_ATEST2 1
+#define QPNP_PIN_APASS_SEL_ATEST3 2
+#define QPNP_PIN_APASS_SEL_ATEST4 3
+
+/**
+ * struct qpnp_pin_cfg - structure to specify pin configuration values
+ * @mode: indicates whether the pin should be input, output, or
+ * both for gpios. mpp pins also support bidirectional,
+ * analog in, analog out and current sink. This value
+ * should be of type QPNP_PIN_MODE_*.
+ * @output_type: indicates pin should be configured as CMOS or open
+ * drain. Should be of the type QPNP_PIN_OUT_BUF_*. This
+ * setting applies for gpios only.
+ * @invert: Invert the signal of the line -
+ * QPNP_PIN_INVERT_DISABLE or QPNP_PIN_INVERT_ENABLE.
+ * @pull: This parameter should be programmed to different values
+ * depending on whether it's GPIO or MPP.
+ * For GPIO, it indicates whether a pull up or pull down
+ * should be applied. If a pullup is required the
+ * current strength needs to be specified.
+ * Current values of 30uA, 1.5uA, 31.5uA, 1.5uA with 30uA
+ * boost are supported. This value should be one of
+ * the QPNP_PIN_GPIO_PULL_*. Note that the hardware ignores
+ * this configuration if the GPIO is not set to input or
+ * output open-drain mode.
+ * For MPP, it indicates whether a pullup should be
+ *			applied for bidirectional mode only. The hardware
+ * ignores the configuration when operating in other modes.
+ * This value should be one of the QPNP_PIN_MPP_PULL_*.
+ * @vin_sel: specifies the voltage level when the output is set to 1.
+ * For an input gpio specifies the voltage level at which
+ * the input is interpreted as a logical 1.
+ * @out_strength: the amount of current supplied for an output gpio,
+ *			should be of the type QPNP_PIN_OUT_STRENGTH_*.
+ * @src_sel: select alternate function for the pin. Certain pins
+ * can be paired (shorted) with each other. Some pins
+ * can act as alternate functions. In the context of
+ * gpio, this acts as a source select. For mpps,
+ * this is an enable select.
+ * This parameter should be of type QPNP_PIN_SEL_*.
+ * @master_en: QPNP_PIN_MASTER_ENABLE = Enable features within the
+ * pin block based on configurations.
+ * QPNP_PIN_MASTER_DISABLE = Completely disable the pin
+ * block and let the pin float with high impedance
+ * regardless of other settings.
+ * @aout_ref: Set the analog output reference. This parameter should
+ * be of type QPNP_PIN_AOUT_*. This parameter only applies
+ * to mpp pins.
+ * @ain_route: Set the source for analog input. This parameter
+ * should be of type QPNP_PIN_AIN_*. This parameter only
+ * applies to mpp pins.
+ * @cs_out:		Set the amount of current to sink in mA. This
+ * parameter should be of type QPNP_PIN_CS_OUT_*. This
+ * parameter only applies to mpp pins.
+ * @apass_sel:		Set the ATEST line to which the signal is to be
+ *			routed. The parameter should be of type
+ * QPNP_PIN_APASS_SEL_*. This
+ * parameter only applies to GPIO LV/MV pins.
+ * @dtest_sel:		Select the DTEST line to which the signal
+ *			is routed. The parameter should be of type
+ * QPNP_PIN_DIG_IN_CTL_*. The parameter applies
+ * to both gpio and mpp pins.
+ */
+struct qpnp_pin_cfg {
+ int mode;
+ int output_type;
+ int invert;
+ int pull;
+ int vin_sel;
+ int out_strength;
+ int src_sel;
+ int master_en;
+ int aout_ref;
+ int ain_route;
+ int cs_out;
+ int apass_sel;
+ int dtest_sel;
+};
+
+/**
+ * qpnp_pin_config - Apply pin configuration for Linux gpio
+ * @gpio: Linux gpio number to configure.
+ * @param: parameters to configure.
+ *
+ * This routine takes a Linux gpio number that corresponds with a
+ * PMIC pin and applies the configuration specified in 'param'.
+ * This gpio number can be ascertained by of_get_gpio_flags() or
+ * the qpnp_pin_map_gpio() API.
+ */
+int qpnp_pin_config(int gpio, struct qpnp_pin_cfg *param);
+
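+/*
+ * Example (illustrative sketch; the gpio number comes from the caller's
+ * device tree lookup and is not defined here): configure a PMIC GPIO as
+ * a CMOS push-pull digital output driving a logical 1.
+ *
+ *	struct qpnp_pin_cfg param = {
+ *		.mode		= QPNP_PIN_MODE_DIG_OUT,
+ *		.output_type	= QPNP_PIN_OUT_BUF_CMOS,
+ *		.invert		= QPNP_PIN_INVERT_ENABLE,
+ *		.pull		= QPNP_PIN_GPIO_PULL_NO,
+ *		.vin_sel	= QPNP_PIN_VIN2,
+ *		.out_strength	= QPNP_PIN_OUT_STRENGTH_LOW,
+ *		.src_sel	= QPNP_PIN_SEL_FUNC_CONSTANT,
+ *		.master_en	= QPNP_PIN_MASTER_ENABLE,
+ *	};
+ *	int rc = qpnp_pin_config(gpio, &param);
+ */
+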
+/**
+ * qpnp_pin_map - Obtain Linux GPIO number from device spec
+ * @name: Name assigned by the 'label' binding for the primary node.
+ * @pmic_pin: PMIC pin number to lookup.
+ *
+ * This routine is used in legacy configurations that do not support
+ * Device Tree. If you are using Device Tree, you should not use this.
+ * For such cases, use of_get_gpio() or friends instead.
+ */
+int qpnp_pin_map(const char *name, uint32_t pmic_pin);
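+
+/*
+ * Illustrative sketch (the label string and pin number are assumptions;
+ * the label must match the "label" property of the controller node):
+ *
+ *	int gpio = qpnp_pin_map("pm8941-gpio", 6);
+ */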
diff --git a/include/linux/qpnp/power-on.h b/include/linux/qpnp/power-on.h
new file mode 100644
index 000000000000..da8f5a8622dd
--- /dev/null
+++ b/include/linux/qpnp/power-on.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef QPNP_PON_H
+#define QPNP_PON_H
+
+#include <linux/errno.h>
+
+/**
+ * enum pon_trigger_source: List of PON trigger sources
+ * %PON_SMPL: PON triggered by SMPL - Sudden Momentary Power Loss
+ * %PON_RTC: PON triggered by RTC alarm
+ * %PON_DC_CHG: PON triggered by insertion of DC charger
+ * %PON_USB_CHG: PON triggered by insertion of USB
+ * %PON_PON1: PON triggered by other PMIC (multi-PMIC option)
+ * %PON_CBLPWR_N: PON triggered by power-cable insertion
+ * %PON_KPDPWR_N: PON triggered by long press of the power-key
+ */
+enum pon_trigger_source {
+ PON_SMPL = 1,
+ PON_RTC,
+ PON_DC_CHG,
+ PON_USB_CHG,
+ PON_PON1,
+ PON_CBLPWR_N,
+ PON_KPDPWR_N,
+};
+
+/**
+ * enum pon_power_off_type: Possible power off actions to perform
+ * %PON_POWER_OFF_RESERVED: Reserved, not used
+ * %PON_POWER_OFF_WARM_RESET: Reset the MSM but not all PMIC peripherals
+ * %PON_POWER_OFF_SHUTDOWN: Shutdown the MSM and PMIC completely
+ * %PON_POWER_OFF_HARD_RESET: Reset the MSM and all PMIC peripherals
+ */
+enum pon_power_off_type {
+ PON_POWER_OFF_RESERVED = 0x00,
+ PON_POWER_OFF_WARM_RESET = 0x01,
+ PON_POWER_OFF_SHUTDOWN = 0x04,
+ PON_POWER_OFF_HARD_RESET = 0x07,
+ PON_POWER_OFF_MAX_TYPE = 0x10,
+};
+
+enum pon_restart_reason {
+ PON_RESTART_REASON_UNKNOWN = 0x00,
+ PON_RESTART_REASON_RECOVERY = 0x01,
+ PON_RESTART_REASON_BOOTLOADER = 0x02,
+ PON_RESTART_REASON_RTC = 0x03,
+ PON_RESTART_REASON_DMVERITY_CORRUPTED = 0x04,
+ PON_RESTART_REASON_DMVERITY_ENFORCE = 0x05,
+ PON_RESTART_REASON_KEYS_CLEAR = 0x06,
+};
+
+#ifdef CONFIG_QPNP_POWER_ON
+int qpnp_pon_system_pwr_off(enum pon_power_off_type type);
+int qpnp_pon_is_warm_reset(void);
+int qpnp_pon_trigger_config(enum pon_trigger_source pon_src, bool enable);
+int qpnp_pon_wd_config(bool enable);
+int qpnp_pon_set_restart_reason(enum pon_restart_reason reason);
+bool qpnp_pon_check_hard_reset_stored(void);
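+
+/*
+ * Illustrative sketch: a platform reboot/shutdown path would typically
+ * select the PON power-off action first, e.g. request a hard reset so
+ * that all PMIC peripherals are reset as well:
+ *
+ *	rc = qpnp_pon_system_pwr_off(PON_POWER_OFF_HARD_RESET);
+ *	if (rc)
+ *		pr_err("Error configuring PON power off, rc=%d\n", rc);
+ */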
+
+#else
+static inline int qpnp_pon_system_pwr_off(enum pon_power_off_type type)
+{
+ return -ENODEV;
+}
+static inline int qpnp_pon_is_warm_reset(void) { return -ENODEV; }
+static inline int qpnp_pon_trigger_config(enum pon_trigger_source pon_src,
+ bool enable)
+{
+ return -ENODEV;
+}
+static inline int qpnp_pon_wd_config(bool enable)
+{
+ return -ENODEV;
+}
+static inline int qpnp_pon_set_restart_reason(enum pon_restart_reason reason)
+{
+ return -ENODEV;
+}
+static inline bool qpnp_pon_check_hard_reset_stored(void)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
new file mode 100644
index 000000000000..63bbbd69a94a
--- /dev/null
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -0,0 +1,2268 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC QPNP ADC driver header file
+ */
+
+#ifndef __QPNP_ADC_H
+#define __QPNP_ADC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/consumer.h>
+/**
+ * enum qpnp_vadc_channels - QPNP AMUX arbiter channels
+ */
+enum qpnp_vadc_channels {
+ USBIN = 0,
+ DCIN,
+ VCHG_SNS,
+ SPARE1_03,
+ USB_ID_MV,
+ VCOIN,
+ VBAT_SNS,
+ VSYS,
+ DIE_TEMP,
+ REF_625MV,
+ REF_125V,
+ CHG_TEMP,
+ SPARE1,
+ SPARE2,
+ GND_REF,
+ VDD_VADC,
+ P_MUX1_1_1,
+ P_MUX2_1_1,
+ P_MUX3_1_1,
+ P_MUX4_1_1,
+ P_MUX5_1_1,
+ P_MUX6_1_1,
+ P_MUX7_1_1,
+ P_MUX8_1_1,
+ P_MUX9_1_1,
+ P_MUX10_1_1,
+ P_MUX11_1_1,
+ P_MUX12_1_1,
+ P_MUX13_1_1,
+ P_MUX14_1_1,
+ P_MUX15_1_1,
+ P_MUX16_1_1,
+ P_MUX1_1_3,
+ P_MUX2_1_3,
+ P_MUX3_1_3,
+ P_MUX4_1_3,
+ P_MUX5_1_3,
+ P_MUX6_1_3,
+ P_MUX7_1_3,
+ P_MUX8_1_3,
+ P_MUX9_1_3,
+ P_MUX10_1_3,
+ P_MUX11_1_3,
+ P_MUX12_1_3,
+ P_MUX13_1_3,
+ P_MUX14_1_3,
+ P_MUX15_1_3,
+ P_MUX16_1_3,
+ LR_MUX1_BATT_THERM,
+ LR_MUX2_BAT_ID,
+ LR_MUX3_XO_THERM,
+ LR_MUX4_AMUX_THM1,
+ LR_MUX5_AMUX_THM2,
+ LR_MUX6_AMUX_THM3,
+ LR_MUX7_HW_ID,
+ LR_MUX8_AMUX_THM4,
+ LR_MUX9_AMUX_THM5,
+ LR_MUX10_USB_ID_LV,
+ AMUX_PU1,
+ AMUX_PU2,
+ LR_MUX3_BUF_XO_THERM_BUF,
+ LR_MUX1_PU1_BAT_THERM = 112,
+ LR_MUX2_PU1_BAT_ID = 113,
+ LR_MUX3_PU1_XO_THERM = 114,
+ LR_MUX4_PU1_AMUX_THM1 = 115,
+ LR_MUX5_PU1_AMUX_THM2 = 116,
+ LR_MUX6_PU1_AMUX_THM3 = 117,
+ LR_MUX7_PU1_AMUX_HW_ID = 118,
+ LR_MUX8_PU1_AMUX_THM4 = 119,
+ LR_MUX9_PU1_AMUX_THM5 = 120,
+ LR_MUX10_PU1_AMUX_USB_ID_LV = 121,
+ LR_MUX3_BUF_PU1_XO_THERM_BUF = 124,
+ LR_MUX1_PU2_BAT_THERM = 176,
+ LR_MUX2_PU2_BAT_ID = 177,
+ LR_MUX3_PU2_XO_THERM = 178,
+ LR_MUX4_PU2_AMUX_THM1 = 179,
+ LR_MUX5_PU2_AMUX_THM2 = 180,
+ LR_MUX6_PU2_AMUX_THM3 = 181,
+ LR_MUX7_PU2_AMUX_HW_ID = 182,
+ LR_MUX8_PU2_AMUX_THM4 = 183,
+ LR_MUX9_PU2_AMUX_THM5 = 184,
+ LR_MUX10_PU2_AMUX_USB_ID_LV = 185,
+ LR_MUX3_BUF_PU2_XO_THERM_BUF = 188,
+ LR_MUX1_PU1_PU2_BAT_THERM = 240,
+ LR_MUX2_PU1_PU2_BAT_ID = 241,
+ LR_MUX3_PU1_PU2_XO_THERM = 242,
+ LR_MUX4_PU1_PU2_AMUX_THM1 = 243,
+ LR_MUX5_PU1_PU2_AMUX_THM2 = 244,
+ LR_MUX6_PU1_PU2_AMUX_THM3 = 245,
+ LR_MUX7_PU1_PU2_AMUX_HW_ID = 246,
+ LR_MUX8_PU1_PU2_AMUX_THM4 = 247,
+ LR_MUX9_PU1_PU2_AMUX_THM5 = 248,
+ LR_MUX10_PU1_PU2_AMUX_USB_ID_LV = 249,
+ LR_MUX3_BUF_PU1_PU2_XO_THERM_BUF = 252,
+ ALL_OFF = 255,
+ ADC_MAX_NUM = 0xffff,
+
+ /* Channel listing for refreshed VADC in hex format */
+ VADC_VREF_GND = 0,
+ VADC_CALIB_VREF_1P25 = 1,
+ VADC_CALIB_VREF = 2,
+ VADC_CALIB_VREF_1_DIV_3 = 0x82,
+ VADC_VPH_PWR = 0x83,
+ VADC_VBAT_SNS = 0x84,
+ VADC_VCOIN = 0x85,
+ VADC_DIE_TEMP = 6,
+ VADC_CHG_TEMP = 7,
+ VADC_USB_IN = 8,
+ VADC_IREG_FB = 9,
+ /* External input connection */
+ VADC_BAT_THERM = 0xa,
+ VADC_BAT_ID = 0xb,
+ VADC_XO_THERM = 0xc,
+ VADC_AMUX_THM1 = 0xd,
+ VADC_AMUX_THM2 = 0xe,
+ VADC_AMUX_THM3 = 0xf,
+ VADC_AMUX_THM4 = 0x10,
+ VADC_AMUX_THM5 = 0x11,
+ VADC_AMUX1_GPIO = 0x12,
+ VADC_AMUX2_GPIO = 0x13,
+ VADC_AMUX3_GPIO = 0x14,
+ VADC_AMUX4_GPIO = 0x15,
+ VADC_AMUX5_GPIO = 0x16,
+ VADC_AMUX6_GPIO = 0x17,
+ VADC_AMUX7_GPIO = 0x18,
+ VADC_AMUX8_GPIO = 0x19,
+ VADC_ATEST1 = 0x1a,
+ VADC_ATEST2 = 0x1b,
+ VADC_ATEST3 = 0x1c,
+ VADC_ATEST4 = 0x1d,
+ VADC_OFF = 0xff,
+ /* PU1 is 30K pull up */
+ VADC_BAT_THERM_PU1 = 0x2a,
+ VADC_BAT_ID_PU1 = 0x2b,
+ VADC_XO_THERM_PU1 = 0x2c,
+ VADC_AMUX_THM1_PU1 = 0x2d,
+ VADC_AMUX_THM2_PU1 = 0x2e,
+ VADC_AMUX_THM3_PU1 = 0x2f,
+ VADC_AMUX_THM4_PU1 = 0x30,
+ VADC_AMUX_THM5_PU1 = 0x31,
+ VADC_AMUX1_GPIO_PU1 = 0x32,
+ VADC_AMUX2_GPIO_PU1 = 0x33,
+ VADC_AMUX3_GPIO_PU1 = 0x34,
+ VADC_AMUX4_GPIO_PU1 = 0x35,
+ VADC_AMUX5_GPIO_PU1 = 0x36,
+ VADC_AMUX6_GPIO_PU1 = 0x37,
+ VADC_AMUX7_GPIO_PU1 = 0x38,
+ VADC_AMUX8_GPIO_PU1 = 0x39,
+ /* PU2 is 100K pull up */
+ VADC_BAT_THERM_PU2 = 0x4a,
+ VADC_BAT_ID_PU2 = 0x4b,
+ VADC_XO_THERM_PU2 = 0x4c,
+ VADC_AMUX_THM1_PU2 = 0x4d,
+ VADC_AMUX_THM2_PU2 = 0x4e,
+ VADC_AMUX_THM3_PU2 = 0x4f,
+ VADC_AMUX_THM4_PU2 = 0x50,
+ VADC_AMUX_THM5_PU2 = 0x51,
+ VADC_AMUX1_GPIO_PU2 = 0x52,
+ VADC_AMUX2_GPIO_PU2 = 0x53,
+ VADC_AMUX3_GPIO_PU2 = 0x54,
+ VADC_AMUX4_GPIO_PU2 = 0x55,
+ VADC_AMUX5_GPIO_PU2 = 0x56,
+ VADC_AMUX6_GPIO_PU2 = 0x57,
+ VADC_AMUX7_GPIO_PU2 = 0x58,
+ VADC_AMUX8_GPIO_PU2 = 0x59,
+ /* PU3 is 400K pull up */
+ VADC_BAT_THERM_PU3 = 0x6a,
+ VADC_BAT_ID_PU3 = 0x6b,
+ VADC_XO_THERM_PU3 = 0x6c,
+ VADC_AMUX_THM1_PU3 = 0x6d,
+ VADC_AMUX_THM2_PU3 = 0x6e,
+ VADC_AMUX_THM3_PU3 = 0x6f,
+ VADC_AMUX_THM4_PU3 = 0x70,
+ VADC_AMUX_THM5_PU3 = 0x71,
+ VADC_AMUX1_GPIO_PU3 = 0x72,
+ VADC_AMUX2_GPIO_PU3 = 0x73,
+ VADC_AMUX3_GPIO_PU3 = 0x74,
+ VADC_AMUX4_GPIO_PU3 = 0x75,
+ VADC_AMUX5_GPIO_PU3 = 0x76,
+ VADC_AMUX6_GPIO_PU3 = 0x77,
+ VADC_AMUX7_GPIO_PU3 = 0x78,
+ VADC_AMUX8_GPIO_PU3 = 0x79,
+ /* External input connection with 1/3 div */
+ VADC_AMUX1_GPIO_DIV_3 = 0x92,
+ VADC_AMUX2_GPIO_DIV_3 = 0x93,
+ VADC_AMUX3_GPIO_DIV_3 = 0x94,
+ VADC_AMUX4_GPIO_DIV_3 = 0x95,
+ VADC_AMUX5_GPIO_DIV_3 = 0x96,
+ VADC_AMUX6_GPIO_DIV_3 = 0x97,
+ VADC_AMUX7_GPIO_DIV_3 = 0x98,
+ VADC_AMUX8_GPIO_DIV_3 = 0x99,
+ VADC_ATEST1_DIV_3 = 0x9a,
+ VADC_ATEST2_DIV_3 = 0x9b,
+ VADC_ATEST3_DIV_3 = 0x9c,
+ VADC_ATEST4_DIV_3 = 0x9d,
+ VADC_REFRESH_MAX_NUM = 0xffff,
+};
+
+/**
+ * enum qpnp_iadc_channels - QPNP IADC channel list
+ */
+enum qpnp_iadc_channels {
+ INTERNAL_RSENSE = 0,
+ EXTERNAL_RSENSE,
+ ALT_LEAD_PAIR,
+ GAIN_CALIBRATION_17P857MV,
+ OFFSET_CALIBRATION_SHORT_CADC_LEADS,
+ OFFSET_CALIBRATION_CSP_CSN,
+ OFFSET_CALIBRATION_CSP2_CSN2,
+ IADC_MUX_NUM,
+};
+
+#define QPNP_ADC_625_UV 625000
+#define QPNP_ADC_HWMON_NAME_LENGTH 64
+#define QPNP_MAX_PROP_NAME_LEN 32
+#define QPNP_THERMALNODE_NAME_LENGTH 25
+
+/* Structure device for qpnp vadc */
+struct qpnp_vadc_chip;
+
+/* Structure device for qpnp iadc */
+struct qpnp_iadc_chip;
+
+/* Structure device for qpnp adc tm */
+struct qpnp_adc_tm_chip;
+
+/**
+ * enum qpnp_adc_clk_type - Clock rate supported.
+ * %CLK_TYPE1: 2P4MHZ
+ * %CLK_TYPE2: 4P8MHZ
+ * %CLK_TYPE3: 9P6MHZ
+ * %CLK_TYPE4: 19P2MHZ
+ * %CLK_NONE: Do not use this Clk type.
+ *
+ * The Clock rate is specific to each channel of the QPNP ADC arbiter.
+ */
+enum qpnp_adc_clk_type {
+ CLK_TYPE1 = 0,
+ CLK_TYPE2,
+ CLK_TYPE3,
+ CLK_TYPE4,
+ CLK_NONE,
+};
+
+/**
+ * enum qpnp_adc_decimation_type - Sampling rate supported.
+ * %DECIMATION_TYPE1: 512
+ * %DECIMATION_TYPE2: 1K
+ * %DECIMATION_TYPE3: 2K
+ * %DECIMATION_TYPE4: 4k
+ * %DECIMATION_NONE: Do not use this Sampling type.
+ *
+ * The Sampling rate is specific to each channel of the QPNP ADC arbiter.
+ */
+enum qpnp_adc_decimation_type {
+ DECIMATION_TYPE1 = 0,
+ DECIMATION_TYPE2,
+ DECIMATION_TYPE3,
+ DECIMATION_TYPE4,
+ DECIMATION_NONE = 0xff,
+
+ ADC_HC_DEC_RATIO_256 = 0,
+ ADC_HC_DEC_RATIO_512 = 1,
+ ADC_HC_DEC_RATIO_1024 = 2,
+ ADC_HC_DEC_RATIO_NONE = 0xff,
+};
+
+/**
+ * enum qpnp_adc_calib_type - QPNP ADC Calibration type.
+ * %ADC_CALIB_ABSOLUTE: Use 625mV and 1.25V reference channels.
+ * %ADC_CALIB_RATIOMETRIC: Use reference Voltage/GND.
+ * %ADC_CALIB_CONFIG_NONE: Do not use this calibration type.
+ *
+ * enum qpnp_adc_cal_sel - Selects the calibration type that is applied
+ * on the corresponding channel measurement after
+ * the ADC data is read.
+ * %ADC_HC_NO_CAL : To obtain raw, uncalibrated data on qpnp-vadc-hc type.
+ * %ADC_HC_RATIO_CAL : Applies ratiometric calibration. Note the calibration
+ * values stored in the CAL peripheral for VADC_VREF and
+ * VREF_1P25 already have GND_REF value removed. Used
+ * only with qpnp-vadc-hc type of VADC.
+ * %ADC_HC_ABS_CAL : Applies absolute calibration. Note the calibration
+ * values stored in the CAL peripheral for VADC_VREF and
+ * VREF_1P25 already have GND_REF value removed. Used
+ * only with qpnp-vadc-hc type of VADC.
+ *
+ * Use the input reference voltage depending on the calibration type
+ * to calculate the offset and gain parameters. The calibration is
+ * specific to each channel of the QPNP ADC.
+ */
+enum qpnp_adc_calib_type {
+ CALIB_ABSOLUTE = 0,
+ CALIB_RATIOMETRIC,
+ CALIB_NONE,
+
+ ADC_HC_NO_CAL = 0,
+ ADC_HC_RATIO_CAL = 1,
+ ADC_HC_ABS_CAL = 2,
+ ADC_HC_CAL_SEL_NONE,
+};
+
+/**
+ * enum qpnp_adc_channel_scaling_param - pre-scaling AMUX ratio.
+ * %CHAN_PATH_SCALING0: ratio of {1, 1}
+ * %CHAN_PATH_SCALING1: ratio of {1, 3}
+ * %CHAN_PATH_SCALING2: ratio of {1, 4}
+ * %CHAN_PATH_SCALING3: ratio of {1, 6}
+ * %CHAN_PATH_SCALING4: ratio of {1, 20}
+ * %CHAN_PATH_SCALING5: ratio of {1, 8}
+ * %CHAN_PATH_SCALING6: ratio of {10, 81} The actual ratio is (1/8.1).
+ * %CHAN_PATH_SCALING7: ratio of {1, 10}
+ * %CHAN_PATH_NONE: Do not use this pre-scaling ratio type.
+ *
+ * The pre-scaling is applied for signals to be within the voltage range
+ * of the ADC.
+ */
+enum qpnp_adc_channel_scaling_param {
+ PATH_SCALING0 = 0,
+ PATH_SCALING1,
+ PATH_SCALING2,
+ PATH_SCALING3,
+ PATH_SCALING4,
+ PATH_SCALING5,
+ PATH_SCALING6,
+ PATH_SCALING7,
+ PATH_SCALING_NONE,
+};
+
+/**
+ * enum qpnp_adc_scale_fn_type - Scaling function for pm8941 pre calibrated
+ * digital data relative to ADC reference.
+ * %SCALE_DEFAULT: Default scaling to convert raw adc code to voltage (uV).
+ * %SCALE_BATT_THERM: Conversion to temperature(decidegC) based on btm
+ * parameters.
+ * %SCALE_THERM_100K_PULLUP: Returns temperature in degC.
+ * Uses a mapping table with 100K pullup.
+ * %SCALE_PMIC_THERM: Returns result in millidegrees Celsius.
+ * %SCALE_XOTHERM: Returns XO thermistor voltage in degrees Celsius.
+ * %SCALE_THERM_150K_PULLUP: Returns temperature in degC.
+ * Uses a mapping table with 150K pullup.
+ * %SCALE_QRD_BATT_THERM: Conversion to temperature(decidegC) based on
+ * btm parameters.
+ * %SCALE_QRD_SKUAA_BATT_THERM: Conversion to temperature(decidegC) based on
+ * btm parameters for SKUAA.
+ * %SCALE_SMB_BATT_THERM: Conversion to temperature(decidegC) based on
+ * btm parameters for SMB.
+ * %SCALE_QRD_SKUG_BATT_THERM: Conversion to temperature(decidegC) based on
+ * btm parameters for SKUG.
+ * %SCALE_QRD_SKUH_BATT_THERM: Conversion to temperature(decidegC) based on
+ *			btm parameters for SKUH
+ * %SCALE_NCP_03WF683_THERM: Conversion to temperature based on the
+ *			NCP03WF683 thermistor characteristics.
+ * %SCALE_QRD_SKUT1_BATT_THERM: Conversion to temperature(decidegC) based on
+ *			btm parameters for SKUT1
+ * %SCALE_PMI_CHG_TEMP: Conversion for PMI CHG temp
+ * %SCALE_NONE: Do not use this scaling type.
+ */
+enum qpnp_adc_scale_fn_type {
+ SCALE_DEFAULT = 0,
+ SCALE_BATT_THERM,
+ SCALE_THERM_100K_PULLUP,
+ SCALE_PMIC_THERM,
+ SCALE_XOTHERM,
+ SCALE_THERM_150K_PULLUP,
+ SCALE_QRD_BATT_THERM,
+ SCALE_QRD_SKUAA_BATT_THERM,
+ SCALE_SMB_BATT_THERM,
+ SCALE_QRD_SKUG_BATT_THERM,
+ SCALE_QRD_SKUH_BATT_THERM,
+ SCALE_NCP_03WF683_THERM,
+ SCALE_QRD_SKUT1_BATT_THERM,
+ SCALE_PMI_CHG_TEMP = 16,
+ SCALE_NONE,
+};
+
+/**
+ * enum qpnp_adc_tm_rscale_fn_type - Scaling function used to convert the
+ * channels input voltage/temperature to corresponding ADC code that is
+ * applied for thresholds. Check the corresponding channels scaling to
+ * determine the appropriate temperature/voltage units that are passed
+ * to the scaling function. For example, the battery channel follows the
+ * power supply framework, which expects units of decidegrees Celsius, so
+ * it passes decidegreesC. PA_THERM clients pass the temperature in degrees.
+ * The order below should match the one in the driver for
+ * adc_tm_rscale_fn[].
+ */
+enum qpnp_adc_tm_rscale_fn_type {
+ SCALE_R_VBATT = 0,
+ SCALE_RBATT_THERM,
+ SCALE_R_USB_ID,
+ SCALE_RPMIC_THERM,
+ SCALE_R_SMB_BATT_THERM,
+ SCALE_R_ABSOLUTE,
+ SCALE_QRD_SKUH_RBATT_THERM,
+ SCALE_QRD_SKUT1_RBATT_THERM,
+ SCALE_RSCALE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_rscale_fn_type - Scaling function used to convert the
+ *	channel's input voltage/temperature to the corresponding ADC code
+ *	that is applied for thresholds. Check the corresponding channel's
+ *	scaling to determine the temperature/voltage units that are passed
+ *	to the scaling function. The order below should match the one in the
+ * driver for qpnp_adc_scale_fn[].
+ */
+enum qpnp_vadc_rscale_fn_type {
+ SCALE_RVADC_ABSOLUTE = 0,
+ SCALE_RVADC_SCALE_NONE,
+};
+
+/**
+ * enum qpnp_adc_fast_avg_ctl - Provides ability to obtain single result
+ * from the ADC that is an average of multiple measurement
+ * samples. Select number of samples for use in fast
+ * average mode (i.e. 2 ^ value).
+ * %ADC_FAST_AVG_SAMPLE_1: 0x0 = 1
+ * %ADC_FAST_AVG_SAMPLE_2: 0x1 = 2
+ * %ADC_FAST_AVG_SAMPLE_4: 0x2 = 4
+ * %ADC_FAST_AVG_SAMPLE_8: 0x3 = 8
+ * %ADC_FAST_AVG_SAMPLE_16: 0x4 = 16
+ * %ADC_FAST_AVG_SAMPLE_32: 0x5 = 32
+ * %ADC_FAST_AVG_SAMPLE_64: 0x6 = 64
+ * %ADC_FAST_AVG_SAMPLE_128: 0x7 = 128
+ * %ADC_FAST_AVG_SAMPLE_256: 0x8 = 256
+ * %ADC_FAST_AVG_SAMPLE_512: 0x9 = 512
+ */
+enum qpnp_adc_fast_avg_ctl {
+ ADC_FAST_AVG_SAMPLE_1 = 0,
+ ADC_FAST_AVG_SAMPLE_2,
+ ADC_FAST_AVG_SAMPLE_4,
+ ADC_FAST_AVG_SAMPLE_8,
+ ADC_FAST_AVG_SAMPLE_16,
+ ADC_FAST_AVG_SAMPLE_32,
+ ADC_FAST_AVG_SAMPLE_64,
+ ADC_FAST_AVG_SAMPLE_128,
+ ADC_FAST_AVG_SAMPLE_256,
+ ADC_FAST_AVG_SAMPLE_512,
+ ADC_FAST_AVG_SAMPLE_NONE,
+};
+
+/**
+ * enum qpnp_adc_hw_settle_time - Time between AMUX getting configured and
+ * the ADC starting conversion. Delay = 100us * value for
+ * value < 11 and 2ms * (value - 10) otherwise.
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_0US: 0us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_100US: 100us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_200US: 200us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_300US: 300us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_400US: 400us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_500US: 500us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_600US: 600us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_700US: 700us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_800US: 800us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_900US: 900us
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_1MS: 1ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_2MS: 2ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_4MS: 4ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_6MS: 6ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_8MS: 8ms
+ * %ADC_CHANNEL_HW_SETTLE_DELAY_10MS: 10ms
+ * %ADC_CHANNEL_HW_SETTLE_NONE
+ */
+enum qpnp_adc_hw_settle_time {
+ ADC_CHANNEL_HW_SETTLE_DELAY_0US = 0,
+ ADC_CHANNEL_HW_SETTLE_DELAY_100US,
+	ADC_CHANNEL_HW_SETTLE_DELAY_200US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_300US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_400US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_500US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_600US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_700US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_800US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_900US,
+ ADC_CHANNEL_HW_SETTLE_DELAY_1MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_2MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_4MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_6MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_8MS,
+ ADC_CHANNEL_HW_SETTLE_DELAY_10MS,
+ ADC_CHANNEL_HW_SETTLE_NONE,
+};
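+
+/*
+ * Illustrative helper only, not part of this patch: converts an
+ * ADC_CHANNEL_HW_SETTLE_DELAY_* code to microseconds using the relation
+ * documented above (100us * value for value < 11, 2ms * (value - 10)
+ * otherwise). The name example_hw_settle_us is hypothetical.
+ */
+static inline unsigned int example_hw_settle_us(unsigned int value)
+{
+	if (value < 11)
+		return value * 100;	/* 0us to 1ms in 100us steps */
+	return (value - 10) * 2000;	/* 2ms to 10ms in 2ms steps */
+}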
+
+/**
+ * enum qpnp_vadc_dec_ratio_sel - Selects the decimation ratio of the ADC.
+ *	Supported values are 256, 512 and 1024.
+ */
+enum qpnp_vadc_dec_ratio_sel {
+ ADC_DEC_RATIO_256 = 0,
+ ADC_DEC_RATIO_512,
+ ADC_DEC_RATIO_1024,
+ ADC_DEC_RATIO_NONE,
+};
+
+/**
+ * enum qpnp_adc_cal_sel - Selects the calibration type that is applied
+ * on the corresponding channel measurement after
+ * the ADC data is read.
+ * %ADC_NO_CAL : To obtain raw, uncalibrated data.
+ * %ADC_RATIO_CAL : Applies ratiometric calibration. Note the calibration
+ * values stored in the CAL peripheral for VADC_VREF and
+ * VREF_1P25 already have GND_REF value removed.
+ * %ADC_ABS_CAL : Applies absolute calibration. Note the calibration
+ * values stored in the CAL peripheral for VADC_VREF and
+ * VREF_1P25 already have GND_REF value removed.
+ */
+enum qpnp_adc_cal_sel {
+	ADC_NO_CAL = 0,
+	ADC_RATIO_CAL,
+	ADC_ABS_CAL,
+	ADC_CAL_SEL_NONE,
+};
+
+/**
+ * enum qpnp_adc_cal_val - Selects if the calibration values applied
+ * are the ones when collected on a timer interval
+ * or if an immediate calibration needs to be forced.
+ * %ADC_TIMER_CAL : Uses calibration value collected on the timer interval.
+ * %ADC_NEW_CAL : Forces an immediate calibration. Use only when necessary
+ * since it forces 3 calibration measurements in addition to
+ * the channel measurement. For most measurement, using
+ * calibration based on the timer interval is sufficient.
+ */
+enum qpnp_adc_cal_val {
+ ADC_TIMER_CAL = 0,
+ ADC_NEW_CAL,
+ ADC_CAL_VAL_NONE,
+};
+
+/**
+ * enum qpnp_vadc_mode_sel - Selects the basic mode of operation.
+ * - The normal mode is used for single measurement.
+ * - The Conversion sequencer is used to trigger an
+ * ADC read when a HW trigger is selected.
+ * - The measurement interval performs a single or
+ *	continuous measurement at a specified interval/delay.
+ * %ADC_OP_NORMAL_MODE : Normal mode used for single measurement.
+ * %ADC_OP_CONVERSION_SEQUENCER : Conversion sequencer used to trigger
+ * an ADC read on a HW supported trigger.
+ * Refer to enum qpnp_vadc_trigger for
+ * supported HW triggers.
+ * %ADC_OP_MEASUREMENT_INTERVAL : The measurement interval performs a
+ *	single or continuous measurement after a specified delay.
+ * For delay look at qpnp_adc_meas_timer.
+ */
+enum qpnp_vadc_mode_sel {
+ ADC_OP_NORMAL_MODE = 0,
+ ADC_OP_CONVERSION_SEQUENCER,
+ ADC_OP_MEASUREMENT_INTERVAL,
+ ADC_OP_MODE_NONE,
+};
+
+/**
+ * enum qpnp_vadc_trigger - Select the HW trigger to be used while
+ * measuring the ADC reading.
+ * %ADC_GSM_PA_ON : GSM power amplifier on.
+ * %ADC_TX_GTR_THRES : Transmit power greater than threshold.
+ * %ADC_CAMERA_FLASH_RAMP : Flash ramp up done.
+ * %ADC_DTEST : DTEST.
+ */
+enum qpnp_vadc_trigger {
+ ADC_GSM_PA_ON = 0,
+ ADC_TX_GTR_THRES,
+ ADC_CAMERA_FLASH_RAMP,
+ ADC_DTEST,
+ ADC_SEQ_NONE,
+};
+
+/**
+ * enum qpnp_vadc_conv_seq_timeout - Selects the delay (0 to 15ms) between
+ *		the conversion request and the start of the conversion
+ *		sequencer hold off time.
+ */
+enum qpnp_vadc_conv_seq_timeout {
+ ADC_CONV_SEQ_TIMEOUT_0MS = 0,
+ ADC_CONV_SEQ_TIMEOUT_1MS,
+ ADC_CONV_SEQ_TIMEOUT_2MS,
+ ADC_CONV_SEQ_TIMEOUT_3MS,
+ ADC_CONV_SEQ_TIMEOUT_4MS,
+ ADC_CONV_SEQ_TIMEOUT_5MS,
+ ADC_CONV_SEQ_TIMEOUT_6MS,
+ ADC_CONV_SEQ_TIMEOUT_7MS,
+ ADC_CONV_SEQ_TIMEOUT_8MS,
+ ADC_CONV_SEQ_TIMEOUT_9MS,
+ ADC_CONV_SEQ_TIMEOUT_10MS,
+ ADC_CONV_SEQ_TIMEOUT_11MS,
+ ADC_CONV_SEQ_TIMEOUT_12MS,
+ ADC_CONV_SEQ_TIMEOUT_13MS,
+ ADC_CONV_SEQ_TIMEOUT_14MS,
+ ADC_CONV_SEQ_TIMEOUT_15MS,
+ ADC_CONV_SEQ_TIMEOUT_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_holdoff - Select delay from conversion
+ * trigger signal (i.e. adc_conv_seq_trig) transition
+ * to ADC enable. Delay = 25us * (value + 1).
+ */
+enum qpnp_adc_conv_seq_holdoff {
+ ADC_SEQ_HOLD_25US = 0,
+ ADC_SEQ_HOLD_50US,
+ ADC_SEQ_HOLD_75US,
+ ADC_SEQ_HOLD_100US,
+ ADC_SEQ_HOLD_125US,
+ ADC_SEQ_HOLD_150US,
+ ADC_SEQ_HOLD_175US,
+ ADC_SEQ_HOLD_200US,
+ ADC_SEQ_HOLD_225US,
+ ADC_SEQ_HOLD_250US,
+ ADC_SEQ_HOLD_275US,
+ ADC_SEQ_HOLD_300US,
+ ADC_SEQ_HOLD_325US,
+ ADC_SEQ_HOLD_350US,
+ ADC_SEQ_HOLD_375US,
+ ADC_SEQ_HOLD_400US,
+ ADC_SEQ_HOLD_NONE,
+};
+
+/**
+ * enum qpnp_adc_conv_seq_state - Conversion sequencer operating state
+ * %ADC_CONV_SEQ_IDLE : Sequencer is in idle.
+ * %ADC_CONV_TRIG_RISE : Waiting for rising edge trigger.
+ * %ADC_CONV_TRIG_HOLDOFF : Waiting for rising trigger hold off time.
+ * %ADC_CONV_MEAS_RISE : Measuring selected ADC signal.
+ * %ADC_CONV_TRIG_FALL : Waiting for falling trigger edge.
+ * %ADC_CONV_FALL_HOLDOFF : Waiting for falling trigger hold off time.
+ * %ADC_CONV_MEAS_FALL : Measuring selected ADC signal.
+ * %ADC_CONV_ERROR : Conversion sequencer is in an error state due to a
+ *			hardware problem.
+ */
+enum qpnp_adc_conv_seq_state {
+ ADC_CONV_SEQ_IDLE = 0,
+ ADC_CONV_TRIG_RISE,
+ ADC_CONV_TRIG_HOLDOFF,
+ ADC_CONV_MEAS_RISE,
+ ADC_CONV_TRIG_FALL,
+ ADC_CONV_FALL_HOLDOFF,
+ ADC_CONV_MEAS_FALL,
+ ADC_CONV_ERROR,
+ ADC_CONV_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_1 - Selects the measurement interval time.
+ *	If value = 0, the interval is 0ms; otherwise it is
+ *	2^(value + 4) / 32768 seconds.
+ *	The timer period is used by the USB_ID channel. Do not use an
+ *	interval longer than 1 second on PMIC 2.0; the interval appears
+ *	to be capped at 1 second there.
+ * %ADC_MEAS1_INTERVAL_0MS : 0ms
+ * %ADC_MEAS1_INTERVAL_1P0MS : 1ms
+ * %ADC_MEAS1_INTERVAL_2P0MS : 2ms
+ * %ADC_MEAS1_INTERVAL_3P9MS : 3.9ms
+ * %ADC_MEAS1_INTERVAL_7P8MS : 7.8ms
+ * %ADC_MEAS1_INTERVAL_15P6MS : 15.6ms
+ * %ADC_MEAS1_INTERVAL_31P3MS : 31.3ms
+ * %ADC_MEAS1_INTERVAL_62P5MS : 62.5ms
+ * %ADC_MEAS1_INTERVAL_125MS : 125ms
+ * %ADC_MEAS1_INTERVAL_250MS : 250ms
+ * %ADC_MEAS1_INTERVAL_500MS : 500ms
+ * %ADC_MEAS1_INTERVAL_1S : 1 second
+ * %ADC_MEAS1_INTERVAL_2S : 2 seconds
+ * %ADC_MEAS1_INTERVAL_4S : 4 seconds
+ * %ADC_MEAS1_INTERVAL_8S : 8 seconds
+ * %ADC_MEAS1_INTERVAL_16S: 16 seconds
+ */
+enum qpnp_adc_meas_timer_1 {
+ ADC_MEAS1_INTERVAL_0MS = 0,
+ ADC_MEAS1_INTERVAL_1P0MS,
+ ADC_MEAS1_INTERVAL_2P0MS,
+ ADC_MEAS1_INTERVAL_3P9MS,
+ ADC_MEAS1_INTERVAL_7P8MS,
+ ADC_MEAS1_INTERVAL_15P6MS,
+ ADC_MEAS1_INTERVAL_31P3MS,
+ ADC_MEAS1_INTERVAL_62P5MS,
+ ADC_MEAS1_INTERVAL_125MS,
+ ADC_MEAS1_INTERVAL_250MS,
+ ADC_MEAS1_INTERVAL_500MS,
+ ADC_MEAS1_INTERVAL_1S,
+ ADC_MEAS1_INTERVAL_2S,
+ ADC_MEAS1_INTERVAL_4S,
+ ADC_MEAS1_INTERVAL_8S,
+ ADC_MEAS1_INTERVAL_16S,
+ ADC_MEAS1_INTERVAL_NONE,
+};
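+
+/*
+ * Illustrative helper only, not part of this patch: converts an
+ * ADC_MEAS1_INTERVAL_* code to microseconds using the relation documented
+ * above (0 for code 0, otherwise 2^(value + 4) / 32768 seconds). The name
+ * example_meas1_interval_us is hypothetical.
+ */
+static inline unsigned long long example_meas1_interval_us(unsigned int value)
+{
+	if (value == 0)
+		return 0;
+	/* 2^(value + 4) / 32768 seconds, expressed in microseconds */
+	return ((1ULL << (value + 4)) * 1000000ULL) / 32768ULL;
+}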
+
+/**
+ * enum qpnp_adc_meas_timer_2 - Selects the measurement interval time.
+ *	If value = 0, the interval is 0ms; otherwise the interval increases
+ *	in 100ms steps up to 1.5 seconds, as listed below.
+ *	The timer period is used by the batt_therm channel. Do not use an
+ *	interval longer than 1 second on PMIC 2.0; the interval appears
+ *	to be capped at 1 second there.
+ * %ADC_MEAS2_INTERVAL_0MS : 0ms
+ * %ADC_MEAS2_INTERVAL_100MS : 100ms
+ * %ADC_MEAS2_INTERVAL_200MS : 200ms
+ * %ADC_MEAS2_INTERVAL_300MS : 300ms
+ * %ADC_MEAS2_INTERVAL_400MS : 400ms
+ * %ADC_MEAS2_INTERVAL_500MS : 500ms
+ * %ADC_MEAS2_INTERVAL_600MS : 600ms
+ * %ADC_MEAS2_INTERVAL_700MS : 700ms
+ * %ADC_MEAS2_INTERVAL_800MS : 800ms
+ * %ADC_MEAS2_INTERVAL_900MS : 900ms
+ * %ADC_MEAS2_INTERVAL_1S : 1 second
+ * %ADC_MEAS2_INTERVAL_1P1S : 1.1 seconds
+ * %ADC_MEAS2_INTERVAL_1P2S : 1.2 seconds
+ * %ADC_MEAS2_INTERVAL_1P3S : 1.3 seconds
+ * %ADC_MEAS2_INTERVAL_1P4S : 1.4 seconds
+ * %ADC_MEAS2_INTERVAL_1P5S : 1.5 seconds
+ */
+enum qpnp_adc_meas_timer_2 {
+ ADC_MEAS2_INTERVAL_0MS = 0,
+ ADC_MEAS2_INTERVAL_100MS,
+ ADC_MEAS2_INTERVAL_200MS,
+ ADC_MEAS2_INTERVAL_300MS,
+ ADC_MEAS2_INTERVAL_400MS,
+ ADC_MEAS2_INTERVAL_500MS,
+ ADC_MEAS2_INTERVAL_600MS,
+ ADC_MEAS2_INTERVAL_700MS,
+ ADC_MEAS2_INTERVAL_800MS,
+ ADC_MEAS2_INTERVAL_900MS,
+ ADC_MEAS2_INTERVAL_1S,
+ ADC_MEAS2_INTERVAL_1P1S,
+ ADC_MEAS2_INTERVAL_1P2S,
+ ADC_MEAS2_INTERVAL_1P3S,
+ ADC_MEAS2_INTERVAL_1P4S,
+ ADC_MEAS2_INTERVAL_1P5S,
+ ADC_MEAS2_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_3 - Selects the measurement interval time.
+ *	If value = 0, the interval is 0 seconds; otherwise the interval
+ *	increases in 1 second steps up to 15 seconds, as listed below.
+ *	Do not use an interval longer than 1 second on PMIC 2.0; the
+ *	interval appears to be capped at 1 second there.
+ * %ADC_MEAS3_INTERVAL_0S : 0 seconds
+ * %ADC_MEAS3_INTERVAL_1S : 1 second
+ * %ADC_MEAS3_INTERVAL_2S : 2 seconds
+ * %ADC_MEAS3_INTERVAL_3S : 3 seconds
+ * %ADC_MEAS3_INTERVAL_4S : 4 seconds
+ * %ADC_MEAS3_INTERVAL_5S : 5 seconds
+ * %ADC_MEAS3_INTERVAL_6S : 6 seconds
+ * %ADC_MEAS3_INTERVAL_7S : 7 seconds
+ * %ADC_MEAS3_INTERVAL_8S : 8 seconds
+ * %ADC_MEAS3_INTERVAL_9S : 9 seconds
+ * %ADC_MEAS3_INTERVAL_10S : 10 seconds
+ * %ADC_MEAS3_INTERVAL_11S : 11 seconds
+ * %ADC_MEAS3_INTERVAL_12S : 12 seconds
+ * %ADC_MEAS3_INTERVAL_13S : 13 seconds
+ * %ADC_MEAS3_INTERVAL_14S : 14 seconds
+ * %ADC_MEAS3_INTERVAL_15S : 15 seconds
+ */
+enum qpnp_adc_meas_timer_3 {
+ ADC_MEAS3_INTERVAL_0S = 0,
+ ADC_MEAS3_INTERVAL_1S,
+ ADC_MEAS3_INTERVAL_2S,
+ ADC_MEAS3_INTERVAL_3S,
+ ADC_MEAS3_INTERVAL_4S,
+ ADC_MEAS3_INTERVAL_5S,
+ ADC_MEAS3_INTERVAL_6S,
+ ADC_MEAS3_INTERVAL_7S,
+ ADC_MEAS3_INTERVAL_8S,
+ ADC_MEAS3_INTERVAL_9S,
+ ADC_MEAS3_INTERVAL_10S,
+ ADC_MEAS3_INTERVAL_11S,
+ ADC_MEAS3_INTERVAL_12S,
+ ADC_MEAS3_INTERVAL_13S,
+ ADC_MEAS3_INTERVAL_14S,
+ ADC_MEAS3_INTERVAL_15S,
+ ADC_MEAS3_INTERVAL_NONE,
+};
+
+/**
+ * enum qpnp_adc_meas_timer_select - Selects the timer for which
+ * the appropriate polling frequency is set.
+ * %ADC_MEAS_TIMER_SELECT1 - Select timer 1; typically used for a 1 second
+ *			polling interval.
+ * %ADC_MEAS_TIMER_SELECT2 - Select timer 2; typically used for a 500ms
+ *			polling interval.
+ * %ADC_MEAS_TIMER_SELECT3 - Select timer 3; typically used for a 5 second
+ *			polling interval.
+ */
+enum qpnp_adc_meas_timer_select {
+ ADC_MEAS_TIMER_SELECT1 = 0,
+ ADC_MEAS_TIMER_SELECT2,
+ ADC_MEAS_TIMER_SELECT3,
+ ADC_MEAS_TIMER_NUM,
+};
+
+/**
+ * enum qpnp_adc_meas_interval_op_ctl - Select operating mode.
+ * %ADC_MEAS_INTERVAL_OP_SINGLE : Conduct single measurement at specified time
+ * delay.
+ * %ADC_MEAS_INTERVAL_OP_CONTINUOUS : Make measurements at measurement interval
+ * times.
+ */
+enum qpnp_adc_meas_interval_op_ctl {
+ ADC_MEAS_INTERVAL_OP_SINGLE = 0,
+ ADC_MEAS_INTERVAL_OP_CONTINUOUS,
+ ADC_MEAS_INTERVAL_OP_NONE,
+};
+
+/**
+ * Channel selection registers for each of the configurable measurements
+ * Channels allotment is set at device config for a channel.
+ * The USB_ID, BATT_THERM, PMIC_THERM and VBAT channels are used by the
+ * kernel space USB, Battery and IADC drivers.
+ * The remaining channels are configurable for use by userspace clients.
+ */
+enum qpnp_adc_tm_channel_select {
+ QPNP_ADC_TM_M0_ADC_CH_SEL_CTL = 0x48,
+ QPNP_ADC_TM_M1_ADC_CH_SEL_CTL = 0x68,
+ QPNP_ADC_TM_M2_ADC_CH_SEL_CTL = 0x70,
+ QPNP_ADC_TM_M3_ADC_CH_SEL_CTL = 0x78,
+ QPNP_ADC_TM_M4_ADC_CH_SEL_CTL = 0x80,
+ QPNP_ADC_TM_M5_ADC_CH_SEL_CTL = 0x88,
+ QPNP_ADC_TM_M6_ADC_CH_SEL_CTL = 0x90,
+ QPNP_ADC_TM_M7_ADC_CH_SEL_CTL = 0x98,
+ QPNP_ADC_TM_CH_SELECT_NONE
+};
+
+/**
+ * Channel index corresponding to the entries in qpnp_adc_tm_channel_select.
+ */
+enum qpnp_adc_tm_channel_num {
+ QPNP_ADC_TM_CHAN0 = 0,
+ QPNP_ADC_TM_CHAN1,
+ QPNP_ADC_TM_CHAN2,
+ QPNP_ADC_TM_CHAN3,
+ QPNP_ADC_TM_CHAN4,
+ QPNP_ADC_TM_CHAN5,
+ QPNP_ADC_TM_CHAN6,
+ QPNP_ADC_TM_CHAN7,
+ QPNP_ADC_TM_CHAN_NONE
+};
+
+enum qpnp_comp_scheme_type {
+ COMP_ID_GF = 0,
+ COMP_ID_SMIC,
+ COMP_ID_TSMC,
+ COMP_ID_NUM,
+};
+
+/**
+ * struct qpnp_adc_tm_config - Represent ADC Thermal Monitor configuration.
+ * @channel: ADC channel for which thermal monitoring is requested.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to the
+ * ADC reference.
+ * @high_thr_temp: Temperature at which high threshold notification is required.
+ * @low_thr_temp: Temperature at which low threshold notification is required.
+ * @low_thr_voltage : Low threshold voltage ADC code used for reverse
+ * calibration.
+ * @high_thr_voltage: High threshold voltage ADC code used for reverse
+ * calibration.
+ */
+struct qpnp_adc_tm_config {
+ int channel;
+ int adc_code;
+ int high_thr_temp;
+ int low_thr_temp;
+ int64_t high_thr_voltage;
+ int64_t low_thr_voltage;
+};
+
+/**
+ * enum qpnp_adc_tm_trip_type - Type for setting high/low temperature/voltage.
+ * %ADC_TM_TRIP_HIGH_WARM: Setting high temperature. Note that high temperature
+ *			corresponds to low voltage. The driver handles this case
+ *			appropriately when setting the high/low voltage
+ *			thresholds.
+ * %ADC_TM_TRIP_LOW_COOL: Setting low temperature.
+ */
+enum qpnp_adc_tm_trip_type {
+ ADC_TM_TRIP_HIGH_WARM = 0,
+ ADC_TM_TRIP_LOW_COOL,
+ ADC_TM_TRIP_NUM,
+};
+
+#define ADC_TM_WRITABLE_TRIPS_MASK ((1 << ADC_TM_TRIP_NUM) - 1)
+
+/**
+ * enum qpnp_tm_state - This lets the client know whether the threshold
+ * that was crossed was high/low.
+ * %ADC_TM_HIGH_STATE: Client is notified of crossing the requested high
+ * voltage threshold.
+ * %ADC_TM_COOL_STATE: Client is notified of crossing the requested cool
+ * temperature threshold.
+ * %ADC_TM_LOW_STATE: Client is notified of crossing the requested low
+ * voltage threshold.
+ * %ADC_TM_WARM_STATE: Client is notified of crossing the requested high
+ * temperature threshold.
+ */
+enum qpnp_tm_state {
+ ADC_TM_HIGH_STATE = 0,
+ ADC_TM_COOL_STATE = ADC_TM_HIGH_STATE,
+ ADC_TM_LOW_STATE,
+ ADC_TM_WARM_STATE = ADC_TM_LOW_STATE,
+ ADC_TM_STATE_NUM,
+};
+
+/**
+ * enum qpnp_state_request - Request to enable/disable the corresponding
+ * high/low voltage/temperature thresholds.
+ * %ADC_TM_HIGH_THR_ENABLE: Enable high voltage threshold.
+ * %ADC_TM_COOL_THR_ENABLE: Enable cool temperature threshold.
+ * %ADC_TM_LOW_THR_ENABLE: Enable low voltage/temperature threshold.
+ * %ADC_TM_WARM_THR_ENABLE: Enable warm temperature threshold.
+ * %ADC_TM_HIGH_LOW_THR_ENABLE: Enable high and low voltage/temperature
+ *			thresholds.
+ * %ADC_TM_HIGH_THR_DISABLE: Disable high voltage/temperature threshold.
+ * %ADC_TM_COOL_THR_DISABLE: Disable cool temperature threshold.
+ * %ADC_TM_LOW_THR_DISABLE: Disable low voltage/temperature threshold.
+ * %ADC_TM_WARM_THR_DISABLE: Disable warm temperature threshold.
+ * %ADC_TM_HIGH_LOW_THR_DISABLE: Disable high and low voltage/temperature
+ *			thresholds.
+ */
+enum qpnp_state_request {
+ ADC_TM_HIGH_THR_ENABLE = 0,
+ ADC_TM_COOL_THR_ENABLE = ADC_TM_HIGH_THR_ENABLE,
+ ADC_TM_LOW_THR_ENABLE,
+ ADC_TM_WARM_THR_ENABLE = ADC_TM_LOW_THR_ENABLE,
+ ADC_TM_HIGH_LOW_THR_ENABLE,
+ ADC_TM_HIGH_THR_DISABLE,
+ ADC_TM_COOL_THR_DISABLE = ADC_TM_HIGH_THR_DISABLE,
+ ADC_TM_LOW_THR_DISABLE,
+ ADC_TM_WARM_THR_DISABLE = ADC_TM_LOW_THR_DISABLE,
+ ADC_TM_HIGH_LOW_THR_DISABLE,
+ ADC_TM_THR_NUM,
+};
+
+/**
+ * struct qpnp_adc_tm_btm_param - Represent Battery temperature threshold
+ * monitoring configuration.
+ * @high_temp: High temperature threshold for which notification is requested.
+ * @low_temp: Low temperature threshold for which notification is requested.
+ * @high_thr: High voltage threshold for which notification is requested.
+ * @low_thr: Low voltage threshold for which notification is requested.
+ * @gain_num: Numerator used when scaling the requested thresholds.
+ * @gain_den: Denominator used when scaling the requested thresholds.
+ * @channel: ADC channel on which the thresholds are monitored.
+ * @state_request: Enable/disable the corresponding high and low temperature
+ *		thresholds.
+ * @timer_interval: Select polling rate from qpnp_adc_meas_timer_1 type.
+ * @timer_interval2: Select polling rate from qpnp_adc_meas_timer_2 type.
+ * @timer_interval3: Select polling rate from qpnp_adc_meas_timer_3 type.
+ * @btm_ctx: A context of void type passed back to the notification callback.
+ * @threshold_notification: Notification callback invoked once the thresholds
+ *		are crossed.
+ * The units used for the high/low temperature and voltage notification
+ * depend on the client's usage. Check the rscaling function for the
+ * appropriate channel nodes:
+ * - batt_therm clients pass temperatures in decidegrees Centigrade.
+ * - USB_ID clients pass voltages in millivolts.
+ * - PA_THERM clients pass temperatures in degC.
+ * - PMIC_THERM clients pass temperatures in millidegC.
+struct qpnp_adc_tm_btm_param {
+ int32_t high_temp;
+ int32_t low_temp;
+ int32_t high_thr;
+ int32_t low_thr;
+ int32_t gain_num;
+ int32_t gain_den;
+ enum qpnp_vadc_channels channel;
+ enum qpnp_state_request state_request;
+ enum qpnp_adc_meas_timer_1 timer_interval;
+ enum qpnp_adc_meas_timer_2 timer_interval2;
+ enum qpnp_adc_meas_timer_3 timer_interval3;
+ void *btm_ctx;
+ void (*threshold_notification)(enum qpnp_tm_state state,
+ void *ctx);
+};
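+
+/*
+ * Illustrative sketch only, not part of this patch: how a client might fill
+ * the structure above for battery temperature monitoring. The names
+ * example_batt_notify/example_fill_btm_param and the threshold values are
+ * hypothetical; a real client picks its own channel and thresholds and then
+ * hands the structure to qpnp_vadc_channel_monitor(), declared later in
+ * this header.
+ */
+static inline void example_batt_notify(enum qpnp_tm_state state, void *ctx)
+{
+	/* state is ADC_TM_WARM_STATE or ADC_TM_COOL_STATE for batt_therm */
+}
+
+static inline void example_fill_btm_param(struct qpnp_adc_tm_btm_param *param)
+{
+	param->low_temp = 100;	/* 10.0 degC, batt_therm units are decidegC */
+	param->high_temp = 450;	/* 45.0 degC */
+	param->state_request = ADC_TM_HIGH_LOW_THR_ENABLE;
+	param->timer_interval = ADC_MEAS1_INTERVAL_1S;
+	param->btm_ctx = NULL;
+	param->threshold_notification = example_batt_notify;
+}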
+
+/**
+ * struct qpnp_vadc_linear_graph - Represent ADC characteristics.
+ * @dy: Numerator slope to calculate the gain.
+ * @dx: Denominator slope to calculate the gain.
+ * @adc_vref: A/D word of the voltage reference used for the channel.
+ * @adc_gnd: A/D word of the ground reference used for the channel.
+ *
+ * Each ADC device has different offset and gain parameters which are computed
+ * to calibrate the device.
+ */
+struct qpnp_vadc_linear_graph {
+ int64_t dy;
+ int64_t dx;
+ int64_t adc_vref;
+ int64_t adc_gnd;
+};
+
+/**
+ * struct qpnp_vadc_map_pt - Map the graph representation for ADC channel
+ * @x: Represent the ADC digitized code.
+ * @y: Represent the physical data which can be temperature, voltage,
+ * resistance.
+ */
+struct qpnp_vadc_map_pt {
+ int32_t x;
+ int32_t y;
+};
+
+/**
+ * struct qpnp_vadc_scaling_ratio - Represent scaling ratio for adc input.
+ * @num: Numerator scaling parameter.
+ * @den: Denominator scaling parameter.
+ */
+struct qpnp_vadc_scaling_ratio {
+ int32_t num;
+ int32_t den;
+};
+
+/**
+ * struct qpnp_adc_properties - Represent the ADC properties.
+ * @adc_vdd_reference: Reference voltage for the QPNP ADC.
+ * @bitresolution: ADC bit resolution for the QPNP ADC.
+ * @bipolar: Polarity for the QPNP ADC.
+ * @adc_hc: Represents using HC variant of the ADC controller.
+ */
+struct qpnp_adc_properties {
+ uint32_t adc_vdd_reference;
+ uint32_t bitresolution;
+ bool bipolar;
+ bool adc_hc;
+};
+
+/**
+ * struct qpnp_vadc_chan_properties - Represent channel properties of the ADC.
+ * @offset_gain_numerator: The inverse numerator of the gain applied to the
+ * input channel.
+ * @offset_gain_denominator: The inverse denominator of the gain applied to the
+ * input channel.
+ * @high_thr: High threshold voltage that is requested to be set.
+ * @low_thr: Low threshold voltage that is requested to be set.
+ * @timer_select: Chosen from one of the 3 timers to set the polling rate for
+ *		the VADC_BTM channel.
+ * @meas_interval1: Polling rate to set for timer 1.
+ * @meas_interval2: Polling rate to set for timer 2.
+ * @tm_channel_select: BTM channel number for the VADC_BTM channels.
+ * @state_request: User can select either enable or disable high/low or both
+ *		activation levels based on the qpnp_state_request type.
+ * @calib_type: Calibration type (absolute or ratiometric) used for the
+ *		channel.
+ * @adc_graph: ADC graph for the channel of struct type qpnp_vadc_linear_graph.
+ */
+struct qpnp_vadc_chan_properties {
+ uint32_t offset_gain_numerator;
+ uint32_t offset_gain_denominator;
+ uint32_t high_thr;
+ uint32_t low_thr;
+ enum qpnp_adc_meas_timer_select timer_select;
+ enum qpnp_adc_meas_timer_1 meas_interval1;
+ enum qpnp_adc_meas_timer_2 meas_interval2;
+ enum qpnp_adc_tm_channel_select tm_channel_select;
+ enum qpnp_state_request state_request;
+ enum qpnp_adc_calib_type calib_type;
+ struct qpnp_vadc_linear_graph adc_graph[2];
+};
+
+/**
+ * struct qpnp_vadc_result - Represent the result of the QPNP ADC.
+ * @chan: The channel number of the requested conversion.
+ * @adc_code: The pre-calibrated digital output of a given ADC relative to
+ *		the ADC reference.
+ * @measurement: In units specific to a given ADC; most ADCs use a reference
+ *		voltage but some use a reference current. This measurement
+ *		here is a number relative to the reference of a given ADC.
+ * @physical: The data meaningful for each individual channel whether it is
+ * voltage, current, temperature, etc.
+ * All voltage units are represented in micro - volts.
+ * -Battery temperature units are represented as 0.1 DegC.
+ * -PA Therm temperature units are represented as DegC.
+ * -PMIC Die temperature units are represented as 0.001 DegC.
+ */
+struct qpnp_vadc_result {
+ uint32_t chan;
+ int32_t adc_code;
+ int64_t measurement;
+ int64_t physical;
+};
+
+/**
+ * struct qpnp_adc_amux - AMUX properties for individual channel
+ * @name: Channel string name.
+ * @channel_num: Channel in integer used from qpnp_adc_channels.
+ * @chan_path_prescaling: Channel scaling performed on the input signal.
+ * @adc_decimation: Sampling rate desired for the channel.
+ * @adc_scale_fn: Scaling function to convert to the data meaningful for
+ *		each individual channel, whether it is voltage, current,
+ *		temperature, etc., and compensate for the channel properties.
+ * @fast_avg_setup: Number of measurements averaged for a single result.
+ * @hw_settle_time: Settling time allowed between AMUX configuration and the
+ *		start of conversion.
+ * @calib_type: Calibration type (absolute or ratiometric) used for the
+ *		channel.
+ * @cal_val: Whether timer-based or freshly forced calibration values are used.
+ */
+struct qpnp_adc_amux {
+ char *name;
+ enum qpnp_vadc_channels channel_num;
+ enum qpnp_adc_channel_scaling_param chan_path_prescaling;
+ enum qpnp_adc_decimation_type adc_decimation;
+ enum qpnp_adc_scale_fn_type adc_scale_fn;
+ enum qpnp_adc_fast_avg_ctl fast_avg_setup;
+ enum qpnp_adc_hw_settle_time hw_settle_time;
+ enum qpnp_adc_calib_type calib_type;
+ enum qpnp_adc_cal_val cal_val;
+};
+
+/*
+ * Pre-scaling ratios applied to the AMUX input, indexed by
+ * enum qpnp_adc_channel_scaling_param.
+ */
+static const struct qpnp_vadc_scaling_ratio qpnp_vadc_amux_scaling_ratio[] = {
+ {1, 1},
+ {1, 3},
+ {1, 4},
+ {1, 6},
+ {1, 20},
+ {1, 8},
+ {10, 81},
+ {1, 10}
+};
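+
+/*
+ * Illustrative helper only, not part of this patch: the pre-scaling above
+ * attenuates the AMUX input by num/den before it reaches the ADC, so a
+ * measured voltage is brought back to the input domain by multiplying by
+ * den/num. The name example_prescale_undo is hypothetical, and a real
+ * kernel implementation would use div64_s64() for the 64-bit division.
+ */
+static inline int64_t example_prescale_undo(int64_t uv,
+		const struct qpnp_vadc_scaling_ratio *ratio)
+{
+	/* e.g. a {1, 3} ratio means the ADC saw uv/3, so scale back up by 3 */
+	return (uv * ratio->den) / ratio->num;
+}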
+
+/**
+ * struct qpnp_vadc_scale_fn - Scaling function prototype
+ * @chan: Function pointer to one of the scaling functions
+ * which takes the adc properties, channel properties,
+ * and returns the physical result
+ */
+struct qpnp_vadc_scale_fn {
+ int32_t (*chan)(struct qpnp_vadc_chip *, int32_t,
+ const struct qpnp_adc_properties *,
+ const struct qpnp_vadc_chan_properties *,
+ struct qpnp_vadc_result *);
+};
+
+/**
+ * struct qpnp_adc_tm_reverse_scale_fn - Reverse scaling prototype
+ * @chan: Function pointer to one of the scaling functions
+ * which takes the adc properties, channel properties,
+ * and returns the physical result
+ */
+struct qpnp_adc_tm_reverse_scale_fn {
+ int32_t (*chan)(struct qpnp_vadc_chip *,
+ struct qpnp_adc_tm_btm_param *,
+ uint32_t *, uint32_t *);
+};
+
+/**
+ * struct qpnp_vadc_rscale_fn - Scaling function prototype
+ * @chan: Function pointer to one of the scaling functions
+ * which takes the adc properties, channel properties,
+ * and returns the physical result
+ */
+struct qpnp_vadc_rscale_fn {
+ int32_t (*chan)(struct qpnp_vadc_chip *,
+ const struct qpnp_vadc_chan_properties *,
+ struct qpnp_adc_tm_btm_param *,
+ uint32_t *, uint32_t *);
+};
+
+/**
+ * struct qpnp_iadc_calib - IADC channel calibration structure.
+ * @channel - Channel for which the historical offset and gain is
+ * calculated. Available channels are internal rsense,
+ * external rsense and alternate lead pairs.
+ * @offset_raw - raw Offset value for the channel.
+ * @gain_raw - raw Gain of the channel.
+ * @ideal_offset_uv - ideal offset value for the channel.
+ * @ideal_gain_nv - ideal gain for the channel.
+ * @offset_uv - converted value of offset in uV.
+ * @gain_uv - converted value of gain in uV.
+ */
+struct qpnp_iadc_calib {
+ enum qpnp_iadc_channels channel;
+ uint16_t offset_raw;
+ uint16_t gain_raw;
+ uint32_t ideal_offset_uv;
+ uint32_t ideal_gain_nv;
+ uint32_t offset_uv;
+ uint32_t gain_uv;
+};
+
+/**
+ * struct qpnp_iadc_result - IADC read result structure.
+ * @oresult_uv - Result of ADC in uV.
+ * @result_ua - Result of ADC in uA.
+ */
+struct qpnp_iadc_result {
+ int32_t result_uv;
+ int32_t result_ua;
+};
+
+/**
+ * struct qpnp_adc_drv - QPNP ADC device structure.
+ * @spmi - spmi device for ADC peripheral.
+ * @offset - base offset for the ADC peripheral.
+ * @adc_prop - ADC properties specific to the ADC peripheral.
+ * @amux_prop - AMUX properties representing the ADC peripheral.
+ * @adc_channels - ADC channel properties for the ADC peripheral.
+ * @adc_irq_eoc - End of Conversion IRQ.
+ * @adc_irq_fifo_not_empty - Conversion sequencer request written
+ * to FIFO when not empty.
+ * @adc_irq_conv_seq_timeout - Conversion sequencer trigger timeout.
+ * @adc_high_thr_irq - Output higher than high threshold set for measurement.
+ * @adc_low_thr_irq - Output lower than low threshold set for measurement.
+ * @adc_lock - ADC lock for access to the peripheral.
+ * @adc_rslt_completion - ADC result notification after interrupt
+ * is received.
+ * @calib - Internal rsense calibration values for gain and offset.
+ * @hkadc_ldo - Regulator supplying the HKADC.
+ * @hkadc_ldo_ok - Companion regulator vote for the HKADC supply.
+ * @adc_hc - True when the refreshed (HC) variant of the ADC peripheral
+ *		is used.
+ */
+struct qpnp_adc_drv {
+ struct spmi_device *spmi;
+ uint8_t slave;
+ uint16_t offset;
+ struct qpnp_adc_properties *adc_prop;
+ struct qpnp_adc_amux_properties *amux_prop;
+ struct qpnp_adc_amux *adc_channels;
+ int adc_irq_eoc;
+ int adc_irq_fifo_not_empty;
+ int adc_irq_conv_seq_timeout;
+ int adc_high_thr_irq;
+ int adc_low_thr_irq;
+ struct mutex adc_lock;
+ struct completion adc_rslt_completion;
+ struct qpnp_iadc_calib calib;
+ struct regulator *hkadc_ldo;
+ struct regulator *hkadc_ldo_ok;
+ bool adc_hc;
+};
+
+/**
+ * struct qpnp_adc_amux_properties - QPNP VADC amux channel property.
+ * @amux_channel - Refer to the qpnp_vadc_channel list.
+ * @decimation - Sampling rate supported for the channel.
+ * @mode_sel - The basic mode of operation.
+ * @hw_settle_time - The time between AMUX being configured and the
+ * start of conversion.
+ * @fast_avg_setup - Ability to provide single result from the ADC
+ * that is an average of multiple measurements.
+ * @trigger_channel - HW trigger channel for conversion sequencer.
+ * @chan_prop - Represent the channel properties of the ADC.
+ */
+struct qpnp_adc_amux_properties {
+ uint32_t amux_channel;
+ uint32_t decimation;
+ uint32_t mode_sel;
+ uint32_t hw_settle_time;
+ uint32_t fast_avg_setup;
+ enum qpnp_vadc_trigger trigger_channel;
+ struct qpnp_vadc_chan_properties chan_prop[0];
+};
+
+/* SW indexes for PMIC type and version used by QPNP VADC and IADC */
+#define QPNP_REV_ID_8941_3_1 1
+#define QPNP_REV_ID_8026_1_0 2
+#define QPNP_REV_ID_8026_2_0 3
+#define QPNP_REV_ID_8110_1_0 4
+#define QPNP_REV_ID_8026_2_1 5
+#define QPNP_REV_ID_8110_2_0 6
+#define QPNP_REV_ID_8026_2_2 7
+#define QPNP_REV_ID_8941_3_0 8
+#define QPNP_REV_ID_8941_2_0 9
+#define QPNP_REV_ID_8916_1_0 10
+#define QPNP_REV_ID_8916_1_1 11
+#define QPNP_REV_ID_8916_2_0 12
+#define QPNP_REV_ID_8909_1_0 13
+#define QPNP_REV_ID_8909_1_1 14
+#define QPNP_REV_ID_PM8950_1_0 16
+
+/* Public API */
+#if defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE) \
+ || defined(CONFIG_SENSORS_QPNP_ADC_VOLTAGE_MODULE)
+/**
+ * qpnp_vadc_read() - Performs ADC read on the channel.
+ * @dev: Structure device for qpnp vadc
+ * @channel: Input channel to perform the ADC read.
+ * @result: Structure pointer of type adc_chan_result
+ * in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_read(struct qpnp_vadc_chip *dev,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_hc_read() - Performs ADC read on the channel.
+ * It uses the refreshed VADC design from qpnp-vadc-hc.
+ * @dev: Structure device for qpnp vadc
+ * @channel: Input channel to perform the ADC read.
+ * @result: Structure pointer of type adc_chan_result
+ * in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_hc_read(struct qpnp_vadc_chip *dev,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_vadc_conv_seq_request() - Performs ADC read on the conversion
+ * sequencer channel.
+ * @dev: Structure device for qpnp vadc
+ * @channel: Input channel to perform the ADC read.
+ * @result: Structure pointer of type adc_chan_result
+ * in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *dev,
+ enum qpnp_vadc_trigger trigger_channel,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result);
+
+/**
+ * qpnp_adc_get_devicetree_data() - Abstracts the ADC devicetree data.
+ * @spmi: spmi ADC device.
+ * @adc_qpnp: QPNP ADC device structure to be populated from the device tree.
+ */
+int32_t qpnp_adc_get_devicetree_data(struct spmi_device *spmi,
+ struct qpnp_adc_drv *adc_qpnp);
+
+/**
+ * qpnp_adc_scale_default() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_pmic_therm() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Performs the AMUX out as 2mV/K and returns
+ * the temperature in milli degC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_pmi_chg_temp() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. The voltage measured by HKADC is related to
+ * the junction temperature according to
+ * Tj = -137.67 degC * (V_adc * 2) + 382.04 degC
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: Physical result to be stored.
+ */
+int32_t qpnp_adc_scale_pmi_chg_temp(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
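+/*
+ * Illustrative sketch only, not part of this patch: the relation quoted
+ * above, evaluated in millidegrees Centigrade for a HKADC voltage given in
+ * microvolts. The name example_pmi_chg_temp_mdegc is hypothetical, and a
+ * real kernel implementation would use div_s64() for the 64-bit division.
+ */
+static inline int64_t example_pmi_chg_temp_mdegc(int64_t v_adc_uv)
+{
+	/*
+	 * Tj = -137.67 degC * (V_adc * 2) + 382.04 degC
+	 *    = 382040 mdegC - 275340 mdegC per volt of V_adc
+	 */
+	return 382040LL - (275340LL * v_adc_uv) / 1000000LL;
+}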
+/**
+ * qpnp_adc_scale_batt_therm() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature in decidegC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_batt_therm() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature in decidegC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_batt_therm(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skuaa_batt_therm() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature in decidegC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skuaa_batt_therm(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skug_batt_therm() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature in decidegC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skug_batt_therm(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skuh_batt_therm() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature in decidegC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skuh_batt_therm(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_qrd_skut1_batt_therm() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature in decidegC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_qrd_skut1_batt_therm(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_smb_batt_therm() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature in decidegC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_smb_batt_therm(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_batt_id() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *dev, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_tdkntcg_therm() - Scales the pre-calibrated digital output
+ *		of an ADC to the ADC reference and compensates for the
+ *		gain and offset. Returns the temperature of the XO thermistor
+ *		in milli degC.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *dev, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_therm_pu1() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature of the therm in degC.
+ * It uses a mapping table computed for a 150K pull-up.
+ * Pull-up1 is an internal pull-up on the AMUX of 150K.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *dev, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_therm_pu2() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature of the therm in degC.
+ * It uses a mapping table computed for a 100K pull-up.
+ * Pull-up2 is an internal pull-up on the AMUX of 100K.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *dev, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_adc_scale_therm_ncp03() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature of the therm in degC.
+ * It uses a mapping table computed for a NCP03WF683.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_scale_therm_ncp03(struct qpnp_vadc_chip *dev, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
+ * qpnp_get_vadc() - Clients need to register with the vadc using the
+ *		device instance corresponding to the channels they want to
+ *		read from. Read the bindings document on how to pass the phandle
+ * for the corresponding vadc driver to register with.
+ * @dev: Clients device structure
+ * @name: Corresponding client's DT parser name. Read the DT bindings
+ * document on how to register with the vadc
+ * @struct qpnp_vadc_chip * - On success returns the vadc device structure
+ * pointer that needs to be used during an ADC request.
+ */
+struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev, const char *name);
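+/*
+ * Illustrative sketch only, not part of this patch: a minimal client read
+ * using the two calls above. The function name and the "example" property
+ * name are hypothetical; VBAT_SNS is just one of the channels defined in
+ * this header.
+ */
+static inline int example_vadc_read_vbat(struct device *dev)
+{
+	struct qpnp_vadc_chip *vadc;
+	struct qpnp_vadc_result res;
+	int rc;
+
+	vadc = qpnp_get_vadc(dev, "example");
+	if (IS_ERR(vadc))
+		return PTR_ERR(vadc);
+
+	rc = qpnp_vadc_read(vadc, VBAT_SNS, &res);
+	if (rc)
+		return rc;
+
+	/* res.physical for VBAT_SNS is the battery voltage in microvolts */
+	return 0;
+}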
+/**
+ * qpnp_adc_tm_scaler() - Performs reverse calibration.
+ * @tm_config:	Thermal monitoring configuration.
+ * @adc_prop: adc properties of the qpnp adc such as bit resolution and
+ * reference voltage.
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ */
+static inline int32_t qpnp_adc_tm_scaler(struct qpnp_adc_tm_config *tm_config,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop)
+{ return -ENXIO; }
+/**
+ * qpnp_get_vadc_gain_and_offset() - Obtains the VADC gain and offset
+ * for absolute and ratiometric calibration.
+ * @dev: Structure device for qpnp vadc
+ * @param: The result in which the ADC offset and gain values are stored.
+ * @type: The calibration type whether client needs the absolute or
+ * ratiometric gain and offset values.
+ */
+int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *dev,
+ struct qpnp_vadc_linear_graph *param,
+ enum qpnp_adc_calib_type calib_type);
+/**
+ * qpnp_adc_scale_millidegc_pmic_voltage_thr() - Performs reverse calibration
+ * on the low/high temperature threshold values passed by the
+ *		client. The function converts millidegC to a voltage threshold
+ * and accounts for the corresponding channels scaling as (2mV/K).
+ * @dev: Structure device for qpnp vadc
+ * @param: The input parameters that contain the low/high temperature
+ * values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_btm_scaler() - Performs reverse calibration on the low/high
+ * temperature threshold values passed by the client.
+ * The function maps the temperature to voltage and applies
+ * ratiometric calibration on the voltage values.
+ * @dev: Structure device for qpnp vadc
+ * @param: The input parameters that contain the low/high temperature
+ * values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+
+/**
+ * qpnp_adc_qrd_skuh_btm_scaler() - Performs reverse calibration on the low/high
+ * temperature threshold values passed by the client.
+ * The function maps the temperature to voltage and applies
+ * ratiometric calibration on the voltage values for SKUH board.
+ * @dev: Structure device for qpnp vadc
+ * @param: The input parameters that contain the low/high temperature
+ * values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_qrd_skut1_btm_scaler() - Performs reverse calibration on the low/high
+ * temperature threshold values passed by the client.
+ * The function maps the temperature to voltage and applies
+ * ratiometric calibration on the voltage values for SKUT1 board.
+ * @dev: Structure device for qpnp vadc
+ * @param: The input parameters that contain the low/high temperature
+ * values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_tm_scale_therm_voltage_pu2() - Performs reverse calibration
+ *		and converts the given temperature to voltage on supported
+ * thermistor channels using 100k pull-up.
+ * @dev: Structure device for qpnp vadc
+ * @param: The input temperature values.
+ */
+int32_t qpnp_adc_tm_scale_therm_voltage_pu2(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_config *param);
+/**
+ * qpnp_adc_tm_scale_voltage_therm_pu2() - Performs reverse calibration
+ * and converts the given ADC code to temperature for
+ * thermistor channels using 100k pull-up.
+ * @dev: Structure device for qpnp vadc
+ * @reg: The input ADC code.
+ * @result: The physical measurement temperature on the thermistor.
+ */
+int32_t qpnp_adc_tm_scale_voltage_therm_pu2(struct qpnp_vadc_chip *dev,
+ uint32_t reg, int64_t *result);
+/**
+ * qpnp_adc_usb_scaler() - Performs reverse calibration on the low/high
+ * voltage threshold values passed by the client.
+ * The function applies ratiometric calibration on the
+ * voltage values.
+ * @dev: Structure device for qpnp vadc
+ * @param: The input parameters that contain the low/high voltage
+ * threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_vbatt_rscaler() - Performs reverse calibration on the low/high
+ * voltage threshold values passed by the client.
+ * The function applies ratiometric calibration on the
+ * voltage values.
+ * @dev: Structure device for qpnp vadc
+ * @param: The input parameters that contain the low/high voltage
+ * threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_vadc_absolute_rthr() - Performs reverse calibration on the low/high
+ * voltage threshold values passed by the client.
+ * The function applies absolute calibration on the
+ * voltage values.
+ * @dev: Structure device for qpnp vadc
+ * @chan_prop: Individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @param: The input parameters that contain the low/high voltage
+ * threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *dev,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_absolute_rthr() - Performs reverse calibration on the low/high
+ * voltage threshold values passed by the client.
+ * The function applies absolute calibration on the
+ * voltage values.
+ * @dev: Structure device for qpnp vadc
+ * @param: The input parameters that contain the low/high voltage
+ * threshold values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_adc_smb_btm_rscaler() - Performs reverse calibration on the low/high
+ * temperature threshold values passed by the client.
+ * The function maps the temperature to voltage and applies
+ * ratiometric calibration on the voltage values.
+ * @dev: Structure device for qpnp vadc
+ * @param: The input parameters that contain the low/high temperature
+ * values.
+ * @low_threshold: The low threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ * @high_threshold: The high threshold value that needs to be updated with
+ * the above calibrated voltage value.
+ */
+int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold);
+/**
+ * qpnp_vadc_iadc_sync_request() - Performs Voltage ADC read and
+ * locks the peripheral. When performing simultaneous
+ * voltage and current request the VADC peripheral is
+ * prepared for conversion and the IADC sync conversion
+ * is done from the IADC peripheral.
+ * @dev: Structure device for qpnp vadc
+ * @channel: Input channel to perform the voltage ADC read.
+ */
+int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *dev,
+ enum qpnp_vadc_channels channel);
+/**
+ * qpnp_vadc_iadc_sync_complete_request() - Reads the ADC result and
+ * unlocks the peripheral.
+ * @dev: Structure device for qpnp vadc
+ * @result: Structure pointer of type adc_chan_result
+ * in which the ADC read results are stored.
+ */
+int32_t qpnp_vadc_iadc_sync_complete_request(struct qpnp_vadc_chip *dev,
+ enum qpnp_vadc_channels channel, struct qpnp_vadc_result *result);
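+/*
+ * Illustrative sketch only, not part of this patch: the intended pairing of
+ * the two calls above. The synchronized IADC conversion is started from the
+ * IADC driver between the two calls; example_vadc_iadc_sync and the use of
+ * VBAT_SNS are hypothetical.
+ */
+static inline int example_vadc_iadc_sync(struct qpnp_vadc_chip *vadc)
+{
+	struct qpnp_vadc_result res;
+	int rc;
+
+	rc = qpnp_vadc_iadc_sync_request(vadc, VBAT_SNS);
+	if (rc)
+		return rc;
+
+	/* ...trigger the synchronized IADC conversion here... */
+
+	return qpnp_vadc_iadc_sync_complete_request(vadc, VBAT_SNS, &res);
+}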
+/**
+ * qpnp_vbat_sns_comp_result() - Compensate vbatt readings based on temperature
+ * @dev: Structure device for qpnp vadc
+ * @result: Voltage in uV that needs compensation.
+ * @is_pon_ocv: Whether the reading is from a power on OCV or not
+ */
+int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
+ int64_t *result, bool is_pon_ocv);
+/**
+ * qpnp_adc_get_revid_version() - Obtain the PMIC number and revision.
+ * @dev: Structure device node.
+ * returns internal mapped PMIC number and revision id.
+ */
+int qpnp_adc_get_revid_version(struct device *dev);
+/**
+ * qpnp_vadc_channel_monitor() - Configures a channel for kernel clients to
+ *		monitor the corresponding ADC channel for threshold detection.
+ *		The driver invokes the notification callback once the set
+ *		thresholds are crossed.
+ * @chip:	Structure device for qpnp vadc
+ * @param:	Structure pointer of qpnp_adc_tm_btm_param type.
+ *		Clients pass the low/high temperature along with the threshold
+ *		notification callback.
+ */
+int32_t qpnp_vadc_channel_monitor(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_vadc_end_channel_monitor() - Disables recurring measurement mode for
+ * VADC_USR and disables the bank.
+ * @chip:	device instance for the VADC
+ */
+int32_t qpnp_vadc_end_channel_monitor(struct qpnp_vadc_chip *chip);
+/**
+ * qpnp_vadc_calib_vref() - Read calibration channel REF_125V/VDD_VADC
+ * @vadc:	Structure device for qpnp vadc
+ * @calib_type:	absolute or ratiometric calib type.
+ * @calib_data:	Filled with the calibration channel adc code on success.
+ */
+int32_t qpnp_vadc_calib_vref(struct qpnp_vadc_chip *vadc,
+ enum qpnp_adc_calib_type calib_type,
+ int *calib_data);
+/**
+ * qpnp_vadc_calib_gnd() - Read calibration channel REF_625MV/GND_REF
+ * @vadc:	Structure device for qpnp vadc
+ * @calib_type:	absolute or ratiometric calib type.
+ * @calib_data:	Filled with the calibration channel adc code on success.
+ */
+int32_t qpnp_vadc_calib_gnd(struct qpnp_vadc_chip *vadc,
+ enum qpnp_adc_calib_type calib_type,
+ int *calib_data);
+
+/**
+ * qpnp_adc_enable_voltage() - Enable LDO for HKADC
+ * @adc:	QPNP ADC device structure.
+ * returns result of enabling the regulator interface.
+ */
+int32_t qpnp_adc_enable_voltage(struct qpnp_adc_drv *adc);
+
+/**
+ * qpnp_adc_disable_voltage() - Disable vote for HKADC LDO
+ * @adc:	QPNP ADC device structure.
+ */
+void qpnp_adc_disable_voltage(struct qpnp_adc_drv *adc);
+
+/**
+ * qpnp_adc_free_voltage_resource() - Releases (puts) the HKADC LDO regulators
+ * @adc:	QPNP ADC device structure.
+ */
+void qpnp_adc_free_voltage_resource(struct qpnp_adc_drv *adc);
+
+#else
+static inline int32_t qpnp_vadc_read(struct qpnp_vadc_chip *dev,
+ uint32_t channel,
+ struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_hc_read(struct qpnp_vadc_chip *dev,
+ uint32_t channel,
+ struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_conv_seq_request(struct qpnp_vadc_chip *dev,
+ enum qpnp_vadc_trigger trigger_channel,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_default(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_pmic_therm(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_pmi_chg_temp(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_batt_therm(
+ struct qpnp_vadc_chip *vadc, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skuaa_batt_therm(
+ struct qpnp_vadc_chip *vadc, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skug_batt_therm(
+ struct qpnp_vadc_chip *vadc, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skuh_batt_therm(
+ struct qpnp_vadc_chip *vdev, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_qrd_skut1_batt_therm(
+ struct qpnp_vadc_chip *vdev, int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_smb_batt_therm(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_batt_id(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tdkntcg_therm(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_therm_pu1(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_therm_pu2(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_therm_ncp03(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
+static inline struct qpnp_vadc_chip *qpnp_get_vadc(struct device *dev,
+ const char *name)
+{ return ERR_PTR(-ENXIO); }
+static inline int32_t qpnp_get_vadc_gain_and_offset(struct qpnp_vadc_chip *dev,
+ struct qpnp_vadc_linear_graph *param,
+ enum qpnp_adc_calib_type calib_type)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_usb_scaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_vbatt_rscaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_absolute_rthr(struct qpnp_vadc_chip *dev,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_absolute_rthr(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_btm_scaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_qrd_skuh_btm_scaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_qrd_skut1_btm_scaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_scale_millidegc_pmic_voltage_thr(
+ struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_scale_therm_voltage_pu2(
+ struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_config *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_scale_voltage_therm_pu2(
+ struct qpnp_vadc_chip *dev,
+ uint32_t reg, int64_t *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_smb_btm_rscaler(struct qpnp_vadc_chip *dev,
+ struct qpnp_adc_tm_btm_param *param,
+ uint32_t *low_threshold, uint32_t *high_threshold)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_iadc_sync_request(struct qpnp_vadc_chip *dev,
+ enum qpnp_vadc_channels channel)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_iadc_sync_complete_request(
+ struct qpnp_vadc_chip *dev,
+ enum qpnp_vadc_channels channel,
+ struct qpnp_vadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_vbat_sns_comp_result(struct qpnp_vadc_chip *dev,
+ int64_t *result)
+{ return -ENXIO; }
+static inline int qpnp_adc_get_revid_version(struct device *dev)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_channel_monitor(struct qpnp_vadc_chip *chip,
+ struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_end_channel_monitor(struct qpnp_vadc_chip *chip)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_calib_vref(struct qpnp_vadc_chip *vadc,
+ enum qpnp_adc_calib_type calib_type,
+ int *calib_data)
+{ return -ENXIO; }
+static inline int32_t qpnp_vadc_calib_gnd(struct qpnp_vadc_chip *vadc,
+ enum qpnp_adc_calib_type calib_type,
+ int *calib_data)
+{ return -ENXIO; }
+
+static inline int32_t qpnp_adc_enable_voltage(struct qpnp_adc_drv *adc)
+{ return -ENXIO; }
+
+static inline void qpnp_adc_disable_voltage(struct qpnp_adc_drv *adc)
+{ return; }
+
+static inline void qpnp_adc_free_voltage_resource(struct qpnp_adc_drv *adc)
+{ return; }
+
+#endif
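A client driver typically resolves a VADC handle once and then requests conversions on a channel through the interface declared above. The following is a minimal, hypothetical sketch, not part of this patch; the "my-vadc" devicetree name and the <linux/qpnp/qpnp-adc.h> include path are assumptions.

#include <linux/err.h>
#include <linux/device.h>
#include <linux/qpnp/qpnp-adc.h>

static int example_read_vadc_channel(struct device *dev, uint32_t channel)
{
	struct qpnp_vadc_chip *vadc;
	struct qpnp_vadc_result result;
	int rc;

	/* "my-vadc" is a placeholder DT property name, see the bindings doc */
	vadc = qpnp_get_vadc(dev, "my-vadc");
	if (IS_ERR(vadc))
		return PTR_ERR(vadc);	/* commonly -EPROBE_DEFER */

	rc = qpnp_vadc_read(vadc, channel, &result);
	if (rc)
		dev_err(dev, "VADC read of channel %u failed, rc=%d\n",
			channel, rc);
	return rc;
}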
+
+/* Public API */
+#if defined(CONFIG_SENSORS_QPNP_ADC_CURRENT) \
+ || defined(CONFIG_SENSORS_QPNP_ADC_CURRENT_MODULE)
+/**
+ * qpnp_iadc_read() - Performs ADC read on the current channel.
+ * @dev: Structure device for qpnp iadc
+ * @channel: Input channel to perform the ADC read.
+ * @result: Current across rsense in mA.
+ * @return: 0 on success.
+ */
+int32_t qpnp_iadc_read(struct qpnp_iadc_chip *dev,
+ enum qpnp_iadc_channels channel,
+ struct qpnp_iadc_result *result);
+/**
+ * qpnp_iadc_get_rsense() - Reads the RDS resistance value from the
+ *		trim registers.
+ * @dev: Structure device for qpnp iadc
+ * @rsense: RDS resistance in nOhms.
+ * @return: 0 on success.
+ */
+int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *dev, int32_t *rsense);
+/**
+ * qpnp_iadc_get_gain_and_offset() - Performs gain calibration
+ * over 17.8571mV and offset over selected
+ * channel. Channel can be internal rsense,
+ * external rsense and alternate lead pair.
+ * @dev: Structure device for qpnp iadc
+ * @result: result structure where the gain and offset is stored of
+ * type qpnp_iadc_calib.
+ * @return: 0 on success.
+ */
+int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *dev,
+ struct qpnp_iadc_calib *result);
+/**
+ * qpnp_get_iadc() - Clients register with the iadc using the corresponding
+ *		device instance from which they want to read the channels.
+ *		Read the bindings document on how to pass the phandle for
+ *		the corresponding iadc driver to register with.
+ * @dev:	Client's device structure
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the iadc
+ * @struct qpnp_iadc_chip * - On success returns the iadc device structure
+ *		pointer used every time the client makes an ADC request.
+ */
+struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev, const char *name);
+/**
+ * qpnp_iadc_vadc_sync_read() - Performs synchronous VADC and IADC read.
+ * The api is to be used only by the BMS to perform
+ * simultaneous VADC and IADC measurement for battery voltage
+ * and current.
+ * @dev: Structure device for qpnp iadc
+ * @i_channel: Input battery current channel to perform the IADC read.
+ * @i_result: Current across the rsense in mA.
+ * @v_channel: Input battery voltage channel to perform VADC read.
+ * @v_result: Voltage on the vbatt channel with units in mV.
+ * @return: 0 on success.
+ */
+int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *dev,
+ enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
+ enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result);
+/**
+ * qpnp_iadc_calibrate_for_trim - Clients can use this API to re-calibrate
+ * IADC. The offset and gain values are programmed in the trim
+ * registers. The offset and the gain can be retrieved using
+ * qpnp_iadc_get_gain_and_offset
+ * @dev: Structure device for qpnp iadc
+ * @batfet_closed: batfet is opened or closed. The IADC chooses proper
+ * channel (internal/external) based on batfet status
+ * for calibration.
+ * RETURNS: 0 on success.
+ */
+int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *dev,
+ bool batfet_closed);
+/**
+ * qpnp_iadc_comp_result() - Compensates the result of the current based on
+ * the gain and offset coefficients and rsense parameters.
+ * @dev: Structure device for qpnp iadc
+ * @result: Current value to perform the compensation.
+ * @return: 0 on success.
+ */
+int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *dev, int64_t *result);
+/**
+ * qpnp_iadc_skip_calibration() - Clients can use this API to ask the driver
+ * to skip iadc calibrations
+ * @dev: Structure device for qpnp iadc
+ * @return: 0 on success and -EPROBE_DEFER when probe for the device
+ * has not occurred.
+ */
+int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *dev);
+/**
+ * qpnp_iadc_resume_calibration() - Clients can use this API to ask the driver
+ * to resume iadc calibrations
+ * @dev: Structure device for qpnp iadc
+ * @return: 0 on success and -EPROBE_DEFER when probe for the device
+ * has not occurred.
+ */
+int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *dev);
+#else
+static inline int32_t qpnp_iadc_read(struct qpnp_iadc_chip *iadc,
+ enum qpnp_iadc_channels channel, struct qpnp_iadc_result *result)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_get_rsense(struct qpnp_iadc_chip *iadc,
+ int32_t *rsense)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_get_gain_and_offset(struct qpnp_iadc_chip *iadc,
+ struct qpnp_iadc_calib *result)
+{ return -ENXIO; }
+static inline struct qpnp_iadc_chip *qpnp_get_iadc(struct device *dev,
+ const char *name)
+{ return ERR_PTR(-ENXIO); }
+static inline int32_t qpnp_iadc_vadc_sync_read(struct qpnp_iadc_chip *iadc,
+ enum qpnp_iadc_channels i_channel, struct qpnp_iadc_result *i_result,
+ enum qpnp_vadc_channels v_channel, struct qpnp_vadc_result *v_result)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_calibrate_for_trim(struct qpnp_iadc_chip *iadc,
+ bool batfet_closed)
+{ return -ENXIO; }
+static inline int32_t qpnp_iadc_comp_result(struct qpnp_iadc_chip *iadc,
+ int64_t *result)
+{ return -ENXIO; }
+static inline int qpnp_iadc_skip_calibration(struct qpnp_iadc_chip *iadc)
+{ return -ENXIO; }
+static inline int qpnp_iadc_resume_calibration(struct qpnp_iadc_chip *iadc)
+{ return -ENXIO; }
+#endif
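qpnp_iadc_vadc_sync_read() pairs one current sample with one voltage sample, which the comment above reserves for the BMS. A hedged sketch of such a caller follows; the "my-iadc" name, channel arguments and include path are placeholders, not part of this patch.

#include <linux/err.h>
#include <linux/device.h>
#include <linux/qpnp/qpnp-adc.h>

static int example_read_batt_current_voltage(struct device *dev,
		enum qpnp_iadc_channels i_chan,
		enum qpnp_vadc_channels v_chan)
{
	struct qpnp_iadc_chip *iadc;
	struct qpnp_iadc_result i_result;
	struct qpnp_vadc_result v_result;
	int rc;

	iadc = qpnp_get_iadc(dev, "my-iadc");	/* placeholder DT name */
	if (IS_ERR(iadc))
		return PTR_ERR(iadc);

	/* one synchronized sample: current in mA, voltage in mV */
	rc = qpnp_iadc_vadc_sync_read(iadc, i_chan, &i_result,
				      v_chan, &v_result);
	if (rc)
		dev_err(dev, "sync IADC/VADC read failed, rc=%d\n", rc);
	return rc;
}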
+
+/* Public API */
+#if defined(CONFIG_THERMAL_QPNP_ADC_TM) \
+ || defined(CONFIG_THERMAL_QPNP_ADC_TM_MODULE)
+/**
+ * qpnp_adc_tm_usbid_configure() - Configures Channel 0 of VADC_BTM to
+ * monitor USB_ID channel using 100k internal pull-up.
+ * USB driver passes the high/low voltage threshold along
+ * with the notification callback once the set thresholds
+ * are crossed.
+ * @param: Structure pointer of qpnp_adc_tm_btm_param type.
+ * Clients pass the low/high voltage along with the threshold
+ * notification callback.
+ */
+int32_t qpnp_adc_tm_usbid_configure(struct qpnp_adc_tm_chip *chip,
+ struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_adc_tm_usbid_end() - Disables the monitoring of channel 0 that is
+ * assigned for monitoring USB_ID. Disables the low/high
+ * threshold activation for channel 0 as well.
+ * @param: none.
+ */
+int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip);
+/**
+ * qpnp_adc_tm_channel_measure() - Configures a channel for kernel clients to
+ * monitor the corresponding ADC channel for threshold detection.
+ * Driver passes the high/low voltage threshold along
+ * with the notification callback once the set thresholds
+ * are crossed.
+ * @param: Structure pointer of qpnp_adc_tm_btm_param type.
+ * Clients pass the low/high temperature along with the threshold
+ * notification callback.
+ */
+int32_t qpnp_adc_tm_channel_measure(struct qpnp_adc_tm_chip *chip,
+ struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_adc_tm_disable_chan_meas() - Disables the monitoring of the channel
+ * that is assigned to a kernel client. Disables the low/high
+ * threshold activation for the corresponding channel.
+ * @param: Structure pointer of qpnp_adc_tm_btm_param type.
+ * This is used to identify the channel for which the corresponding
+ * high/low threshold notification will be disabled.
+ */
+int32_t qpnp_adc_tm_disable_chan_meas(struct qpnp_adc_tm_chip *chip,
+ struct qpnp_adc_tm_btm_param *param);
+/**
+ * qpnp_get_adc_tm() - Clients register with the adc_tm using the
+ *		corresponding device instance from which they want to read
+ *		the channels. Read the bindings document on how to pass the
+ *		phandle for the corresponding adc_tm driver to register with.
+ * @name:	Corresponding client's DT parser name. Read the DT bindings
+ *		document on how to register with the adc_tm
+ * @struct qpnp_adc_tm_chip * - On success returns the adc_tm device structure
+ *		pointer that needs to be used during an ADC TM request.
+ */
+struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev, const char *name);
+#else
+static inline int32_t qpnp_adc_tm_usbid_configure(
+ struct qpnp_adc_tm_chip *chip,
+ struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_usbid_end(struct qpnp_adc_tm_chip *chip)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_channel_measure(
+ struct qpnp_adc_tm_chip *chip,
+ struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline int32_t qpnp_adc_tm_disable_chan_meas(
+ struct qpnp_adc_tm_chip *chip,
+ struct qpnp_adc_tm_btm_param *param)
+{ return -ENXIO; }
+static inline struct qpnp_adc_tm_chip *qpnp_get_adc_tm(struct device *dev,
+ const char *name)
+{ return ERR_PTR(-ENXIO); }
+#endif
+
+#endif
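The ADC threshold-monitoring (TM) interface is callback driven: a client fills a struct qpnp_adc_tm_btm_param (fields defined earlier in this header) with its channel, thresholds and notification callback, arms monitoring with qpnp_adc_tm_channel_measure(), and disarms it with qpnp_adc_tm_disable_chan_meas(). A minimal, hypothetical sketch; the "my-adc-tm" name and include path are assumptions.

#include <linux/err.h>
#include <linux/device.h>
#include <linux/qpnp/qpnp-adc.h>

static struct qpnp_adc_tm_chip *example_tm_chip;
static struct qpnp_adc_tm_btm_param example_tm_param;

static int example_start_threshold_monitor(struct device *dev)
{
	int rc;

	example_tm_chip = qpnp_get_adc_tm(dev, "my-adc-tm"); /* placeholder */
	if (IS_ERR(example_tm_chip))
		return PTR_ERR(example_tm_chip);

	/*
	 * The channel, low/high thresholds and the notification callback are
	 * filled into example_tm_param through the qpnp_adc_tm_btm_param
	 * fields defined earlier in this header (omitted here).
	 */
	rc = qpnp_adc_tm_channel_measure(example_tm_chip, &example_tm_param);
	if (rc)
		dev_err(dev, "channel measure failed, rc=%d\n", rc);
	return rc;
}

static void example_stop_threshold_monitor(void)
{
	qpnp_adc_tm_disable_chan_meas(example_tm_chip, &example_tm_param);
}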
diff --git a/include/linux/qpnp/qpnp-haptic.h b/include/linux/qpnp/qpnp-haptic.h
new file mode 100644
index 000000000000..92a66e844f94
--- /dev/null
+++ b/include/linux/qpnp/qpnp-haptic.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QPNP_HAPTIC_H
+#define __QPNP_HAPTIC_H
+/* interface for other modules to play different sequences */
+#ifdef CONFIG_QPNP_HAPTIC
+int qpnp_hap_play_byte(u8 data, bool on);
+#else
+static inline int qpnp_hap_play_byte(u8 data, bool on)
+{
+	return 0;
+}
+#endif
+#endif
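qpnp_hap_play_byte() is the hook other modules use to drive the haptic sequencer directly; with CONFIG_QPNP_HAPTIC disabled the stub above simply reports success. A hedged usage sketch follows: the amplitude byte is purely illustrative and the exact data semantics are defined by the haptic driver.

#include <linux/types.h>
#include <linux/qpnp/qpnp-haptic.h>

static int example_pulse_haptic(void)
{
	int rc;

	/* 0x7f is only an illustrative amplitude byte */
	rc = qpnp_hap_play_byte(0x7f, true);	/* start playing */
	if (rc)
		return rc;

	return qpnp_hap_play_byte(0, false);	/* stop */
}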
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
new file mode 100644
index 000000000000..d67efaf678e4
--- /dev/null
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -0,0 +1,176 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QPNP_REVID
+#define __QPNP_REVID
+
+#define PM8226_V2P2_REV1 0x00
+#define PM8226_V2P2_REV2 0x00
+#define PM8226_V2P2_REV3 0x02
+#define PM8226_V2P2_REV4 0x02
+#define PM8226_V2P2_TYPE 0x51
+#define PM8226_V2P2_SUBTYPE 0x04
+
+#define PM8226_V2P1_REV1 0x00
+#define PM8226_V2P1_REV2 0x00
+#define PM8226_V2P1_REV3 0x01
+#define PM8226_V2P1_REV4 0x02
+#define PM8226_V2P1_TYPE 0x51
+#define PM8226_V2P1_SUBTYPE 0x04
+
+#define PM8226_V2P0_REV1 0x00
+#define PM8226_V2P0_REV2 0x00
+#define PM8226_V2P0_REV3 0x00
+#define PM8226_V2P0_REV4 0x02
+#define PM8226_V2P0_TYPE 0x51
+#define PM8226_V2P0_SUBTYPE 0x04
+
+#define PM8226_V1P0_REV1 0x00
+#define PM8226_V1P0_REV2 0x00
+#define PM8226_V1P0_REV3 0x00
+#define PM8226_V1P0_REV4 0x00
+#define PM8226_V1P0_TYPE 0x51
+#define PM8226_V1P0_SUBTYPE 0x04
+
+#define PM8941_V1P0_REV1 0x00
+#define PM8941_V1P0_REV2 0x00
+#define PM8941_V1P0_REV3 0x00
+#define PM8941_V1P0_REV4 0x01
+#define PM8941_V1P0_TYPE 0x51
+#define PM8941_V1P0_SUBTYPE 0x01
+
+#define PM8941_V2P0_REV1 0x00
+#define PM8941_V2P0_REV2 0x00
+#define PM8941_V2P0_REV3 0x00
+#define PM8941_V2P0_REV4 0x01
+#define PM8941_V2P0_TYPE 0x51
+#define PM8941_V2P0_SUBTYPE 0x01
+
+#define PM8941_V3P0_REV1 0x00
+#define PM8941_V3P0_REV2 0x00
+#define PM8941_V3P0_REV3 0x00
+#define PM8941_V3P0_REV4 0x03
+#define PM8941_V3P0_TYPE 0x51
+#define PM8941_V3P0_SUBTYPE 0x01
+
+#define PM8941_V3P1_REV1 0x00
+#define PM8941_V3P1_REV2 0x00
+#define PM8941_V3P1_REV3 0x01
+#define PM8941_V3P1_REV4 0x03
+#define PM8941_V3P1_TYPE 0x51
+#define PM8941_V3P1_SUBTYPE 0x01
+
+#define PM8110_V1P0_REV1 0x00
+#define PM8110_V1P0_REV2 0x00
+#define PM8110_V1P0_REV3 0x00
+#define PM8110_V1P0_REV4 0x01
+#define PM8110_V1P0_TYPE 0x51
+#define PM8110_V1P0_SUBTYPE 0x05
+
+#define PM8110_V1P1_REV1 0x00
+#define PM8110_V1P1_REV2 0x01
+#define PM8110_V1P1_REV3 0x00
+#define PM8110_V1P1_REV4 0x01
+#define PM8110_V1P1_TYPE 0x51
+#define PM8110_V1P1_SUBTYPE 0x05
+
+#define PM8110_V1P3_REV1 0x00
+#define PM8110_V1P3_REV2 0x03
+#define PM8110_V1P3_REV3 0x00
+#define PM8110_V1P3_REV4 0x01
+#define PM8110_V1P3_TYPE 0x51
+#define PM8110_V1P3_SUBTYPE 0x05
+
+#define PM8110_V2P0_REV1 0x00
+#define PM8110_V2P0_REV2 0x00
+#define PM8110_V2P0_REV3 0x00
+#define PM8110_V2P0_REV4 0x02
+#define PM8110_V2P0_TYPE 0x51
+#define PM8110_V2P0_SUBTYPE 0x05
+
+#define PM8916_V1P0_REV1 0x00
+#define PM8916_V1P0_REV2 0x00
+#define PM8916_V1P0_REV3 0x00
+#define PM8916_V1P0_REV4 0x01
+#define PM8916_V1P0_TYPE 0x51
+#define PM8916_V1P0_SUBTYPE 0x0B
+
+#define PM8916_V1P1_REV1 0x00
+#define PM8916_V1P1_REV2 0x00
+#define PM8916_V1P1_REV3 0x01
+#define PM8916_V1P1_REV4 0x01
+#define PM8916_V1P1_TYPE 0x51
+#define PM8916_V1P1_SUBTYPE 0x0B
+
+#define PM8916_V2P0_REV1 0x00
+#define PM8916_V2P0_REV2 0x00
+#define PM8916_V2P0_REV3 0x00
+#define PM8916_V2P0_REV4 0x02
+#define PM8916_V2P0_TYPE 0x51
+#define PM8916_V2P0_SUBTYPE 0x0B
+
+#define PM8909_V1P0_REV1 0x00
+#define PM8909_V1P0_REV2 0x00
+#define PM8909_V1P0_REV3 0x00
+#define PM8909_V1P0_REV4 0x01
+#define PM8909_V1P0_TYPE 0x51
+#define PM8909_V1P0_SUBTYPE 0x0D
+
+#define PM8909_V1P1_REV1 0x00
+#define PM8909_V1P1_REV2 0x00
+#define PM8909_V1P1_REV3 0x01
+#define PM8909_V1P1_REV4 0x01
+#define PM8909_V1P1_TYPE 0x51
+#define PM8909_V1P1_SUBTYPE 0x0D
+
+#define PMI8994_V1P0_REV1 0x00
+#define PMI8994_V1P0_REV2 0x00
+#define PMI8994_V1P0_REV3 0x00
+#define PMI8994_V1P0_REV4 0x01
+#define PMI8994_V1P0_TYPE 0x51
+#define PMI8994_V1P0_SUBTYPE 0x0A
+
+#define PMI8994_V2P0_REV1 0x00
+#define PMI8994_V2P0_REV2 0x00
+#define PMI8994_V2P0_REV3 0x00
+#define PMI8994_V2P0_REV4 0x02
+#define PMI8994_V2P0_TYPE 0x51
+#define PMI8994_V2P0_SUBTYPE 0x0A
+
+#define PM8950_V1P0_REV4 0x01
+#define PM8950_V1P0_TYPE 0x51
+#define PM8950_V1P0_SUBTYPE 0x10
+
+#define PM8950_V2P0_REV4 0x02
+#define PM8950_V2P0_TYPE 0x51
+#define PM8950_V2P0_SUBTYPE 0x10
+
+struct pmic_revid_data {
+ u8 rev1;
+ u8 rev2;
+ u8 rev3;
+ u8 rev4;
+ u8 pmic_type;
+ u8 pmic_subtype;
+ const char *pmic_name;
+};
+
+#ifdef CONFIG_QPNP_REVID
+struct pmic_revid_data *get_revid_data(struct device_node *dev_node);
+#else
+static inline
+struct pmic_revid_data *get_revid_data(struct device_node *dev_node)
+{
+ return NULL;
+}
+#endif
+#endif
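get_revid_data() together with the TYPE/SUBTYPE/REV* constants above lets a driver branch on the exact PMIC revision. A small sketch that checks for PMI8994 v2.0; the IS_ERR_OR_NULL check is defensive, since the stub returns NULL while a probing driver may hand back an error pointer.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/qpnp/qpnp-revid.h>

static bool example_is_pmi8994_v2p0(struct device_node *revid_node)
{
	struct pmic_revid_data *revid = get_revid_data(revid_node);

	if (IS_ERR_OR_NULL(revid))
		return false;

	return revid->pmic_type == PMI8994_V2P0_TYPE &&
	       revid->pmic_subtype == PMI8994_V2P0_SUBTYPE &&
	       revid->rev4 == PMI8994_V2P0_REV4;
}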
diff --git a/include/linux/regulator/kryo-regulator.h b/include/linux/regulator/kryo-regulator.h
new file mode 100644
index 000000000000..ab51f8629d2d
--- /dev/null
+++ b/include/linux/regulator/kryo-regulator.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __KRYO_REGULATOR_H__
+#define __KRYO_REGULATOR_H__
+
+/**
+ * enum kryo_supply_mode - supported operating modes by this regulator type.
+ * Use negative logic to ensure BHS mode is treated as the safe default by
+ * the regulator framework. This is necessary since LDO mode can only be enabled
+ * when several constraints are satisfied. Consumers of this regulator are
+ * expected to request changes in operating modes through the use of
+ * regulator_allow_bypass() passing in the desired Kryo supply mode.
+ * %BHS_MODE: to select BHS as operating mode
+ * %LDO_MODE: to select LDO as operating mode
+ */
+enum kryo_supply_mode {
+ BHS_MODE = false,
+ LDO_MODE = true,
+};
+
+#endif /* __KRYO_REGULATOR_H__ */
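As the comment notes, consumers request BHS/LDO transitions through the standard regulator bypass call rather than a dedicated API. A sketch of a hypothetical consumer asking for LDO mode, assuming reg was obtained earlier with regulator_get():

#include <linux/regulator/consumer.h>
#include <linux/regulator/kryo-regulator.h>

static int example_request_kryo_ldo(struct regulator *reg)
{
	/* LDO_MODE is true, BHS_MODE is false, per the enum above */
	return regulator_allow_bypass(reg, LDO_MODE);
}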
diff --git a/include/linux/regulator/proxy-consumer.h b/include/linux/regulator/proxy-consumer.h
new file mode 100644
index 000000000000..10ba5411a983
--- /dev/null
+++ b/include/linux/regulator/proxy-consumer.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_PROXY_CONSUMER_H_
+#define _LINUX_REGULATOR_PROXY_CONSUMER_H_
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct proxy_consumer;
+
+#ifdef CONFIG_REGULATOR_PROXY_CONSUMER
+
+struct proxy_consumer *regulator_proxy_consumer_register(struct device *reg_dev,
+ struct device_node *reg_node);
+
+int regulator_proxy_consumer_unregister(struct proxy_consumer *consumer);
+
+#else
+
+static inline struct proxy_consumer *regulator_proxy_consumer_register(
+ struct device *reg_dev, struct device_node *reg_node)
+{ return NULL; }
+
+static inline int regulator_proxy_consumer_unregister(
+ struct proxy_consumer *consumer)
+{ return 0; }
+
+#endif
+
+#endif
diff --git a/include/linux/regulator/qpnp-regulator.h b/include/linux/regulator/qpnp-regulator.h
new file mode 100644
index 000000000000..c7afeb50f244
--- /dev/null
+++ b/include/linux/regulator/qpnp-regulator.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __REGULATOR_QPNP_REGULATOR_H__
+#define __REGULATOR_QPNP_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+
+#define QPNP_REGULATOR_DRIVER_NAME "qcom,qpnp-regulator"
+
+/* Pin control enable input pins. */
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_NONE 0x00
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN0 0x01
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN1 0x02
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN2 0x04
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_EN3 0x08
+#define QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT 0x10
+
+/* Pin control high power mode input pins. */
+#define QPNP_REGULATOR_PIN_CTRL_HPM_NONE 0x00
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN0 0x01
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN1 0x02
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN2 0x04
+#define QPNP_REGULATOR_PIN_CTRL_HPM_EN3 0x08
+#define QPNP_REGULATOR_PIN_CTRL_HPM_SLEEP_B 0x10
+#define QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT 0x20
+
+/*
+ * Used with enable parameters to specify that hardware default register values
+ * should be left unaltered.
+ */
+#define QPNP_REGULATOR_DISABLE 0
+#define QPNP_REGULATOR_ENABLE 1
+#define QPNP_REGULATOR_USE_HW_DEFAULT 2
+
+/* Soft start strength of a voltage switch type regulator */
+enum qpnp_vs_soft_start_str {
+ QPNP_VS_SOFT_START_STR_0P05_UA,
+ QPNP_VS_SOFT_START_STR_0P25_UA,
+ QPNP_VS_SOFT_START_STR_0P55_UA,
+ QPNP_VS_SOFT_START_STR_0P75_UA,
+ QPNP_VS_SOFT_START_STR_HW_DEFAULT,
+};
+
+/* Current limit of a boost type regulator */
+enum qpnp_boost_current_limit {
+ QPNP_BOOST_CURRENT_LIMIT_300_MA,
+ QPNP_BOOST_CURRENT_LIMIT_600_MA,
+ QPNP_BOOST_CURRENT_LIMIT_900_MA,
+ QPNP_BOOST_CURRENT_LIMIT_1200_MA,
+ QPNP_BOOST_CURRENT_LIMIT_1500_MA,
+ QPNP_BOOST_CURRENT_LIMIT_1800_MA,
+ QPNP_BOOST_CURRENT_LIMIT_2100_MA,
+ QPNP_BOOST_CURRENT_LIMIT_2400_MA,
+ QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT,
+};
+
+/**
+ * struct qpnp_regulator_platform_data - qpnp-regulator initialization data
+ * @init_data: regulator constraints
+ * @pull_down_enable: 1 = Enable output pull down resistor when the
+ * regulator is disabled
+ * 0 = Disable pull down resistor
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * pull down state
+ * @pin_ctrl_enable: Bit mask specifying which hardware pins should be
+ * used to enable the regulator, if any
+ * Value should be an ORing of
+ * QPNP_REGULATOR_PIN_CTRL_ENABLE_* constants. If
+ * the bit specified by
+ * QPNP_REGULATOR_PIN_CTRL_ENABLE_HW_DEFAULT is
+ * set, then pin control enable hardware registers
+ * will not be modified.
+ * @pin_ctrl_hpm: Bit mask specifying which hardware pins should be
+ * used to force the regulator into high power
+ * mode, if any
+ * Value should be an ORing of
+ * QPNP_REGULATOR_PIN_CTRL_HPM_* constants. If
+ * the bit specified by
+ * QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT is
+ * set, then pin control mode hardware registers
+ * will not be modified.
+ * @system_load: Load in uA present on regulator that is not captured
+ * by any consumer request
+ * @enable_time: Time in us to delay after enabling the regulator
+ * @ocp_enable: 1 = Allow over current protection (OCP) to be
+ * enabled for voltage switch type regulators so
+ * that they latch off automatically when over
+ * current is detected. OCP is enabled when in HPM
+ * or auto mode.
+ * 0 = Disable OCP
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * OCP state
+ * @ocp_irq: IRQ number of the voltage switch OCP IRQ. If
+ * specified the voltage switch will be toggled off
+ * and back on when OCP triggers in order to handle
+ * high in-rush current.
+ * @ocp_max_retries: Maximum number of times to try toggling a voltage
+ * switch off and back on as a result of
+ * consecutive over current events.
+ * @ocp_retry_delay_ms: Time to delay in milliseconds between each
+ * voltage switch toggle after an over current
+ * event takes place.
+ * @boost_current_limit: This parameter sets the current limit of boost type
+ * regulators. Its value should be one of
+ * QPNP_BOOST_CURRENT_LIMIT_*. If its value is
+ * QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT, then the
+ * boost current limit will be left at its default
+ * hardware value.
+ * @soft_start_enable: 1 = Enable soft start for LDO and voltage switch
+ * type regulators so that output voltage slowly
+ * ramps up when the regulator is enabled
+ * 0 = Disable soft start
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * soft start state
+ * @vs_soft_start_strength: This parameter sets the soft start strength for
+ * voltage switch type regulators. Its value
+ * should be one of QPNP_VS_SOFT_START_STR_*. If
+ * its value is QPNP_VS_SOFT_START_STR_HW_DEFAULT,
+ * then the soft start strength will be left at its
+ * default hardware value.
+ * @auto_mode_enable: 1 = Enable automatic hardware selection of regulator
+ * mode (HPM vs LPM). Auto mode is not available
+ * on boost type regulators
+ * 0 = Disable auto mode selection
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * auto mode state
+ * @bypass_mode_enable: 1 = Enable bypass mode for an LDO type regulator so
+ * that it acts like a switch and simply outputs
+ * its input voltage
+ * 0 = Do not enable bypass mode
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * bypass mode state
+ * @hpm_enable: 1 = Enable high power mode (HPM), also referred to
+ * as NPM. HPM consumes more ground current than
+ * LPM, but it can source significantly higher load
+ * current. HPM is not available on boost type
+ * regulators. For voltage switch type regulators,
+ * HPM implies that over current protection and
+ * soft start are active all the time. This
+ * configuration can be overwritten by changing the
+ * regulator's mode dynamically.
+ * 0 = Do not enable HPM
+ * QPNP_REGULATOR_USE_HW_DEFAULT = do not modify
+ * HPM state
+ * @base_addr:		SPMI base address for the regulator peripheral
+ */
+struct qpnp_regulator_platform_data {
+ struct regulator_init_data init_data;
+ int pull_down_enable;
+ unsigned pin_ctrl_enable;
+ unsigned pin_ctrl_hpm;
+ int system_load;
+ int enable_time;
+ int ocp_enable;
+ int ocp_irq;
+ int ocp_max_retries;
+ int ocp_retry_delay_ms;
+ enum qpnp_boost_current_limit boost_current_limit;
+ int soft_start_enable;
+ enum qpnp_vs_soft_start_str vs_soft_start_strength;
+ int auto_mode_enable;
+ int bypass_mode_enable;
+ int hpm_enable;
+ u16 base_addr;
+};
+
+#ifdef CONFIG_REGULATOR_QPNP
+
+/**
+ * qpnp_regulator_init() - register spmi driver for qpnp-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+int __init qpnp_regulator_init(void);
+
+#else
+
+static inline int __init qpnp_regulator_init(void)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_REGULATOR_QPNP */
+
+#endif
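For boards that hand qpnp-regulator its configuration as platform data, the *_USE_HW_DEFAULT and *_HW_DEFAULT values let most fields be left untouched. A hedged example of such a table; the load, delay and base address values are purely illustrative and the init_data constraints are omitted.

#include <linux/regulator/qpnp-regulator.h>

static struct qpnp_regulator_platform_data example_vreg_pdata = {
	/* .init_data constraints omitted for brevity */
	.pull_down_enable	= QPNP_REGULATOR_ENABLE,
	.pin_ctrl_enable	= QPNP_REGULATOR_PIN_CTRL_ENABLE_NONE,
	.pin_ctrl_hpm		= QPNP_REGULATOR_PIN_CTRL_HPM_HW_DEFAULT,
	.system_load		= 10000,	/* 10 mA, illustrative */
	.enable_time		= 500,		/* 500 us, illustrative */
	.ocp_enable		= QPNP_REGULATOR_USE_HW_DEFAULT,
	.boost_current_limit	= QPNP_BOOST_CURRENT_LIMIT_HW_DEFAULT,
	.soft_start_enable	= QPNP_REGULATOR_USE_HW_DEFAULT,
	.vs_soft_start_strength	= QPNP_VS_SOFT_START_STR_HW_DEFAULT,
	.auto_mode_enable	= QPNP_REGULATOR_USE_HW_DEFAULT,
	.bypass_mode_enable	= QPNP_REGULATOR_USE_HW_DEFAULT,
	.hpm_enable		= QPNP_REGULATOR_USE_HW_DEFAULT,
	.base_addr		= 0x1400,	/* illustrative SPMI base */
};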
diff --git a/include/linux/regulator/rpm-smd-regulator.h b/include/linux/regulator/rpm-smd-regulator.h
new file mode 100644
index 000000000000..c57995d3f5a2
--- /dev/null
+++ b/include/linux/regulator/rpm-smd-regulator.h
@@ -0,0 +1,132 @@
+/* Copyright (c) 2012-2013, 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_RPM_SMD_H
+#define _LINUX_REGULATOR_RPM_SMD_H
+
+#include <linux/device.h>
+
+struct rpm_regulator;
+
+/**
+ * enum rpm_regulator_voltage_corner - possible voltage corner values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for corner type regulators as if they had
+ * units of uV.
+ *
+ * Note, the meaning of corner values is set by the RPM. It is possible that
+ * future platforms will utilize different corner values. The values specified
+ * in this enum correspond to MSM8974 for PMIC PM8841 SMPS 2 (VDD_Dig).
+ */
+enum rpm_regulator_voltage_corner {
+ RPM_REGULATOR_CORNER_NONE = 1,
+ RPM_REGULATOR_CORNER_RETENTION,
+ RPM_REGULATOR_CORNER_SVS_KRAIT,
+ RPM_REGULATOR_CORNER_SVS_SOC,
+ RPM_REGULATOR_CORNER_NORMAL,
+ RPM_REGULATOR_CORNER_TURBO,
+ RPM_REGULATOR_CORNER_SUPER_TURBO,
+};
+
+/**
+ * enum rpm_regulator_voltage_level - possible voltage level values
+ *
+ * These should be used in regulator_set_voltage() and
+ * rpm_regulator_set_voltage() calls for level type regulators as if they had
+ * units of uV.
+ *
+ * Note: the meaning of level values is set by the RPM.
+ */
+enum rpm_regulator_voltage_level {
+ RPM_REGULATOR_LEVEL_NONE = 0,
+ RPM_REGULATOR_LEVEL_RETENTION = 16,
+ RPM_REGULATOR_LEVEL_RETENTION_PLUS = 32,
+ RPM_REGULATOR_LEVEL_MIN_SVS = 48,
+ RPM_REGULATOR_LEVEL_LOW_SVS = 64,
+ RPM_REGULATOR_LEVEL_SVS = 128,
+ RPM_REGULATOR_LEVEL_SVS_PLUS = 192,
+ RPM_REGULATOR_LEVEL_NOM = 256,
+ RPM_REGULATOR_LEVEL_NOM_PLUS = 320,
+ RPM_REGULATOR_LEVEL_TURBO = 384,
+ RPM_REGULATOR_LEVEL_BINNING = 512,
+ RPM_REGULATOR_LEVEL_MAX = 65535,
+};
+
+/**
+ * enum rpm_regulator_mode - control mode for LDO or SMPS type regulators
+ * %RPM_REGULATOR_MODE_AUTO: For SMPS type regulators, use SMPS auto mode so
+ * that the hardware can automatically switch
+ * between PFM and PWM modes based on realtime
+ * load.
+ * LDO type regulators do not support this mode.
+ * %RPM_REGULATOR_MODE_IPEAK: For SMPS type regulators, use aggregated
+ * software current requests to determine
+ * usage of PFM or PWM mode.
+ * For LDO type regulators, use aggregated
+ * software current requests to determine
+ * usage of LPM or HPM mode.
+ * %RPM_REGULATOR_MODE_HPM: For SMPS type regulators, force the
+ * usage of PWM mode.
+ * For LDO type regulators, force the
+ * usage of HPM mode.
+ *
+ * These values should be used in calls to rpm_regulator_set_mode().
+ */
+enum rpm_regulator_mode {
+ RPM_REGULATOR_MODE_AUTO,
+ RPM_REGULATOR_MODE_IPEAK,
+ RPM_REGULATOR_MODE_HPM,
+};
+
+#ifdef CONFIG_REGULATOR_RPM_SMD
+
+struct rpm_regulator *rpm_regulator_get(struct device *dev, const char *supply);
+
+void rpm_regulator_put(struct rpm_regulator *regulator);
+
+int rpm_regulator_enable(struct rpm_regulator *regulator);
+
+int rpm_regulator_disable(struct rpm_regulator *regulator);
+
+int rpm_regulator_set_voltage(struct rpm_regulator *regulator, int min_uV,
+ int max_uV);
+
+int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+ enum rpm_regulator_mode mode);
+
+int __init rpm_smd_regulator_driver_init(void);
+
+#else
+
+static inline struct rpm_regulator *rpm_regulator_get(struct device *dev,
+ const char *supply) { return NULL; }
+
+static inline void rpm_regulator_put(struct rpm_regulator *regulator) { }
+
+static inline int rpm_regulator_enable(struct rpm_regulator *regulator)
+ { return 0; }
+
+static inline int rpm_regulator_disable(struct rpm_regulator *regulator)
+ { return 0; }
+
+static inline int rpm_regulator_set_voltage(struct rpm_regulator *regulator,
+ int min_uV, int max_uV) { return 0; }
+
+static inline int rpm_regulator_set_mode(struct rpm_regulator *regulator,
+ enum rpm_regulator_mode mode) { return 0; }
+
+static inline int __init rpm_smd_regulator_driver_init(void) { return 0; }
+
+#endif /* CONFIG_REGULATOR_RPM_SMD */
+
+#endif
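Corner values from enum rpm_regulator_voltage_corner are passed through the normal set_voltage call as if they were microvolts. A sketch of a hypothetical consumer voting for the NORMAL corner; "example-supply" is a placeholder supply name.

#include <linux/err.h>
#include <linux/regulator/rpm-smd-regulator.h>

static int example_vote_nominal_corner(struct device *dev)
{
	struct rpm_regulator *reg;
	int rc;

	reg = rpm_regulator_get(dev, "example-supply");	/* placeholder name */
	if (IS_ERR_OR_NULL(reg))
		return reg ? PTR_ERR(reg) : -ENODEV;

	/* corner values are passed in place of microvolts */
	rc = rpm_regulator_set_voltage(reg, RPM_REGULATOR_CORNER_NORMAL,
				       RPM_REGULATOR_CORNER_SUPER_TURBO);
	if (!rc)
		rc = rpm_regulator_enable(reg);
	if (rc)
		rpm_regulator_put(reg);
	return rc;
}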
diff --git a/include/linux/regulator/spm-regulator.h b/include/linux/regulator/spm-regulator.h
new file mode 100644
index 000000000000..bd5da2e3352b
--- /dev/null
+++ b/include/linux/regulator/spm-regulator.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_REGULATOR_SPM_H
+#define _LINUX_REGULATOR_SPM_H
+
+#include <linux/err.h>
+#include <linux/init.h>
+
+#ifdef CONFIG_REGULATOR_SPM
+int __init spm_regulator_init(void);
+#else
+static inline int __init spm_regulator_init(void) { return -ENODEV; }
+#endif
+
+#endif
diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h
new file mode 100644
index 000000000000..3900864dc8d1
--- /dev/null
+++ b/include/soc/qcom/spm.h
@@ -0,0 +1,148 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_SPM_H
+#define __ARCH_ARM_MACH_MSM_SPM_H
+
+enum {
+ MSM_SPM_MODE_DISABLED,
+ MSM_SPM_MODE_CLOCK_GATING,
+ MSM_SPM_MODE_RETENTION,
+ MSM_SPM_MODE_GDHS,
+ MSM_SPM_MODE_POWER_COLLAPSE,
+ MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE,
+ MSM_SPM_MODE_FASTPC,
+ MSM_SPM_MODE_NR
+};
+
+enum msm_spm_avs_irq {
+ MSM_SPM_AVS_IRQ_MIN,
+ MSM_SPM_AVS_IRQ_MAX,
+};
+
+struct msm_spm_device;
+struct device_node;
+
+#if defined(CONFIG_MSM_SPM)
+
+int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm);
+int msm_spm_probe_done(void);
+int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel);
+int msm_spm_get_vdd(unsigned int cpu);
+int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node,
+ unsigned int val, int cpu, int vctl_offset);
+struct msm_spm_device *msm_spm_get_device_by_name(const char *name);
+int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+ unsigned int mode, bool notify_rpm);
+int msm_spm_device_init(void);
+bool msm_spm_is_mode_avail(unsigned int mode);
+void msm_spm_dump_regs(unsigned int cpu);
+int msm_spm_is_avs_enabled(unsigned int cpu);
+int msm_spm_avs_enable(unsigned int cpu);
+int msm_spm_avs_disable(unsigned int cpu);
+int msm_spm_avs_set_limit(unsigned int cpu, uint32_t min_lvl,
+ uint32_t max_lvl);
+int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq);
+
+#if defined(CONFIG_MSM_L2_SPM)
+
+/* Public functions */
+
+int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt);
+int msm_spm_enable_fts_lpm(int cpu, uint32_t mode);
+
+#else
+
+static inline int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_enable_fts_lpm(int cpu, uint32_t mode)
+{
+ return -ENOSYS;
+}
+#endif /* defined(CONFIG_MSM_L2_SPM) */
+#else /* defined(CONFIG_MSM_SPM) */
+static inline int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_probe_done(void)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_get_vdd(unsigned int cpu)
+{
+ return 0;
+}
+
+static inline int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node,
+ unsigned int val, int cpu, int vctl_offset)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_device_init(void)
+{
+ return -ENOSYS;
+}
+
+static inline void msm_spm_dump_regs(unsigned int cpu)
+{
+ return;
+}
+
+static inline int msm_spm_config_low_power_mode(struct msm_spm_device *dev,
+ unsigned int mode, bool notify_rpm)
+{
+ return -ENODEV;
+}
+static inline struct msm_spm_device *msm_spm_get_device_by_name(const char *name)
+{
+ return NULL;
+}
+
+static inline bool msm_spm_is_mode_avail(unsigned int mode)
+{
+ return false;
+}
+
+static inline int msm_spm_avs_enable_irq(unsigned int cpu,
+ enum msm_spm_avs_irq irq)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_avs_disable_irq(unsigned int cpu,
+ enum msm_spm_avs_irq irq)
+{
+ return -ENOSYS;
+}
+
+static inline int msm_spm_avs_clear_irq(unsigned int cpu,
+ enum msm_spm_avs_irq irq)
+{
+ return -ENOSYS;
+}
+
+#endif /* defined (CONFIG_MSM_SPM) */
+#endif /* __ARCH_ARM_MACH_MSM_SPM_H */
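A typical caller programs the SPM for the desired low power mode just before the CPU enters its idle sequence. A minimal, hypothetical sketch using the API above; it assumes msm_spm_probe_done() returns zero once the driver has probed.

#include <soc/qcom/spm.h>

static int example_prepare_power_collapse(void)
{
	int rc = msm_spm_probe_done();	/* assumed to return 0 once probed */

	if (rc)
		return rc;

	/* standalone power collapse, without notifying the RPM */
	return msm_spm_set_low_power_mode(
			MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, false);
}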
diff --git a/include/trace/trace_thermal.h b/include/trace/trace_thermal.h
new file mode 100644
index 000000000000..0be0f47f88d2
--- /dev/null
+++ b/include/trace/trace_thermal.h
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thermal
+
+#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_THERMAL_H
+
+#include <linux/tracepoint.h>
+
+#ifdef TRACE_MSM_LMH
+DECLARE_EVENT_CLASS(msm_lmh_print_sensor_reading,
+
+ TP_PROTO(const char *sensor_name, unsigned int intensity),
+
+ TP_ARGS(
+ sensor_name, intensity
+ ),
+
+ TP_STRUCT__entry(
+ __string(_name, sensor_name)
+ __field(unsigned int, reading)
+ ),
+
+ TP_fast_assign(
+ __assign_str(_name, sensor_name);
+ __entry->reading = intensity;
+ ),
+
+ TP_printk(
+ "Sensor:[%s] throttling intensity:%u", __get_str(_name),
+ __entry->reading
+ )
+);
+
+DECLARE_EVENT_CLASS(msm_lmh_print_event,
+
+ TP_PROTO(const char *event_name),
+
+ TP_ARGS(
+ event_name
+ ),
+
+ TP_STRUCT__entry(
+ __string(_name, event_name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(_name, event_name);
+ ),
+
+ TP_printk(
+ "Event:[%s]", __get_str(_name)
+ )
+);
+
+DEFINE_EVENT(msm_lmh_print_sensor_reading, lmh_sensor_interrupt,
+
+ TP_PROTO(const char *sensor_name, unsigned int intensity),
+
+ TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_lmh_print_sensor_reading, lmh_sensor_reading,
+
+ TP_PROTO(const char *sensor_name, unsigned int intensity),
+
+ TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_lmh_print_event, lmh_event_call,
+
+ TP_PROTO(const char *event_name),
+
+ TP_ARGS(event_name)
+);
+
+TRACE_EVENT(lmh_debug_data,
+ TP_PROTO(const char *pre_data, uint32_t *data_buf, uint32_t buffer_len),
+
+ TP_ARGS(
+ pre_data, data_buf, buffer_len
+ ),
+
+ TP_STRUCT__entry(
+ __string(_data, pre_data)
+ __field(u32, _buffer_len)
+ __dynamic_array(u32, _buffer, buffer_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(_data, pre_data);
+ __entry->_buffer_len = buffer_len * sizeof(uint32_t);
+ memcpy(__get_dynamic_array(_buffer), data_buf,
+ buffer_len * sizeof(uint32_t));
+ ),
+
+ TP_printk("%s:\t %s",
+ __get_str(_data), __print_hex(__get_dynamic_array(_buffer),
+ __entry->_buffer_len)
+ )
+);
+
+
+#elif defined(TRACE_MSM_THERMAL)
+
+DECLARE_EVENT_CLASS(msm_thermal_post_core_ctl,
+
+ TP_PROTO(unsigned int cpu, unsigned int online),
+
+ TP_ARGS(cpu, online),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __field(unsigned int, online)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->online = online;
+ ),
+
+ TP_printk("device=cpu%u online=%u",
+ __entry->cpu, __entry->online)
+);
+DECLARE_EVENT_CLASS(msm_thermal_pre_core_ctl,
+
+ TP_PROTO(unsigned int cpu),
+
+ TP_ARGS(cpu),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("device=cpu%u", __entry->cpu)
+);
+
+DEFINE_EVENT(msm_thermal_pre_core_ctl, thermal_pre_core_offline,
+
+ TP_PROTO(unsigned int cpu),
+
+ TP_ARGS(cpu)
+);
+
+DEFINE_EVENT(msm_thermal_post_core_ctl, thermal_post_core_offline,
+
+ TP_PROTO(unsigned int cpu, unsigned int online),
+
+ TP_ARGS(cpu, online)
+);
+
+DEFINE_EVENT(msm_thermal_pre_core_ctl, thermal_pre_core_online,
+
+ TP_PROTO(unsigned int cpu),
+
+ TP_ARGS(cpu)
+);
+
+DEFINE_EVENT(msm_thermal_post_core_ctl, thermal_post_core_online,
+
+ TP_PROTO(unsigned int cpu, unsigned int online),
+
+ TP_ARGS(cpu, online)
+);
+
+DECLARE_EVENT_CLASS(msm_thermal_freq_mit,
+
+ TP_PROTO(unsigned int cpu, unsigned int max_freq,
+ unsigned int min_freq),
+
+ TP_ARGS(cpu, max_freq, min_freq),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cpu)
+ __field(unsigned int, max_freq)
+ __field(unsigned int, min_freq)
+ ),
+
+ TP_fast_assign(
+ __entry->cpu = cpu;
+ __entry->max_freq = max_freq;
+ __entry->min_freq = min_freq;
+ ),
+
+ TP_printk("device=cpu%u max_frequency=%u min_frequency=%u",
+ __entry->cpu, __entry->max_freq,
+ __entry->min_freq)
+);
+
+DEFINE_EVENT(msm_thermal_freq_mit, thermal_pre_frequency_mit,
+
+ TP_PROTO(unsigned int cpu, unsigned int max_freq,
+ unsigned int min_freq),
+
+ TP_ARGS(cpu, max_freq, min_freq)
+);
+
+DEFINE_EVENT(msm_thermal_freq_mit, thermal_post_frequency_mit,
+
+ TP_PROTO(unsigned int cpu, unsigned int max_freq,
+ unsigned int min_freq),
+
+ TP_ARGS(cpu, max_freq, min_freq)
+);
+
+#elif defined(_BCL_SW_TRACE) || defined(_BCL_HW_TRACE)
+
+DECLARE_EVENT_CLASS(msm_bcl_print_reading,
+
+ TP_PROTO(const char *sensor_name, long value),
+
+ TP_ARGS(
+ sensor_name, value
+ ),
+
+ TP_STRUCT__entry(
+ __string(_name, sensor_name)
+ __field(long, reading)
+ ),
+
+ TP_fast_assign(
+ __assign_str(_name, sensor_name);
+ __entry->reading = value;
+ ),
+
+ TP_printk(
+ "%s:[%ld]", __get_str(_name), __entry->reading
+ )
+);
+
+DECLARE_EVENT_CLASS(msm_bcl_print_event,
+
+ TP_PROTO(const char *event_name),
+
+ TP_ARGS(
+ event_name
+ ),
+
+ TP_STRUCT__entry(
+ __string(_name, event_name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(_name, event_name);
+ ),
+
+ TP_printk(
+ "Event:[%s]", __get_str(_name)
+ )
+);
+
+#ifdef _BCL_HW_TRACE
+DECLARE_EVENT_CLASS(msm_bcl_print_reg,
+
+ TP_PROTO(const char *sensor_name, unsigned int address,
+ unsigned int value),
+
+ TP_ARGS(
+ sensor_name, address, value
+ ),
+
+ TP_STRUCT__entry(
+ __string(_name, sensor_name)
+ __field(unsigned int, _address)
+ __field(unsigned int, _value)
+ ),
+
+ TP_fast_assign(
+ __assign_str(_name, sensor_name);
+ __entry->_address = address;
+ __entry->_value = value;
+ ),
+
+ TP_printk(
+ "%s: address 0x%x: data 0x%02x", __get_str(_name),
+ __entry->_address, __entry->_value
+ )
+);
+
+DEFINE_EVENT(msm_bcl_print_reading, bcl_hw_sensor_reading,
+
+ TP_PROTO(const char *sensor_name, long intensity),
+
+ TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_bcl_print_reg, bcl_hw_reg_access,
+
+ TP_PROTO(const char *op_name, unsigned int address, unsigned int value),
+
+ TP_ARGS(op_name, address, value)
+);
+
+DEFINE_EVENT(msm_bcl_print_reading, bcl_hw_mitigation,
+
+ TP_PROTO(const char *sensor_name, long intensity),
+
+ TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_bcl_print_event, bcl_hw_mitigation_event,
+
+ TP_PROTO(const char *event_name),
+
+ TP_ARGS(event_name)
+);
+
+DEFINE_EVENT(msm_bcl_print_reading, bcl_hw_state_event,
+
+ TP_PROTO(const char *sensor_name, long intensity),
+
+ TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_bcl_print_event, bcl_hw_event,
+
+ TP_PROTO(const char *event_name),
+
+ TP_ARGS(event_name)
+);
+#elif defined(_BCL_SW_TRACE)
+DEFINE_EVENT(msm_bcl_print_reading, bcl_sw_mitigation,
+
+ TP_PROTO(const char *sensor_name, long intensity),
+
+ TP_ARGS(sensor_name, intensity)
+);
+
+DEFINE_EVENT(msm_bcl_print_event, bcl_sw_mitigation_event,
+
+ TP_PROTO(const char *event_name),
+
+ TP_ARGS(event_name)
+);
+#endif /* _BCL_HW_TRACE */
+#else
+DECLARE_EVENT_CLASS(tsens,
+
+ TP_PROTO(unsigned long temp, unsigned int sensor),
+
+ TP_ARGS(temp, sensor),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, temp)
+ __field(unsigned int, sensor)
+ ),
+
+ TP_fast_assign(
+ __entry->temp = temp;
+ __entry->sensor = sensor;
+ ),
+
+ TP_printk("temp=%lu sensor=tsens_tz_sensor%u",
+ __entry->temp, __entry->sensor)
+);
+
+DEFINE_EVENT(tsens, tsens_read,
+
+ TP_PROTO(unsigned long temp, unsigned int sensor),
+
+ TP_ARGS(temp, sensor)
+);
+
+DEFINE_EVENT(tsens, tsens_threshold_hit,
+
+ TP_PROTO(unsigned long temp, unsigned int sensor),
+
+ TP_ARGS(temp, sensor)
+);
+
+DEFINE_EVENT(tsens, tsens_threshold_clear,
+
+ TP_PROTO(unsigned long temp, unsigned int sensor),
+
+ TP_ARGS(temp, sensor)
+);
+#endif
+#endif
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_thermal
+#include <trace/define_trace.h>
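These tracepoints are emitted from the corresponding drivers in the usual way: exactly one compilation unit defines CREATE_TRACE_POINTS before including the header, after which the generated trace_*() helpers are available. A hypothetical call site for the tsens_read event defined above:

/* In exactly one .c file of the tsens driver: */
#define CREATE_TRACE_POINTS
#include <trace/trace_thermal.h>

static void example_report_temperature(unsigned long temp, unsigned int sensor)
{
	/* emits "temp=<temp> sensor=tsens_tz_sensor<sensor>" when enabled */
	trace_tsens_read(temp, sensor);
}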