-rw-r--r--  Documentation/devicetree/bindings/arm/msm/glink_spi_xprt.txt | 44
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/it7258_ts_i2c.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/wcd_codec.txt | 8
-rw-r--r--  arch/arm/boot/dts/qcom/batterydata-qrd-skuk-4v4-3000mah.dtsi | 78
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi | 87
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi | 9
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi | 28
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi | 14
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt.dtsi | 36
-rw-r--r--  arch/arm64/Kconfig.platforms | 1
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig | 2
-rw-r--r--  arch/arm64/configs/msmcortex_defconfig | 2
-rw-r--r--  drivers/crypto/msm/qcedev.c | 8
-rw-r--r--  drivers/gpu/msm/adreno.c | 8
-rw-r--r--  drivers/gpu/msm/adreno.h | 2
-rw-r--r--  drivers/gpu/msm/adreno_a4xx_snapshot.c | 10
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 31
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.h | 2
-rw-r--r--  drivers/gpu/msm/adreno_a5xx_snapshot.c | 174
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 41
-rw-r--r--  drivers/gpu/msm/adreno_snapshot.c | 3
-rw-r--r--  drivers/gpu/msm/kgsl.c | 28
-rw-r--r--  drivers/gpu/msm/kgsl.h | 9
-rw-r--r--  drivers/gpu/msm/kgsl_debugfs.c | 2
-rw-r--r--  drivers/gpu/msm/kgsl_device.h | 5
-rw-r--r--  drivers/gpu/msm/kgsl_events.c | 10
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c | 50
-rw-r--r--  drivers/gpu/msm/kgsl_pool.c | 25
-rw-r--r--  drivers/gpu/msm/kgsl_pool.h | 1
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 31
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.h | 2
-rw-r--r--  drivers/gpu/msm/kgsl_pwrscale.c | 3
-rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.c | 137
-rw-r--r--  drivers/gpu/msm/kgsl_sharedmem.h | 60
-rw-r--r--  drivers/input/touchscreen/gt9xx/gt9xx.c | 2751
-rw-r--r--  drivers/input/touchscreen/gt9xx/gt9xx.h | 425
-rw-r--r--  drivers/input/touchscreen/it7258_ts_i2c.c | 34
-rw-r--r--  drivers/media/platform/msm/vidc/hfi_packetization.c | 3
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vdec.c | 83
-rw-r--r--  drivers/media/platform/msm/vidc/msm_venc.c | 6
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_common.c | 4
-rw-r--r--  drivers/media/platform/msm/vidc/vidc_hfi.h | 2
-rw-r--r--  drivers/media/platform/msm/vidc/vidc_hfi_api.h | 1
-rw-r--r--  drivers/mfd/wcd9xxx-utils.c | 10
-rw-r--r--  drivers/misc/qseecom.c | 9
-rw-r--r--  drivers/nfc/nq-nci.c | 48
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.c | 30
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.h | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_clients/Makefile | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c | 597
-rw-r--r--  drivers/platform/msm/ipa/ipa_common_i.h | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_uc_offload_common_i.h | 24
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/Makefile | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 116
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 142
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c | 438
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h | 514
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 46
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/Makefile | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 116
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 205
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c | 410
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h | 580
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c | 46
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 3
-rw-r--r--  drivers/regulator/cpr3-mmss-regulator.c | 64
-rw-r--r--  drivers/soc/qcom/Kconfig | 10
-rw-r--r--  drivers/soc/qcom/Makefile | 1
-rw-r--r--  drivers/soc/qcom/glink.c | 60
-rw-r--r--  drivers/soc/qcom/glink_core_if.h | 7
-rw-r--r--  drivers/soc/qcom/glink_spi_xprt.c | 2192
-rw-r--r--  drivers/soc/qcom/glink_xprt_if.h | 5
-rw-r--r--  drivers/soc/qcom/icnss.c | 8
-rw-r--r--  drivers/soc/qcom/peripheral-loader.c | 29
-rw-r--r--  drivers/soc/qcom/pil-msa.c | 38
-rw-r--r--  drivers/soc/qcom/pil-q6v5-mss.c | 8
-rw-r--r--  drivers/soc/qcom/pil-q6v5.c | 28
-rw-r--r--  drivers/soc/qcom/socinfo.c | 3
-rw-r--r--  drivers/soc/qcom/subsys-pil-tz.c | 22
-rw-r--r--  drivers/soc/qcom/subsystem_restart.c | 3
-rw-r--r--  drivers/usb/phy/phy-msm-qusb-v2.c | 2
-rw-r--r--  drivers/usb/phy/phy-msm-qusb.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_compat_utils.c | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c | 101
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.h | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.c | 19
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_video.c | 18
-rw-r--r--  include/linux/ipa_uc_offload.h | 259
-rwxr-xr-x  include/linux/mfd/wcd9xxx/pdata.h | 1
-rw-r--r--  include/soc/qcom/icnss.h | 8
-rw-r--r--  include/soc/qcom/socinfo.h | 1
-rw-r--r--  include/uapi/linux/v4l2-controls.h | 3
-rw-r--r--  include/uapi/media/msm_vidc.h | 3
-rw-r--r--  sound/soc/codecs/wcd9335.c | 23
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x.c | 12
-rw-r--r--  sound/soc/msm/msm8996.c | 42
-rw-r--r--  sound/soc/msm/qdsp6v2/q6afe.c | 2
103 files changed, 8264 insertions, 2371 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/glink_spi_xprt.txt b/Documentation/devicetree/bindings/arm/msm/glink_spi_xprt.txt
new file mode 100644
index 000000000000..0a78eb6b91fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/glink_spi_xprt.txt
@@ -0,0 +1,44 @@
+Qualcomm Technologies, Inc. G-link SPI Transport
+
+Required properties:
+-compatible : should be "qcom,glink-spi-xprt".
+-label : the name of the subsystem this link connects to.
+
+Optional properties:
+-qcom,remote-fifo-config: Reference to the FIFO configuration in the remote
+ processor.
+-qcom,qos-config: Reference to the qos configuration elements. It depends on
+ ramp-time.
+-qcom,ramp-time: Worst case time in microseconds to transition to this power
+ state. Power states are numbered by array index position.
+
+Example:
+
+ glink_spi_xprt_wdsp: qcom,glink-spi-xprt-wdsp {
+ compatible = "qcom,glink-spi-xprt";
+ label = "wdsp";
+ qcom,remote-fifo-config = <&glink_fifo_wdsp>;
+ qcom,qos-config = <&glink_qos_wdsp>;
+ qcom,ramp-time = <0x10>,
+ <0x20>,
+ <0x30>,
+ <0x40>;
+ };
+
+ glink_fifo_wdsp: qcom,glink-fifo-config-wdsp {
+ compatible = "qcom,glink-fifo-config";
+ qcom,out-read-idx-reg = <0x12000>;
+ qcom,out-write-idx-reg = <0x12004>;
+ qcom,in-read-idx-reg = <0x1200C>;
+ qcom,in-write-idx-reg = <0x12010>;
+ };
+
+ glink_qos_wdsp: qcom,glink-qos-config-wdsp {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x80 0x0>,
+ <0x70 0x1>,
+ <0x60 0x2>,
+ <0x50 0x3>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/it7258_ts_i2c.txt b/Documentation/devicetree/bindings/input/touchscreen/it7258_ts_i2c.txt
index 832ec34dbbda..3b2c272d7378 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/it7258_ts_i2c.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/it7258_ts_i2c.txt
@@ -45,6 +45,7 @@ Optional properties:
- ite,low-reset : boolean, if the controller needs low-state of the reset gpio while
initializing, and reset gpio should be made as high-state to reset the
controller. It means the controller needs "active-high" reset gpio.
+ - ite,avdd-lpm-cur : avdd lpm current value (mA) in suspend state.
Required properties palm-detect-en feature:
- ite,palm-detect-keycode : The keycode that is required to be sent when
@@ -76,5 +77,6 @@ Example:
ite,num-fingers = <2>;
ite,reset-delay = <20>;
ite,low-reset;
+ ite,avdd-lpm-cur = <3000>;
};
};
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index d585595f21c6..e37e7c2bea3c 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -92,6 +92,14 @@ Optional properties:
involving DMIC will use the rate defined by
cdc-dmic-sample-rate.
+ - qcom,cdc-ecpp-dmic-rate: Specifies the sample rate of digital mic in HZ to be
+ used by ECPP (Echo Cancellation Ping Pong) block
+ on the codec. The valid set of values is the same
+ as that of cdc-dmic-sample-rate, but this rate will
+ only be used by ECPP and all other audio use cases
+ involving DMIC will use the rate defined by
+ cdc-dmic-sample-rate.
+
- qcom,cdc-dmic-clk-drv-strength: Specifies the drive strength for digital microphone
clock in the codec. Accepted values are 2,4,8 and 16.
The clock drive strentgh is in uA. Codec driver will
diff --git a/arch/arm/boot/dts/qcom/batterydata-qrd-skuk-4v4-3000mah.dtsi b/arch/arm/boot/dts/qcom/batterydata-qrd-skuk-4v4-3000mah.dtsi
new file mode 100644
index 000000000000..76126c21c43a
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/batterydata-qrd-skuk-4v4-3000mah.dtsi
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+qcom,qrd_msmcobalt_skuk_3000mah {
+ qcom,max-voltage-uv = <4400000>;
+ qcom,nom-batt-capacity-mah = <3000>;
+ qcom,batt-id-kohm = <68>;
+ qcom,battery-beta = <3380>;
+ qcom,battery-type = "qrd_msmcobalt_skuk_300mah";
+ qcom,checksum = <0x0F19>;
+ qcom,fg-profile-data = [
+ 05 B2 1F 6F
+ FC A3 0A 6E
+ FD DB 1D 8C
+ 1D AE 12 C2
+ 23 00 18 7E
+ 52 B4 45 8D
+ 00 00 00 55
+ 00 00 00 0F
+ C5 92 00 00
+ CA A0 CD 95
+ 00 0C 00 1F
+ EC C3 F2 56
+ F3 27 06 7B
+ 12 FF 01 02
+ 3A 21 DA 1C
+ 40 40 09 1C
+ 00 05 00 07
+ 05 B4 1F AC
+ FC EF 0A 57
+ 00 2E 1D 6A
+ 14 BA 0B 12
+ 22 DC 19 40
+ 53 03 45 79
+ 00 00 00 53
+ 00 00 00 0E
+ CC 05 00 00
+ CA 24 BB 3A
+ 00 00 00 1C
+ EC C3 F2 56
+ F2 A2 06 A6
+ 01 C7 06 96
+ 1A CF EA 8B
+ 33 08 33 BA
+ 00 00 10 07
+ 46 66 0C 3A
+ 00 19 00 1C
+ FA 0A 01 98
+ 00 00 00 FF
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ ];
+};
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
index bb111ba8e2d4..367384a8c3e5 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,6 +70,10 @@
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-min-refresh-rate = <55>;
+ qcom,mdss-dsi-max-refresh-rate = <60>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
qcom,config-select = <&dsi_dual_nt35597_video_config0>;
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index 9bcc375e275c..596a713a9ad0 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -1001,6 +1001,7 @@
qcom,cdc-slim-ifd-elemental-addr = [00 00 A0 01 17 02];
qcom,cdc-dmic-sample-rate = <4800000>;
qcom,cdc-mad-dmic-rate = <600000>;
+ qcom,cdc-ecpp-dmic-rate = <1200000>;
qcom,cdc-dmic-clk-drv-strength = <2>;
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index b9626cd61553..a8b047a0e0b3 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -331,6 +331,15 @@
&mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>;
};
+&mdss_dp_ctrl {
+ pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+ pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
+ pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>;
+ qcom,aux-en-gpio = <&tlmm 77 0>;
+ qcom,aux-sel-gpio = <&tlmm 78 0>;
+ qcom,usbplug-cc-gpio = <&tlmm 38 0>;
+};
+
&pmicobalt_charger {
qcom,suspend-input;
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
index 3018ecd4e5eb..a99ce727c195 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
@@ -255,6 +255,9 @@
mdss_fb0: qcom,mdss_fb_primary {
cell-index = <0>;
compatible = "qcom,mdss-fb";
+ qcom,cont-splash-memory {
+ linux,contiguous-region = <&cont_splash_mem>;
+ };
};
mdss_fb1: qcom,mdss_fb_wfd {
@@ -266,6 +269,12 @@
cell-index = <2>;
compatible = "qcom,mdss-fb";
};
+
+ mdss_fb3: qcom,mdss_fb_dp {
+ cell-index = <3>;
+ compatible = "qcom,mdss-fb";
+ };
+
};
mdss_dsi: qcom,mdss_dsi@0 {
@@ -290,7 +299,6 @@
<22 512 0 0>,
<22 512 0 1000>;
- qcom,timing-db-mode;
qcom,mmss-ulp-clamp-ctrl-offset = <0x14>;
clocks = <&clock_mmss clk_mmss_mdss_mdp_clk>,
@@ -362,6 +370,7 @@
<0xc828000 0xac>;
reg-names = "dsi_ctrl", "dsi_phy", "mmss_misc_phys";
+ qcom,timing-db-mode;
wqhd-vddio-supply = <&pmcobalt_l14>;
lab-supply = <&lab_regulator>;
ibb-supply = <&ibb_regulator>;
@@ -399,6 +408,7 @@
<0xc828000 0xac>;
reg-names = "dsi_ctrl", "dsi_phy", "mmss_misc_phys";
+ qcom,timing-db-mode;
wqhd-vddio-supply = <&pmcobalt_l14>;
lab-supply = <&lab_regulator>;
ibb-supply = <&ibb_regulator>;
@@ -435,6 +445,81 @@
qcom,mdss-fb-map = <&mdss_fb1>;
};
+ mdss_dp_ctrl: qcom,dp_ctrl@c990000 {
+ cell-index = <0>;
+ compatible = "qcom,mdss-dp";
+ qcom,mdss-fb-map = <&mdss_fb3>;
+
+ gdsc-supply = <&gdsc_mdss>;
+ vdda-1p2-supply = <&pmcobalt_l2>;
+ vdda-0p9-supply = <&pmcobalt_l1>;
+
+ reg = <0xc990000 0xa84>,
+ <0xc011000 0x910>,
+ <0x1fcb200 0x050>;
+ reg-names = "dp_ctrl", "dp_phy", "tcsr_regs";
+
+ clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_axi_clk>,
+ <&clock_mmss clk_mmss_mdss_mdp_clk>,
+ <&clock_mmss clk_mmss_mdss_hdmi_dp_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_dp_aux_clk>,
+ <&clock_mmss clk_mmss_mdss_dp_link_clk>,
+ <&clock_mmss clk_mmss_mdss_dp_link_intf_clk>,
+ <&clock_mmss clk_mmss_mdss_dp_crypto_clk>,
+ <&clock_mmss clk_mmss_mdss_dp_pixel_clk>;
+ clock-names = "core_mnoc_clk", "core_iface_clk", "core_bus_clk",
+ "core_mdp_core_clk", "core_alt_iface_clk",
+ "core_aux_clk", "ctrl_link_clk",
+ "ctrl_link_iface_clk", "ctrl_crypto_clk",
+ "ctrl_pixel_clk";
+
+ qcom,dp-usbpd-detection = <&pmicobalt_pdphy>;
+
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ qcom,ctrl-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ctrl-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda-1p2";
+ qcom,supply-min-voltage = <1200000>;
+ qcom,supply-max-voltage = <1200000>;
+ qcom,supply-enable-load = <12560>;
+ qcom,supply-disable-load = <4>;
+ };
+ };
+
+ qcom,phy-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,phy-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda-0p9";
+ qcom,supply-min-voltage = <880000>;
+ qcom,supply-max-voltage = <880000>;
+ qcom,supply-enable-load = <73400>;
+ qcom,supply-disable-load = <32>;
+ };
+ };
+ };
+
mdss_rotator: qcom,mdss_rotator {
compatible = "qcom,sde_rotator";
reg = <0x0c900000 0xab100>,
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
index 50924b1667a4..aaf9aed30e7f 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
@@ -221,6 +221,15 @@
&mdss_hdmi_ddc_suspend &mdss_hdmi_cec_suspend>;
};
+&mdss_dp_ctrl {
+ pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+ pinctrl-0 = <&mdss_dp_aux_active &mdss_dp_usbplug_cc_active>;
+ pinctrl-1 = <&mdss_dp_aux_suspend &mdss_dp_usbplug_cc_suspend>;
+ qcom,aux-en-gpio = <&tlmm 77 0>;
+ qcom,aux-sel-gpio = <&tlmm 78 0>;
+ qcom,usbplug-cc-gpio = <&tlmm 38 0>;
+};
+
&mdss_mdp {
qcom,mdss-pref-prim-intf = "dsi";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index 5833b30d1fd1..9f8ecea15568 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -944,20 +944,20 @@
qcom,cpr-voltage-ceiling =
<896000 896000 896000 896000 896000
1032000>,
- <632000 696000 768000 828000 896000
- 1032000>,
- <632000 696000 768000 828000 896000
- 1032000>,
- <632000 696000 768000 828000 896000
- 1032000>,
- <632000 696000 768000 828000 896000
- 1032000>,
- <632000 696000 768000 828000 896000
- 1032000>,
- <632000 696000 768000 828000 896000
- 1032000>,
- <632000 696000 768000 828000 896000
- 1032000>;
+ <672000 740000 800000 868000 976000
+ 1100000>,
+ <672000 740000 800000 868000 976000
+ 1100000>,
+ <672000 740000 800000 868000 976000
+ 1100000>,
+ <672000 740000 800000 868000 976000
+ 1100000>,
+ <672000 740000 800000 868000 976000
+ 1100000>,
+ <672000 740000 800000 868000 976000
+ 1100000>,
+ <672000 740000 800000 868000 976000
+ 1100000>;
qcom,cpr-voltage-floor =
<896000 896000 896000 896000 896000
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi
index 8311d21a262c..0860139248d1 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-vidc.dtsi
@@ -27,13 +27,13 @@
qcom,sw-power-collapse;
qcom,debug-timeout;
qcom,reg-presets =
- <0x80124 0x0002000>,
- <0x80550 0x0000001>,
- <0x80560 0x2222221>,
- <0x80568 0x3333331>,
- <0x80570 0x0000001>,
- <0x80580 0x2222221>,
- <0x80588 0x3333331>;
+ <0x80124 0x00000003>,
+ <0x80550 0x01111111>,
+ <0x80560 0x01111111>,
+ <0x80568 0x01111111>,
+ <0x80570 0x01111111>,
+ <0x80580 0x01111111>,
+ <0x80588 0x01111111>;
qcom,imem-size = <524288>; /* 512 kB */
qcom,max-hw-load = <2563200>; /* Full 4k @ 60 + 1080p @ 60 */
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index b8735096796e..33e8c9c4993d 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -333,6 +333,11 @@
size = <0 0x2000000>;
linux,cma-default;
};
+
+ cont_splash_mem: splash_region@9d600000 {
+ reg = <0x0 0x9d600000 0x0 0x02400000>;
+ label = "cont_splash_mem";
+ };
};
};
@@ -1304,6 +1309,35 @@
qcom,rx-ring-size = <0x800>;
};
+ glink_spi_xprt_wdsp: qcom,glink-spi-xprt-wdsp {
+ compatible = "qcom,glink-spi-xprt";
+ label = "wdsp";
+ qcom,remote-fifo-config = <&glink_fifo_wdsp>;
+ qcom,qos-config = <&glink_qos_wdsp>;
+ qcom,ramp-time = <0x10>,
+ <0x20>,
+ <0x30>,
+ <0x40>;
+ };
+
+ glink_fifo_wdsp: qcom,glink-fifo-config-wdsp {
+ compatible = "qcom,glink-fifo-config";
+ qcom,out-read-idx-reg = <0x12000>;
+ qcom,out-write-idx-reg = <0x12004>;
+ qcom,in-read-idx-reg = <0x1200C>;
+ qcom,in-write-idx-reg = <0x12010>;
+ };
+
+ glink_qos_wdsp: qcom,glink-qos-config-wdsp {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x80 0x0>,
+ <0x70 0x1>,
+ <0x60 0x2>,
+ <0x50 0x3>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
+ };
+
qcom,glink_pkt {
compatible = "qcom,glinkpkt";
@@ -2886,6 +2920,8 @@
clock-names = "bus_clk", "rot_clk";
clocks = <&clock_mmss clk_mmss_mdss_axi_clk>,
<&clock_mmss clk_mmss_mdss_rot_clk>;
+ proxy-supply = <&gdsc_mdss>;
+ qcom,proxy-consumer-enable;
status = "ok";
};
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 711a73e401f7..eb02bc09b63d 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -57,6 +57,7 @@ config ARCH_QCOM
select MSM_IRQ
select THERMAL_WRITABLE_TRIPS
select RATIONAL
+ select ARCH_HAS_RESET_CONTROLLER
help
This enables support for the ARMv8 based Qualcomm chipsets.
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index f9d9a882db1f..eb010631be76 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -419,6 +419,7 @@ CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MTP=y
@@ -481,6 +482,7 @@ CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMD_XPRT=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
CONFIG_MSM_SPCOM=y
CONFIG_MSM_SMEM_LOGGING=y
CONFIG_MSM_SMP2P=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index bdfcbb4d1621..45c2684fe8e4 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -421,6 +421,7 @@ CONFIG_DUAL_ROLE_USB_INTF=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_VBUS_DRAW=500
CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_USB_CONFIGFS_F_MTP=y
@@ -494,6 +495,7 @@ CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMD_XPRT=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_MSM_GLINK_SPI_XPRT=y
CONFIG_MSM_SPCOM=y
CONFIG_MSM_SMEM_LOGGING=y
CONFIG_MSM_SMP2P=y
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 51f50698e597..e63f061175ad 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1,6 +1,6 @@
/* Qualcomm CE device driver.
*
- * Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1543,7 +1543,7 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all dst length is equal to data_len */
for (i = 0; i < req->entries; i++) {
- if (req->vbuf.dst[i].len >= ULONG_MAX - total) {
+ if (req->vbuf.dst[i].len >= U32_MAX - total) {
pr_err("%s: Integer overflow on total req dst vbuf length\n",
__func__);
goto error;
@@ -1557,7 +1557,7 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all src length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
- if (req->vbuf.src[i].len > ULONG_MAX - total) {
+ if (req->vbuf.src[i].len > U32_MAX - total) {
pr_err("%s: Integer overflow on total req src vbuf length\n",
__func__);
goto error;
@@ -1619,7 +1619,7 @@ static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
/* Check for sum of all src length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
- if (req->data[i].len > ULONG_MAX - total) {
+ if (req->data[i].len > U32_MAX - total) {
pr_err("%s: Integer overflow on total req buf length\n",
__func__);
goto sha_error;
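
The switch from ULONG_MAX to U32_MAX above matters because the per-entry buffer lengths are 32-bit values: on a 64-bit kernel the old ULONG_MAX bound can never be exceeded, so a sum of lengths could silently wrap before the data_len comparison. A minimal sketch of the intended check, using a hypothetical helper rather than the driver code:

#include <linux/kernel.h>	/* U32_MAX */
#include <linux/types.h>

/* Return true if summing the 32-bit lengths would overflow, or if the
 * total does not match the declared data_len.
 */
static bool qcedev_lens_invalid(const u32 *lens, int entries, u32 data_len)
{
	u32 total = 0;
	int i;

	for (i = 0; i < entries; i++) {
		if (lens[i] > U32_MAX - total)	/* addition would wrap */
			return true;
		total += lens[i];
	}

	return total != data_len;
}
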
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index a802671acba0..b57fe05b21d5 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -579,7 +579,6 @@ void adreno_cp_callback(struct adreno_device *adreno_dev, int bit)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- kgsl_schedule_work(&device->event_work);
adreno_dispatcher_schedule(device);
}
@@ -1130,7 +1129,10 @@ static int adreno_init(struct kgsl_device *device)
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int ret;
- kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
+ ret = kgsl_pwrctrl_change_state(device, KGSL_STATE_INIT);
+ if (ret)
+ return ret;
+
/*
* initialization only needs to be done once initially until
* device is shutdown
@@ -1595,6 +1597,8 @@ static int adreno_stop(struct kgsl_device *device)
adreno_ringbuffer_stop(adreno_dev);
+ kgsl_pwrscale_update_stats(device);
+
adreno_irqctrl(adreno_dev, 0);
adreno_ocmem_free(adreno_dev);
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 9f462bca26ce..f5fb4e48c3ee 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -613,11 +613,13 @@ struct adreno_vbif_platform {
* struct adreno_vbif_snapshot_registers - Holds an array of vbif registers
* listed for snapshot dump for a particular core
* @version: vbif version
+ * @mask: vbif revision mask
* @registers: vbif registers listed for snapshot dump
* @count: count of vbif registers listed for snapshot
*/
struct adreno_vbif_snapshot_registers {
const unsigned int version;
+ const unsigned int mask;
const unsigned int *registers;
const int count;
};
diff --git a/drivers/gpu/msm/adreno_a4xx_snapshot.c b/drivers/gpu/msm/adreno_a4xx_snapshot.c
index 6921af5c0ab5..540b42b984c0 100644
--- a/drivers/gpu/msm/adreno_a4xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a4xx_snapshot.c
@@ -168,15 +168,15 @@ static const unsigned int a4xx_vbif_ver_20050000_registers[] = {
static const struct adreno_vbif_snapshot_registers
a4xx_vbif_snapshot_registers[] = {
- { 0x20000000, a4xx_vbif_ver_20000000_registers,
+ { 0x20000000, 0xFFFF0000, a4xx_vbif_ver_20000000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20000000_registers)/2},
- { 0x20020000, a4xx_vbif_ver_20020000_registers,
+ { 0x20020000, 0xFFFF0000, a4xx_vbif_ver_20020000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20020000_registers)/2},
- { 0x20050000, a4xx_vbif_ver_20050000_registers,
+ { 0x20050000, 0xFFFF0000, a4xx_vbif_ver_20050000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20050000_registers)/2},
- { 0x20070000, a4xx_vbif_ver_20020000_registers,
+ { 0x20070000, 0xFFFF0000, a4xx_vbif_ver_20020000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20020000_registers)/2},
- { 0x20090000, a4xx_vbif_ver_20050000_registers,
+ { 0x20090000, 0xFFFF0000, a4xx_vbif_ver_20050000_registers,
ARRAY_SIZE(a4xx_vbif_ver_20050000_registers)/2},
};
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 96f72c59e4cd..467b385f6d56 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -2373,17 +2373,25 @@ static int a5xx_microcode_read(struct adreno_device *adreno_dev)
{
int ret;
- ret = _load_firmware(KGSL_DEVICE(adreno_dev),
- adreno_dev->gpucore->pm4fw_name, &adreno_dev->pm4,
- &adreno_dev->pm4_fw_size, &adreno_dev->pm4_fw_version);
- if (ret)
- return ret;
+ if (adreno_dev->pm4.hostptr == NULL) {
+ ret = _load_firmware(KGSL_DEVICE(adreno_dev),
+ adreno_dev->gpucore->pm4fw_name,
+ &adreno_dev->pm4,
+ &adreno_dev->pm4_fw_size,
+ &adreno_dev->pm4_fw_version);
+ if (ret)
+ return ret;
+ }
- ret = _load_firmware(KGSL_DEVICE(adreno_dev),
- adreno_dev->gpucore->pfpfw_name, &adreno_dev->pfp,
- &adreno_dev->pfp_fw_size, &adreno_dev->pfp_fw_version);
- if (ret)
- return ret;
+ if (adreno_dev->pfp.hostptr == NULL) {
+ ret = _load_firmware(KGSL_DEVICE(adreno_dev),
+ adreno_dev->gpucore->pfpfw_name,
+ &adreno_dev->pfp,
+ &adreno_dev->pfp_fw_size,
+ &adreno_dev->pfp_fw_version);
+ if (ret)
+ return ret;
+ }
ret = _load_gpmu_firmware(adreno_dev);
if (ret)
@@ -3058,7 +3066,6 @@ static void a5xx_irq_storm_worker(struct work_struct *work)
mutex_unlock(&device->mutex);
/* Reschedule just to make sure everything retires */
- kgsl_schedule_work(&device->event_work);
adreno_dispatcher_schedule(device);
}
@@ -3109,8 +3116,6 @@ static void a5xx_cp_callback(struct adreno_device *adreno_dev, int bit)
}
a5xx_preemption_trigger(adreno_dev);
-
- kgsl_schedule_work(&device->event_work);
adreno_dispatcher_schedule(device);
}
diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h
index 7965bb7b5440..27d5a4b31c71 100644
--- a/drivers/gpu/msm/adreno_a5xx.h
+++ b/drivers/gpu/msm/adreno_a5xx.h
@@ -52,7 +52,7 @@
#define A5XX_CP_CTXRECORD_MAGIC_REF 0x27C4BAFCUL
/* Size of each CP preemption record */
-#define A5XX_CP_CTXRECORD_SIZE_IN_BYTES 0x100000
+#define A5XX_CP_CTXRECORD_SIZE_IN_BYTES 0x10000
/* Size of the preemption counter block (in bytes) */
#define A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE (16 * 4)
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 4f368a8f93f3..04d82844a5e9 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -128,6 +128,9 @@ static const struct adreno_debugbus_block a5xx_debugbus_blocks[] = {
#define A5XX_NUM_AXI_ARB_BLOCKS 2
#define A5XX_NUM_XIN_BLOCKS 4
+/* Width of A5XX_CP_DRAW_STATE_ADDR is 8 bits */
+#define A5XX_CP_DRAW_STATE_ADDR_WIDTH 8
+
/* a5xx_snapshot_cp_pm4() - Dump PM4 data in snapshot */
static size_t a5xx_snapshot_cp_pm4(struct kgsl_device *device, u8 *buf,
size_t remain, void *priv)
@@ -326,8 +329,7 @@ static void a5xx_snapshot_debugbus(struct kgsl_device *device,
}
}
-static const unsigned int a5xx_vbif_ver_20040000_registers[] = {
- /* VBIF version 0x20040000*/
+static const unsigned int a5xx_vbif_ver_20xxxxxx_registers[] = {
0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x302C, 0x3030, 0x3030,
0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
0x3042, 0x3042, 0x3049, 0x3049, 0x3058, 0x3058, 0x305A, 0x3061,
@@ -341,10 +343,8 @@ static const unsigned int a5xx_vbif_ver_20040000_registers[] = {
static const struct adreno_vbif_snapshot_registers
a5xx_vbif_snapshot_registers[] = {
- { 0x20040000, a5xx_vbif_ver_20040000_registers,
- ARRAY_SIZE(a5xx_vbif_ver_20040000_registers)/2},
- { 0x20040001, a5xx_vbif_ver_20040000_registers,
- ARRAY_SIZE(a5xx_vbif_ver_20040000_registers)/2},
+ { 0x20000000, 0xFF000000, a5xx_vbif_ver_20xxxxxx_registers,
+ ARRAY_SIZE(a5xx_vbif_ver_20xxxxxx_registers)/2},
};
/*
@@ -379,7 +379,7 @@ static const unsigned int a5xx_registers[] = {
/* VPC */
0x0E60, 0x0E7C,
/* UCHE */
- 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0xEA0, 0xEA8, 0xEB0, 0xEB2,
+ 0x0E80, 0x0E8F, 0x0E90, 0x0E96, 0xEA0, 0xEA8, 0xEB0, 0xEB2,
/* RB CTX 0 */
0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6,
@@ -414,49 +414,49 @@ static const unsigned int a5xx_registers[] = {
0xB000, 0xB97F, 0xB9A0, 0xB9BF,
};
-/*
- * The HLSQ registers can only be read via the crash dumper (not AHB) so they
- * need to be in their own array because the array above does double duty for
- * the fallback path too
- */
-static const unsigned int a5xx_hlsq_registers[] = {
+struct a5xx_hlsq_sp_tp_regs {
+ unsigned int statetype;
+ unsigned int ahbaddr;
+ unsigned int size;
+ uint64_t offset;
+};
+
+static struct a5xx_hlsq_sp_tp_regs a5xx_hlsq_sp_tp_registers[] = {
+ /* HLSQ non context. 0xe32 - 0xe3f are holes so don't include them */
+ { 0x35, 0xE00, 0x32 },
+ /* HLSQ CTX 0 2D */
+ { 0x31, 0x2080, 0x1 },
+ /* HLSQ CTX 1 2D */
+ { 0x33, 0x2480, 0x1 },
+ /* HLSQ CTX 0 3D. 0xe7e2 - 0xe7ff are holes so don't include them */
+ { 0x32, 0xE780, 0x62 },
+ /* HLSQ CTX 1 3D. 0xefe2 - 0xefff are holes so don't include them */
+ { 0x34, 0xEF80, 0x62 },
+
/* SP non context */
- 0x0EC0, 0xEC2, 0xED0, 0xEE0, 0xEF0, 0xEF2, 0xEFA, 0xEFF,
+ { 0x3f, 0x0EC0, 0x40 },
/* SP CTX 0 2D */
- 0x2040, 0x2040,
+ { 0x3d, 0x2040, 0x1 },
/* SP CTX 1 2D */
- 0x2440, 0x2440,
- /* SP CTXT 0 3D */
- 0xE580, 0xE580, 0xE584, 0xE58B, 0xE590, 0xE5B1, 0xE5C0, 0xE5DF,
- 0xE5F0, 0xE5F9, 0xE600, 0xE608, 0xE610, 0xE631, 0xE640, 0xE661,
- 0xE670, 0xE673, 0xE6F0, 0xE6F0,
- /* SP CTXT 1 3D */
- 0xED80, 0xED80, 0xED84, 0xED8B, 0xED90, 0xEDB1, 0xEDC0, 0xEDDF,
- 0xEDF0, 0xEDF9, 0xEE00, 0xEE08, 0xEE10, 0xEE31, 0xEE40, 0xEE61,
- 0xEE70, 0xEE73, 0xEEF0, 0xEEF0,
- /* TP non context */
- 0xF00, 0xF03, 0xF08, 0xF08, 0xF10, 0xF1B,
- /* TP CTX 0 2D */
- 0x2000, 0x2009,
- /* TP CTX 1 2D */
- 0x2400, 0x2409,
+ { 0x3b, 0x2440, 0x1 },
+ /* SP CTX 0 3D */
+ { 0x3e, 0xE580, 0x180 },
+ /* SP CTX 1 3D */
+ { 0x3c, 0xED80, 0x180 },
+
+ /* TP non context. 0x0f1c - 0x0f3f are holes so don't include them */
+ { 0x3a, 0x0F00, 0x1c },
+ /* TP CTX 0 2D. 0x200a - 0x200f are holes so don't include them */
+ { 0x38, 0x2000, 0xa },
+ /* TP CTX 1 2D. 0x240a - 0x240f are holes so don't include them */
+ { 0x36, 0x2400, 0xa },
/* TP CTX 0 3D */
- 0xE700, 0xE707, 0xE70E, 0xE731,
- 0xE750, 0xE751, 0xE75A, 0xE764, 0xE76C, 0xE77F,
+ { 0x39, 0xE700, 0x80 },
/* TP CTX 1 3D */
- 0xEF00, 0xEF07, 0xEF0E, 0xEF31,
- 0xEF50, 0xEF51, 0xEF5A, 0xEF64, 0xEF6C, 0xEF7F,
- /* HLSQ non context */
- 0xE00, 0xE01, 0xE04, 0xE06, 0xE08, 0xE09, 0xE10, 0xE17,
- 0xE20, 0xE25,
- /* HLSQ CTXT 0 3D */
- 0xE784, 0xE789, 0xE78B, 0xE796, 0xE7A0, 0xE7A2, 0xE7B0, 0xE7BB,
- 0xE7C0, 0xE7DD, 0xE7E0, 0xE7E1,
- /* HLSQ CTXT 1 3D */
- 0xEF84, 0xEF89, 0xEF8B, 0xEF96, 0xEFA0, 0xEFA2, 0xEFB0, 0xEFBB,
- 0xEFC0, 0xEFDD, 0xEFE0, 0xEFE1,
+ { 0x37, 0xEF00, 0x80 },
};
+
#define A5XX_NUM_SHADER_BANKS 4
#define A5XX_SHADER_STATETYPE_SHIFT 8
@@ -652,7 +652,6 @@ static struct cdregs {
unsigned int size;
} _a5xx_cd_registers[] = {
{ a5xx_registers, ARRAY_SIZE(a5xx_registers) },
- { a5xx_hlsq_registers, ARRAY_SIZE(a5xx_hlsq_registers) },
};
#define REG_PAIR_COUNT(_a, _i) \
@@ -776,6 +775,46 @@ static void _a5xx_do_crashdump(struct kgsl_device *device)
crash_dump_valid = true;
}
+static int get_hlsq_registers(struct kgsl_device *device,
+ const struct a5xx_hlsq_sp_tp_regs *regs, unsigned int *data)
+{
+ unsigned int i;
+ unsigned int *src = registers.hostptr + regs->offset;
+
+ for (i = 0; i < regs->size; i++) {
+ *data++ = regs->ahbaddr + i;
+ *data++ = *(src + i);
+ }
+
+ return (2 * regs->size);
+}
+
+static size_t a5xx_snapshot_dump_hlsq_sp_tp_regs(struct kgsl_device *device,
+ u8 *buf, size_t remain, void *priv)
+{
+ struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+ unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ int count = 0, i;
+
+ /* Figure out how many registers we are going to dump */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_sp_tp_registers); i++)
+ count += a5xx_hlsq_sp_tp_registers[i].size;
+
+ if (remain < (count * 8) + sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "REGISTERS");
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_sp_tp_registers); i++)
+ data += get_hlsq_registers(device,
+ &a5xx_hlsq_sp_tp_registers[i], data);
+
+ header->count = count;
+
+ /* Return the size of the section */
+ return (count * 8) + sizeof(*header);
+}
+
/*
* a5xx_snapshot() - A5XX GPU snapshot function
* @adreno_dev: Device being snapshotted
@@ -806,6 +845,10 @@ void a5xx_snapshot(struct adreno_device *adreno_dev,
a5xx_vbif_snapshot_registers,
ARRAY_SIZE(a5xx_vbif_snapshot_registers));
+ /* Dump SP TP HLSQ registers */
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS, snapshot,
+ a5xx_snapshot_dump_hlsq_sp_tp_regs, NULL);
+
/* CP_PFP indexed registers */
kgsl_snapshot_indexed_registers(device, snapshot,
A5XX_CP_PFP_STAT_ADDR, A5XX_CP_PFP_STAT_DATA,
@@ -819,7 +862,7 @@ void a5xx_snapshot(struct adreno_device *adreno_dev,
/* CP_DRAW_STATE */
kgsl_snapshot_indexed_registers(device, snapshot,
A5XX_CP_DRAW_STATE_ADDR, A5XX_CP_DRAW_STATE_DATA,
- 0, 128);
+ 0, 1 << A5XX_CP_DRAW_STATE_ADDR_WIDTH);
/*
* CP needs to be halted on a530v1 before reading CP_PFP_UCODE_DBG_DATA
@@ -878,8 +921,8 @@ void a5xx_snapshot(struct adreno_device *adreno_dev,
}
-static int _a5xx_crashdump_init(struct a5xx_shader_block *block, uint64_t *ptr,
- uint64_t *offset)
+static int _a5xx_crashdump_init_shader(struct a5xx_shader_block *block,
+ uint64_t *ptr, uint64_t *offset)
{
int qwords = 0;
unsigned int j;
@@ -908,6 +951,31 @@ static int _a5xx_crashdump_init(struct a5xx_shader_block *block, uint64_t *ptr,
return qwords;
}
+static int _a5xx_crashdump_init_hlsq(struct a5xx_hlsq_sp_tp_regs *regs,
+ uint64_t *ptr, uint64_t *offset)
+{
+ int qwords = 0;
+
+ /* Program the aperture */
+ ptr[qwords++] =
+ (regs->statetype << A5XX_SHADER_STATETYPE_SHIFT);
+ ptr[qwords++] = (((uint64_t) A5XX_HLSQ_DBG_READ_SEL << 44)) |
+ (1 << 21) | 1;
+
+ /* Read all the data in one chunk */
+ ptr[qwords++] = registers.gpuaddr + *offset;
+ ptr[qwords++] =
+ (((uint64_t) A5XX_HLSQ_DBG_AHB_READ_APERTURE << 44)) |
+ regs->size;
+
+ /* Remember the offset of the first bank for easy access */
+ regs->offset = *offset;
+
+ *offset += regs->size * sizeof(unsigned int);
+
+ return qwords;
+}
+
void a5xx_crashdump_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -954,6 +1022,11 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
data_size += a5xx_shader_blocks[i].sz * sizeof(unsigned int) *
A5XX_NUM_SHADER_BANKS;
}
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_sp_tp_registers); i++) {
+ script_size += 32;
+ data_size +=
+ a5xx_hlsq_sp_tp_registers[i].size * sizeof(unsigned int);
+ }
/* Now allocate the script and data buffers */
@@ -968,7 +1041,6 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
kgsl_free_global(KGSL_DEVICE(adreno_dev), &capturescript);
return;
}
-
/* Build the crash script */
ptr = (uint64_t *) capturescript.hostptr;
@@ -987,9 +1059,13 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
/* Program each shader block */
for (i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
- ptr += _a5xx_crashdump_init(&a5xx_shader_blocks[i], ptr,
+ ptr += _a5xx_crashdump_init_shader(&a5xx_shader_blocks[i], ptr,
&offset);
}
+ /* Program the hlsq sp tp register sets */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_sp_tp_registers); i++)
+ ptr += _a5xx_crashdump_init_hlsq(&a5xx_hlsq_sp_tp_registers[i],
+ ptr, &offset);
*ptr++ = 0;
*ptr++ = 0;
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index ac3805800691..5d3b2b8a7266 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -284,6 +284,7 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
struct kgsl_context *context = cmdbatch->context;
struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
struct kgsl_device *device = context->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/*
* Write the start and end timestamp to the memstore to keep the
@@ -301,7 +302,16 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
/* Retire pending GPU events for the object */
kgsl_process_event_group(device, &context->events);
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
+ /*
+ * For A3xx we still get the rptr from the CP_RB_RPTR instead of
+ * rptr scratch out address. At this point GPU clocks turned off.
+ * So avoid reading GPU register directly for A3xx.
+ */
+ if (adreno_is_a3xx(adreno_dev))
+ trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
+ 0);
+ else
+ trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
kgsl_cmdbatch_destroy(cmdbatch);
}
@@ -613,12 +623,13 @@ static int sendcmd(struct adreno_device *adreno_dev,
}
}
- mutex_unlock(&device->mutex);
if (ret) {
dispatcher->inflight--;
dispatch_q->inflight--;
+ mutex_unlock(&device->mutex);
+
/*
* Don't log a message in case of:
* -ENOENT means that the context was detached before the
@@ -642,6 +653,8 @@ static int sendcmd(struct adreno_device *adreno_dev,
time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
+ mutex_unlock(&device->mutex);
+
cmdbatch->submit_ticks = time.ticks;
dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch;
@@ -1923,9 +1936,20 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev,
if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end);
- trace_adreno_cmdbatch_retired(cmdbatch, (int) dispatcher->inflight,
- start, end, ADRENO_CMDBATCH_RB(cmdbatch),
- adreno_get_rptr(drawctxt->rb));
+ /*
+ * For A3xx we still get the rptr from the CP_RB_RPTR instead of
+ * rptr scratch out address. At this point GPU clocks turned off.
+ * So avoid reading GPU register directly for A3xx.
+ */
+ if (adreno_is_a3xx(adreno_dev))
+ trace_adreno_cmdbatch_retired(cmdbatch,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_CMDBATCH_RB(cmdbatch), 0);
+ else
+ trace_adreno_cmdbatch_retired(cmdbatch,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_CMDBATCH_RB(cmdbatch),
+ adreno_get_rptr(drawctxt->rb));
drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
end - cmdbatch->submit_ticks;
@@ -2099,19 +2123,18 @@ static void adreno_dispatcher_work(struct work_struct *work)
break;
}
+ kgsl_process_event_groups(device);
+
/*
* dispatcher_do_fault() returns 0 if no faults occurred. If that is the
* case, then clean up preemption and try to schedule more work
*/
if (dispatcher_do_fault(adreno_dev) == 0) {
+
/* Clean up after preemption */
if (gpudev->preemption_schedule)
gpudev->preemption_schedule(adreno_dev);
- /* Re-kick the event engine to catch stragglers */
- if (dispatcher->inflight == 0 && count != 0)
- kgsl_schedule_work(&device->event_work);
-
/* Run the scheduler for to dispatch new commands */
_adreno_dispatcher_issuecmds(adreno_dev);
}
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index b069b16c75ef..0eff3da0e494 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -1118,7 +1118,8 @@ static const struct adreno_vbif_snapshot_registers *vbif_registers(
adreno_readreg(adreno_dev, ADRENO_REG_VBIF_VERSION, &version);
for (i = 0; i < count; i++) {
- if (list[i].version == version)
+ if ((list[i].version & list[i].mask) ==
+ (version & list[i].mask))
return &list[i];
}
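
The new mask field lets one table entry cover a whole family of VBIF revisions instead of listing each version separately, as the a4xx and a5xx tables above now do. An illustrative helper (not part of the driver) showing the masked compare:

/* Masked version compare, as used by vbif_registers() above. */
static int vbif_entry_matches(unsigned int entry_version,
			      unsigned int entry_mask,
			      unsigned int hw_version)
{
	return (entry_version & entry_mask) == (hw_version & entry_mask);
}

/*
 * With the a5xx entry { 0x20000000, 0xFF000000, ... } both previously
 * listed revisions resolve to the same register list:
 *   vbif_entry_matches(0x20000000, 0xFF000000, 0x20040000) -> 1
 *   vbif_entry_matches(0x20000000, 0xFF000000, 0x20040001) -> 1
 */
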
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index f77dbb7f20af..c203ac7bfe8c 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2144,8 +2144,8 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
}
up_read(&current->mm->mmap_sem);
- if (dmabuf == NULL)
- return -ENODEV;
+ if (IS_ERR_OR_NULL(dmabuf))
+ return dmabuf ? PTR_ERR(dmabuf) : -ENODEV;
ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
if (ret) {
@@ -2249,7 +2249,7 @@ static long _gpuobj_map_dma_buf(struct kgsl_device *device,
if (ret)
return ret;
- if (buf.fd == 0)
+ if (buf.fd < 0)
return -EINVAL;
*fd = buf.fd;
@@ -3663,19 +3663,15 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
if (cache == KGSL_CACHEMODE_WRITEBACK
|| cache == KGSL_CACHEMODE_WRITETHROUGH) {
- struct scatterlist *s;
int i;
unsigned long addr = vma->vm_start;
+ struct kgsl_memdesc *m = &entry->memdesc;
+
+ for (i = 0; i < m->page_count; i++) {
+ struct page *page = m->pages[i];
- for_each_sg(entry->memdesc.sgt->sgl, s,
- entry->memdesc.sgt->nents, i) {
- int j;
- for (j = 0; j < (s->length >> PAGE_SHIFT); j++) {
- struct page *page = sg_page(s);
- page = nth_page(page, j);
- vm_insert_page(vma, addr, page);
- addr += PAGE_SIZE;
- }
+ vm_insert_page(vma, addr, page);
+ addr += PAGE_SIZE;
}
}
@@ -3913,7 +3909,7 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
goto error_close_mmu;
status = kgsl_allocate_global(device, &device->memstore,
- KGSL_MEMSTORE_SIZE, 0, 0);
+ KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG);
if (status != 0)
goto error_close_mmu;
@@ -3957,8 +3953,8 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
PM_QOS_DEFAULT_VALUE);
}
-
- device->events_wq = create_singlethread_workqueue("kgsl-events");
+ device->events_wq = alloc_workqueue("kgsl-events",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
/* Initalize the snapshot engine */
kgsl_device_snapshot_init(device);
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index c172021c8944..ee7149e1fd41 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -163,6 +163,8 @@ struct kgsl_memdesc_ops {
#define KGSL_MEMDESC_PRIVILEGED BIT(6)
/* The memdesc is TZ locked content protection */
#define KGSL_MEMDESC_TZ_LOCKED BIT(7)
+/* The memdesc is allocated through contiguous memory */
+#define KGSL_MEMDESC_CONTIG BIT(8)
/**
* struct kgsl_memdesc - GPU memory object descriptor
@@ -179,8 +181,9 @@ struct kgsl_memdesc_ops {
* @ops: Function hooks for the memdesc memory type
* @flags: Flags set from userspace
* @dev: Pointer to the struct device that owns this memory
- * @memmap: bitmap of pages for mmapsize
- * @memmap_len: Number of bits for memmap
+ * @attrs: dma attributes for this memory
+ * @pages: An array of pointers to allocated pages
+ * @page_count: Total number of pages allocated
*/
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
@@ -197,6 +200,8 @@ struct kgsl_memdesc {
uint64_t flags;
struct device *dev;
struct dma_attrs attrs;
+ struct page **pages;
+ unsigned int page_count;
};
/*
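
The new pages and page_count fields back the reworked mmap and fault paths later in this patch: instead of walking a scatterlist of mixed 4K/64K chunks to find one 4K page, the handlers index the array directly. A rough, illustrative sketch of that lookup (kgsl_lookup_fault_page is a hypothetical name):

#include "kgsl.h"	/* struct kgsl_memdesc */

/* O(1) lookup of the faulting page, replacing the old O(nents) sg walk. */
static struct page *kgsl_lookup_fault_page(struct kgsl_memdesc *m,
					unsigned long pgoff)
{
	if (pgoff >= m->page_count)
		return NULL;	/* caller reports VM_FAULT_SIGBUS */

	return m->pages[pgoff];
}
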
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 766cd811588c..93ac790f3a55 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -150,7 +150,7 @@ static int print_mem_entry(int id, void *ptr, void *data)
(unsigned long *) m->useraddr,
m->size, entry->id, flags,
memtype_str(kgsl_memdesc_usermem_type(m)),
- usage, m->sgt->nents, m->mapsize);
+ usage, (m->sgt ? m->sgt->nents : 0), m->mapsize);
if (entry->metadata[0] != 0)
seq_printf(s, " %s", entry->metadata);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 4159a5fe375f..f55b795b1d2b 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -272,7 +272,6 @@ struct kgsl_device {
int mem_log;
int pwr_log;
struct kgsl_pwrscale pwrscale;
- struct work_struct event_work;
int reset_counter; /* Track how many GPU core resets have occured */
int cff_dump_enable;
@@ -292,8 +291,6 @@ struct kgsl_device {
.cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
- .event_work = __WORK_INITIALIZER((_dev).event_work,\
- kgsl_process_events),\
.context_idr = IDR_INIT((_dev).context_idr),\
.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
.active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
@@ -602,7 +599,7 @@ void kgsl_process_event_group(struct kgsl_device *device,
struct kgsl_event_group *group);
void kgsl_flush_event_group(struct kgsl_device *device,
struct kgsl_event_group *group);
-void kgsl_process_events(struct work_struct *work);
+void kgsl_process_event_groups(struct kgsl_device *device);
void kgsl_context_destroy(struct kref *kref);
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index 6f70b9ddd376..6e8abf36c50f 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -314,22 +314,16 @@ EXPORT_SYMBOL(kgsl_add_event);
static DEFINE_RWLOCK(group_lock);
static LIST_HEAD(group_list);
-/**
- * kgsl_process_events() - Work queue for processing new timestamp events
- * @work: Pointer to a work_struct
- */
-void kgsl_process_events(struct work_struct *work)
+void kgsl_process_event_groups(struct kgsl_device *device)
{
struct kgsl_event_group *group;
- struct kgsl_device *device = container_of(work, struct kgsl_device,
- event_work);
read_lock(&group_lock);
list_for_each_entry(group, &group_list, group)
_process_event_group(device, group, false);
read_unlock(&group_lock);
}
-EXPORT_SYMBOL(kgsl_process_events);
+EXPORT_SYMBOL(kgsl_process_event_groups);
/**
* kgsl_del_event_group() - Remove a GPU event group
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 865cd9d8f498..b467ef81d257 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1627,16 +1627,34 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
uint64_t addr = memdesc->gpuaddr;
uint64_t size = memdesc->size;
unsigned int flags = _get_protection_flags(memdesc);
+ struct sg_table *sgt = NULL;
- ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, memdesc->sgt->sgl,
- memdesc->sgt->nents, flags);
+ /*
+ * For paged memory allocated through kgsl, memdesc->pages is not NULL.
+ * Allocate sgt here just for its map operation. Contiguous memory
+ * already has its sgt, so no need to allocate it here.
+ */
+ if (memdesc->pages != NULL)
+ sgt = kgsl_alloc_sgt_from_pages(memdesc);
+ else
+ sgt = memdesc->sgt;
+
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ret = _iommu_map_sg_sync_pc(pt, addr, memdesc, sgt->sgl,
+ sgt->nents, flags);
if (ret)
- return ret;
+ goto done;
ret = _iommu_map_guard_page(pt, memdesc, addr + size, flags);
if (ret)
_iommu_unmap_sync_pc(pt, memdesc, addr, size);
+done:
+ if (memdesc->pages != NULL)
+ kgsl_free_sgt(sgt);
+
return ret;
}
@@ -1647,6 +1665,8 @@ static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
{
int pg_sz;
unsigned int protflags = _get_protection_flags(memdesc);
+ int ret;
+ struct sg_table *sgt = NULL;
pg_sz = (1 << kgsl_memdesc_get_align(memdesc));
if (!IS_ALIGNED(virtaddr | virtoffset | physoffset | size, pg_sz))
@@ -1655,9 +1675,27 @@ static int kgsl_iommu_map_offset(struct kgsl_pagetable *pt,
if (size == 0)
return -EINVAL;
- return _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
- memdesc, memdesc->sgt->sgl, memdesc->sgt->nents,
- physoffset, size, protflags);
+ /*
+ * For paged memory allocated through kgsl, memdesc->pages is not NULL.
+ * Allocate sgt here just for its map operation. Contiguous memory
+ * already has its sgt, so no need to allocate it here.
+ */
+ if (memdesc->pages != NULL)
+ sgt = kgsl_alloc_sgt_from_pages(memdesc);
+ else
+ sgt = memdesc->sgt;
+
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ret = _iommu_map_sg_offset_sync_pc(pt, virtaddr + virtoffset,
+ memdesc, sgt->sgl, sgt->nents,
+ physoffset, size, protflags);
+
+ if (memdesc->pages != NULL)
+ kgsl_free_sgt(sgt);
+
+ return ret;
}
/* This function must be called with context bank attached */
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index 7fb3b37ac191..7967b19779db 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -263,6 +263,31 @@ void kgsl_pool_free_sgt(struct sg_table *sgt)
}
}
+/**
+ * kgsl_pool_free_pages() - Free pages in the pages array
+ * @pages: pointer to the pages array
+ * @pcount: number of pages in the array
+ *
+ * Free the pages by collapsing any physically adjacent pages.
+ * Pages are added back to the pool if it has sufficient space,
+ * otherwise they are given back to the system.
+ */
+void kgsl_pool_free_pages(struct page **pages, unsigned int pcount)
+{
+ int i;
+
+ if (pages == NULL || pcount == 0)
+ return;
+
+ for (i = 0; i < pcount;) {
+ /*
+ * Free each page or compound page group individually.
+ */
+ struct page *p = pages[i];
+
+ i += 1 << compound_order(p);
+ kgsl_pool_free_page(p);
+ }
+}
static int kgsl_pool_idx_lookup(unsigned int order)
{
int i;
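
Since the allocation path later in this patch records every 4K slot of a higher-order allocation in the pages array (a 64K compound page fills 16 consecutive entries that all resolve to the same head page), the 1 << compound_order(p) stride above frees each physical allocation exactly once. An illustrative helper, assuming the same array layout:

#include <linux/mm.h>	/* compound_order() */

/* Count distinct allocations in a pages array that mixes 4K and
 * higher-order entries, using the same stride as kgsl_pool_free_pages().
 */
static unsigned int kgsl_count_allocations(struct page **pages,
					unsigned int pcount)
{
	unsigned int i, n = 0;

	for (i = 0; i < pcount; n++)
		i += 1 << compound_order(pages[i]);

	return n;	/* e.g. one 64K block plus two 4K pages -> 3 */
}
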
diff --git a/drivers/gpu/msm/kgsl_pool.h b/drivers/gpu/msm/kgsl_pool.h
index f2cdda19140b..efbfa96f1498 100644
--- a/drivers/gpu/msm/kgsl_pool.h
+++ b/drivers/gpu/msm/kgsl_pool.h
@@ -34,6 +34,7 @@ kgsl_gfp_mask(unsigned int page_order)
}
void kgsl_pool_free_sgt(struct sg_table *sgt);
+void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
void kgsl_init_page_pools(void);
void kgsl_exit_page_pools(void);
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 2b9eef8b6351..11b323e9d40c 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -363,6 +363,8 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
if (new_level == old_level)
return;
+ kgsl_pwrscale_update_stats(device);
+
/*
* Set the active and previous powerlevel first in case the clocks are
* off - if we don't do this then the pwrlevel change won't take effect
@@ -934,6 +936,31 @@ static ssize_t kgsl_pwrctrl_gpu_available_frequencies_show(
return num_chars;
}
+static ssize_t kgsl_pwrctrl_gpu_clock_stats_show(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct kgsl_device *device = kgsl_device_from_dev(dev);
+ struct kgsl_pwrctrl *pwr;
+ int index, num_chars = 0;
+
+ if (device == NULL)
+ return 0;
+ pwr = &device->pwrctrl;
+ mutex_lock(&device->mutex);
+ kgsl_pwrscale_update_stats(device);
+ mutex_unlock(&device->mutex);
+ for (index = 0; index < pwr->num_pwrlevels - 1; index++)
+ num_chars += snprintf(buf + num_chars, PAGE_SIZE - num_chars,
+ "%llu ", pwr->clock_times[index]);
+
+ if (num_chars < PAGE_SIZE)
+ buf[num_chars++] = '\n';
+
+ return num_chars;
+}
+
static ssize_t kgsl_pwrctrl_reset_count_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1201,6 +1228,9 @@ static DEVICE_ATTR(gpubusy, 0444, kgsl_pwrctrl_gpubusy_show,
static DEVICE_ATTR(gpu_available_frequencies, 0444,
kgsl_pwrctrl_gpu_available_frequencies_show,
NULL);
+static DEVICE_ATTR(gpu_clock_stats, 0444,
+ kgsl_pwrctrl_gpu_clock_stats_show,
+ NULL);
static DEVICE_ATTR(max_pwrlevel, 0644,
kgsl_pwrctrl_max_pwrlevel_show,
kgsl_pwrctrl_max_pwrlevel_store);
@@ -1249,6 +1279,7 @@ static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_deep_nap_timer,
&dev_attr_gpubusy,
&dev_attr_gpu_available_frequencies,
+ &dev_attr_gpu_clock_stats,
&dev_attr_max_pwrlevel,
&dev_attr_min_pwrlevel,
&dev_attr_thermal_pwrlevel,
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index 0029c389484f..8fd06531aa81 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -122,6 +122,7 @@ struct kgsl_regulator {
* @min_pwrlevel - minimum allowable powerlevel per the user
* @num_pwrlevels - number of available power levels
* @interval_timeout - timeout in jiffies to be idle before a power event
+ * @clock_times - Each GPU frequency's accumulated active time in us
* @strtstp_sleepwake - true if the device supports low latency GPU start/stop
* @regulators - array of pointers to kgsl_regulator structs
* @pcl - bus scale identifier
@@ -178,6 +179,7 @@ struct kgsl_pwrctrl {
unsigned int min_pwrlevel;
unsigned int num_pwrlevels;
unsigned long interval_timeout;
+ u64 clock_times[KGSL_MAX_PWRLEVELS];
bool strtstp_sleepwake;
struct kgsl_regulator regulators[KGSL_MAX_REGULATORS];
uint32_t pcl;
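
The new gpu_clock_stats sysfs attribute added earlier in this patch prints the clock_times[] values documented above: one cumulative busy time in microseconds per active power level, space separated. A hedged userspace sketch for reading it; the kgsl-3d0 device node name is an assumption and may differ per target:

#include <stdio.h>

int main(void)
{
	/* Path assumes the usual kgsl-3d0 sysfs node. */
	FILE *f = fopen("/sys/class/kgsl/kgsl-3d0/gpu_clock_stats", "r");
	unsigned long long busy_us;
	int level = 0;

	if (!f)
		return 1;

	while (fscanf(f, "%llu", &busy_us) == 1)
		printf("pwrlevel %d: %llu us busy\n", level++, busy_us);

	fclose(f);
	return 0;
}
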
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 4f6677d9a1de..d90aec42f30a 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -127,6 +127,7 @@ EXPORT_SYMBOL(kgsl_pwrscale_busy);
*/
void kgsl_pwrscale_update_stats(struct kgsl_device *device)
{
+ struct kgsl_pwrctrl *pwrctrl = &device->pwrctrl;
struct kgsl_pwrscale *psc = &device->pwrscale;
BUG_ON(!mutex_is_locked(&device->mutex));
@@ -150,6 +151,8 @@ void kgsl_pwrscale_update_stats(struct kgsl_device *device)
device->pwrscale.accum_stats.busy_time += stats.busy_time;
device->pwrscale.accum_stats.ram_time += stats.ram_time;
device->pwrscale.accum_stats.ram_wait += stats.ram_wait;
+ pwrctrl->clock_times[pwrctrl->active_pwrlevel] +=
+ stats.busy_time;
}
}
EXPORT_SYMBOL(kgsl_pwrscale_update_stats);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 50dcd39fac58..73edc3f7e146 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -313,10 +313,6 @@ kgsl_sharedmem_init_sysfs(void)
drv_attr_list);
}
-static int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
- struct kgsl_pagetable *pagetable,
- uint64_t size);
-
static int kgsl_cma_alloc_secure(struct kgsl_device *device,
struct kgsl_memdesc *memdesc, uint64_t size);
@@ -358,8 +354,7 @@ static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
- int i, pgoff;
- struct scatterlist *s = memdesc->sgt->sgl;
+ int pgoff;
unsigned int offset;
offset = ((unsigned long) vmf->virtual_address - vma->vm_start);
@@ -369,30 +364,15 @@ static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
pgoff = offset >> PAGE_SHIFT;
- /*
- * The sglist might be comprised of mixed blocks of memory depending
- * on how many 64K pages were allocated. This means we have to do math
- * to find the actual 4K page to map in user space
- */
-
- for (i = 0; i < memdesc->sgt->nents; i++) {
- int npages = s->length >> PAGE_SHIFT;
-
- if (pgoff < npages) {
- struct page *page = sg_page(s);
+ if (pgoff < memdesc->page_count) {
+ struct page *page = memdesc->pages[pgoff];
- page = nth_page(page, pgoff);
+ get_page(page);
+ vmf->page = page;
- get_page(page);
- vmf->page = page;
+ memdesc->mapsize += PAGE_SIZE;
- memdesc->mapsize += PAGE_SIZE;
-
- return 0;
- }
-
- pgoff -= npages;
- s = sg_next(s);
+ return 0;
}
return VM_FAULT_SIGBUS;
@@ -455,9 +435,15 @@ static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
for_each_sg_page(memdesc->sgt->sgl, &sg_iter,
memdesc->sgt->nents, 0)
ClearPagePrivate(sg_page_iter_page(&sg_iter));
+
}
- kgsl_pool_free_sgt(memdesc->sgt);
+ /* Free pages using the pages array for non secure paged memory */
+ if (memdesc->pages != NULL)
+ kgsl_pool_free_pages(memdesc->pages, memdesc->page_count);
+ else
+ kgsl_pool_free_sgt(memdesc->sgt);
+
}
/*
@@ -477,31 +463,10 @@ static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
return -ENOMEM;
mutex_lock(&kernel_map_global_lock);
- if (!memdesc->hostptr) {
+ if ((!memdesc->hostptr) && (memdesc->pages != NULL)) {
pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
- struct page **pages = NULL;
- struct scatterlist *sg;
- int npages = PAGE_ALIGN(memdesc->size) >> PAGE_SHIFT;
- int sglen = memdesc->sgt->nents;
- int i, count = 0;
-
- /* create a list of pages to call vmap */
- pages = kgsl_malloc(npages * sizeof(struct page *));
- if (pages == NULL) {
- ret = -ENOMEM;
- goto done;
- }
-
- for_each_sg(memdesc->sgt->sgl, sg, sglen, i) {
- struct page *page = sg_page(sg);
- int j;
-
- for (j = 0; j < sg->length >> PAGE_SHIFT; j++)
- pages[count++] = page++;
- }
-
- memdesc->hostptr = vmap(pages, count,
+ memdesc->hostptr = vmap(memdesc->pages, memdesc->page_count,
VM_IOREMAP, page_prot);
if (memdesc->hostptr)
KGSL_STATS_ADD(memdesc->size,
@@ -509,11 +474,10 @@ static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
&kgsl_driver.stats.vmalloc_max);
else
ret = -ENOMEM;
- kgsl_free(pages);
}
if (memdesc->hostptr)
memdesc->hostptr_count++;
-done:
+
mutex_unlock(&kernel_map_global_lock);
return ret;
@@ -672,7 +636,7 @@ static inline int get_page_size(size_t size, unsigned int align)
}
#endif
-static int
+int
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
uint64_t size)
@@ -681,7 +645,6 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
unsigned int j, page_size, len_alloc;
unsigned int pcount = 0;
size_t len;
- struct page **pages = NULL;
unsigned int align;
size = PAGE_ALIGN(size);
@@ -712,18 +675,17 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
memdesc->pagetable = pagetable;
memdesc->ops = &kgsl_page_alloc_ops;
- memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (memdesc->sgt == NULL)
- return -ENOMEM;
-
/*
- * Allocate space to store the list of pages to send to vmap. This is an
- * array of pointers so we can track 1024 pages per page of allocation
+ * Allocate space to store the list of pages. This is an array of
+ * pointers so we can track 1024 pages per page of allocation.
+	 * Keep this array around for non-global, non-secure buffers that
+	 * are allocated by kgsl. This lets the vm fault routine find the
+	 * faulted page in constant time.
*/
- pages = kgsl_malloc(len_alloc * sizeof(struct page *));
+ memdesc->pages = kgsl_malloc(len_alloc * sizeof(struct page *));
- if (pages == NULL) {
+ if (memdesc->pages == NULL) {
ret = -ENOMEM;
goto done;
}
@@ -734,9 +696,9 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
int page_count;
page_count = kgsl_pool_alloc_page(&page_size,
- pages + pcount, len_alloc - pcount,
+ memdesc->pages + pcount,
+ len_alloc - pcount,
&align);
-
if (page_count <= 0) {
if (page_count == -EAGAIN)
continue;
@@ -760,16 +722,12 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
pcount += page_count;
len -= page_size;
memdesc->size += page_size;
+ memdesc->page_count += page_count;
/* Get the needed page size for the next iteration */
page_size = get_page_size(len, align);
}
- ret = sg_alloc_table_from_pages(memdesc->sgt, pages, pcount, 0,
- memdesc->size, GFP_KERNEL);
- if (ret)
- goto done;
-
/* Call to the hypervisor to lock any secure buffer allocations */
if (memdesc->flags & KGSL_MEMFLAGS_SECURE) {
unsigned int i;
@@ -778,10 +736,27 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
int source_vm = VMID_HLOS;
int dest_vm = VMID_CP_PIXEL;
+ memdesc->sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (memdesc->sgt == NULL) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ ret = sg_alloc_table_from_pages(memdesc->sgt, memdesc->pages,
+ memdesc->page_count, 0, memdesc->size, GFP_KERNEL);
+ if (ret) {
+ kfree(memdesc->sgt);
+ goto done;
+ }
+
ret = hyp_assign_table(memdesc->sgt, &source_vm, 1,
&dest_vm, &dest_perms, 1);
- if (ret)
+ if (ret) {
+ sg_free_table(memdesc->sgt);
+ kfree(memdesc->sgt);
+ memdesc->sgt = NULL;
goto done;
+ }
/* Set private bit for each sg to indicate that its secured */
for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i)
@@ -793,6 +768,14 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
KGSL_STATS_ADD(memdesc->size, &kgsl_driver.stats.secure,
&kgsl_driver.stats.secure_max);
+ /*
+	 * We don't need the pages array for secure buffers because they
+	 * are not mapped to the CPU
+ */
+ kgsl_free(memdesc->pages);
+ memdesc->pages = NULL;
+ memdesc->page_count = 0;
+
/* Don't map and zero the locked secure buffer */
goto done;
}
@@ -802,19 +785,18 @@ kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
done:
if (ret) {
- if (pages) {
+ if (memdesc->pages) {
unsigned int count = 1;
for (j = 0; j < pcount; j += count) {
- count = 1 << compound_order(pages[j]);
- kgsl_pool_free_page(pages[j]);
+ count = 1 << compound_order(memdesc->pages[j]);
+ kgsl_pool_free_page(memdesc->pages[j]);
}
}
- kfree(memdesc->sgt);
+ kgsl_free(memdesc->pages);
memset(memdesc, 0, sizeof(*memdesc));
}
- kgsl_free(pages);
return ret;
}
@@ -837,6 +819,9 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
kfree(memdesc->sgt);
}
+ if (memdesc->pages)
+ kgsl_free(memdesc->pages);
+
memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 5093ebd6e51a..c05aaecb5284 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -71,6 +71,10 @@ int kgsl_allocate_user(struct kgsl_device *device,
void kgsl_get_memory_usage(char *str, size_t len, uint64_t memflags);
+int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
+ struct kgsl_pagetable *pagetable,
+ uint64_t size);
+
#define MEMFLAGS(_flags, _mask, _shift) \
((unsigned int) (((_flags) & (_mask)) >> (_shift)))
@@ -266,7 +270,16 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
memdesc->flags = flags;
memdesc->priv = priv;
- ret = kgsl_sharedmem_alloc_contig(device, memdesc, NULL, (size_t) size);
+ if ((memdesc->priv & KGSL_MEMDESC_CONTIG) != 0)
+ ret = kgsl_sharedmem_alloc_contig(device, memdesc, NULL,
+ (size_t) size);
+ else {
+ ret = kgsl_sharedmem_page_alloc_user(memdesc, NULL,
+ (size_t) size);
+ if (ret == 0)
+ kgsl_memdesc_map(memdesc);
+ }
+
if (ret == 0)
kgsl_mmu_add_global(device, memdesc);
@@ -293,4 +306,47 @@ static inline void kgsl_free_global(struct kgsl_device *device,
void kgsl_sharedmem_set_noretry(bool val);
bool kgsl_sharedmem_get_noretry(void);
+/**
+ * kgsl_alloc_sgt_from_pages() - Allocate an sg table
+ *
+ * @m: memory descriptor of the allocation
+ *
+ * Allocate and return a pointer to an sg table built from the
+ * memdesc's pages array
+ */
+static inline struct sg_table *kgsl_alloc_sgt_from_pages(
+ struct kgsl_memdesc *m)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (sgt == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table_from_pages(sgt, m->pages, m->page_count, 0,
+ m->size, GFP_KERNEL);
+ if (ret) {
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
+}
+
+/**
+ * kgsl_free_sgt() - Free an sg table structure
+ *
+ * @sgt: sg table pointer to be freed
+ *
+ * Free the scatterlist entries referenced by sgt, then free the
+ * sg_table structure itself
+ */
+static inline void kgsl_free_sgt(struct sg_table *sgt)
+{
+ if (sgt != NULL) {
+ sg_free_table(sgt);
+ kfree(sgt);
+ }
+}
+
#endif /* __KGSL_SHAREDMEM_H */
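The two static inline helpers added above let callers build a throwaway sg table from a memdesc's pages[] array and release it cleanly. A minimal usage sketch, assuming only the helpers defined in this hunk (the caller function itself is illustrative):

/* Illustrative caller, not part of this patch. */
static int example_use_sgt(struct kgsl_memdesc *memdesc)
{
	struct sg_table *sgt = kgsl_alloc_sgt_from_pages(memdesc);

	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* ... pass sgt->sgl to a consumer that needs a scatterlist ... */

	kgsl_free_sgt(sgt);
	return 0;
}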
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.c b/drivers/input/touchscreen/gt9xx/gt9xx.c
index b1dc08bdd54f..6615c3a039a0 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.c
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.c
@@ -1,1589 +1,1609 @@
/* drivers/input/touchscreen/gt9xx.c
- *
+ *
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
* 2010 - 2013 Goodix Technology.
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be a reference
- * to you, when you are integrating the GOODiX's CTP IC into your system,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ *
+ * This program is distributed in the hope that it will be a reference
+ * to you, when you are integrating the GOODiX's CTP IC into your system,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
+ *
* Version: 1.8
* Authors: andrew@goodix.com, meta@goodix.com
* Release Date: 2013/04/25
* Revision record:
- * V1.0:
- * first Release. By Andrew, 2012/08/31
+ * V1.0:
+ * first Release. By Andrew, 2012/08/31
* V1.2:
- * modify gtp_reset_guitar,slot report,tracking_id & 0x0F. By Andrew, 2012/10/15
+ * modify gtp_reset_guitar,slot report,tracking_id & 0x0F.
+ * By Andrew, 2012/10/15
* V1.4:
* modify gt9xx_update.c. By Andrew, 2012/12/12
- * V1.6:
+ * V1.6:
* 1. new heartbeat/esd_protect mechanism(add external watchdog)
- * 2. doze mode, sliding wakeup
- * 3. 3 more cfg_group(GT9 Sensor_ID: 0~5)
+ * 2. doze mode, sliding wakeup
+ * 3. 3 more cfg_group(GT9 Sensor_ID: 0~5)
* 3. config length verification
* 4. names & comments
* By Meta, 2013/03/11
* V1.8:
- * 1. pen/stylus identification
+ * 1. pen/stylus identification
* 2. read double check & fixed config support
* 2. new esd & slide wakeup optimization
* By Meta, 2013/06/08
*/
-#include <linux/irq.h>
#include "gt9xx.h"
#if GTP_ICS_SLOT_REPORT
- #include <linux/input/mt.h>
+#include <linux/input/mt.h>
#endif
-static const char *goodix_ts_name = "Goodix Capacitive TouchScreen";
-static struct workqueue_struct *goodix_wq;
-struct i2c_client * i2c_connect_client = NULL;
-u8 config[GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH]
- = {GTP_REG_CONFIG_DATA >> 8, GTP_REG_CONFIG_DATA & 0xff};
+#define GOODIX_DEV_NAME "Goodix Capacitive TouchScreen"
+#define CFG_MAX_TOUCH_POINTS 5
+#define GOODIX_COORDS_ARR_SIZE 4
+#define MAX_BUTTONS 4
+
+/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
+#define GTP_I2C_ADDRESS_HIGH 0x14
+#define GTP_I2C_ADDRESS_LOW 0x5D
+
+#define RESET_DELAY_T3_US 200 /* T3: > 100us */
+#define RESET_DELAY_T4 20 /* T4: > 5ms */
+
+#define PHY_BUF_SIZE 32
+
+#define GTP_MAX_TOUCH 5
+#define GTP_ESD_CHECK_CIRCLE_MS 2000
#if GTP_HAVE_TOUCH_KEY
- static const u16 touch_key_array[] = GTP_KEY_TAB;
- #define GTP_MAX_KEY_NUM (sizeof(touch_key_array)/sizeof(touch_key_array[0]))
-
+static const u16 touch_key_array[] = {KEY_MENU, KEY_HOMEPAGE, KEY_BACK};
+
#if GTP_DEBUG_ON
- static const int key_codes[] = {KEY_HOME, KEY_BACK, KEY_MENU, KEY_SEARCH};
- static const char *key_names[] = {"Key_Home", "Key_Back", "Key_Menu", "Key_Search"};
+static const int key_codes[] = {
+ KEY_HOME, KEY_BACK, KEY_MENU, KEY_SEARCH
+};
+static const char *const key_names[] = {
+ "Key_Home", "Key_Back", "Key_Menu", "Key_Search"
+};
#endif
-
#endif
-static s8 gtp_i2c_test(struct i2c_client *client);
-void gtp_reset_guitar(struct i2c_client *client, s32 ms);
-void gtp_int_sync(s32 ms);
+static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms);
+static void gtp_int_sync(struct goodix_ts_data *ts, int ms);
+static int gtp_i2c_test(struct i2c_client *client);
#ifdef CONFIG_HAS_EARLYSUSPEND
static void goodix_ts_early_suspend(struct early_suspend *h);
static void goodix_ts_late_resume(struct early_suspend *h);
#endif
-
-#if GTP_CREATE_WR_NODE
-extern s32 init_wr_node(struct i2c_client*);
-extern void uninit_wr_node(void);
-#endif
-
-#if GTP_AUTO_UPDATE
-extern u8 gup_init_update_proc(struct goodix_ts_data *);
-#endif
#if GTP_ESD_PROTECT
static struct delayed_work gtp_esd_check_work;
-static struct workqueue_struct * gtp_esd_check_workqueue = NULL;
-static void gtp_esd_check_func(struct work_struct *);
-static s32 gtp_init_ext_watchdog(struct i2c_client *client);
-void gtp_esd_switch(struct i2c_client *, s32);
+static struct workqueue_struct *gtp_esd_check_workqueue;
+static void gtp_esd_check_func(struct work_struct *work);
+static int gtp_init_ext_watchdog(struct i2c_client *client);
+struct i2c_client *i2c_connect_client;
#endif
-
#if GTP_SLIDE_WAKEUP
-typedef enum
-{
- DOZE_DISABLED = 0,
- DOZE_ENABLED = 1,
- DOZE_WAKEUP = 2,
-}DOZE_T;
-static DOZE_T doze_status = DOZE_DISABLED;
+enum doze_status {
+ DOZE_DISABLED = 0,
+ DOZE_ENABLED = 1,
+ DOZE_WAKEUP = 2,
+};
+static enum doze_status doze_status = DOZE_DISABLED;
static s8 gtp_enter_doze(struct goodix_ts_data *ts);
#endif
-
-static u8 chip_gt9xxs = 0; // true if ic is gt9xxs, like gt915s
-u8 grp_cfg_version = 0;
+bool init_done;
+static u8 chip_gt9xxs; /* true if ic is gt9xxs, like gt915s */
+u8 grp_cfg_version;
/*******************************************************
Function:
- Read data from the i2c slave device.
+ Read data from the i2c slave device.
Input:
- client: i2c device.
- buf[0~1]: read start address.
- buf[2~len-1]: read data buffer.
- len: GTP_ADDR_LENGTH + read bytes count
+ client: i2c device.
+ buf[0~1]: read start address.
+ buf[2~len-1]: read data buffer.
+ len: GTP_ADDR_LENGTH + read bytes count
Output:
- numbers of i2c_msgs to transfer:
- 2: succeed, otherwise: failed
+ numbers of i2c_msgs to transfer:
+ 2: succeed, otherwise: failed
*********************************************************/
-s32 gtp_i2c_read(struct i2c_client *client, u8 *buf, s32 len)
+int gtp_i2c_read(struct i2c_client *client, u8 *buf, int len)
{
- struct i2c_msg msgs[2];
- s32 ret=-1;
- s32 retries = 0;
-
- GTP_DEBUG_FUNC();
-
- msgs[0].flags = !I2C_M_RD;
- msgs[0].addr = client->addr;
- msgs[0].len = GTP_ADDR_LENGTH;
- msgs[0].buf = &buf[0];
- //msgs[0].scl_rate = 300 * 1000; // for Rockchip
-
- msgs[1].flags = I2C_M_RD;
- msgs[1].addr = client->addr;
- msgs[1].len = len - GTP_ADDR_LENGTH;
- msgs[1].buf = &buf[GTP_ADDR_LENGTH];
- //msgs[1].scl_rate = 300 * 1000;
-
- while(retries < 5)
- {
- ret = i2c_transfer(client->adapter, msgs, 2);
- if(ret == 2)break;
- retries++;
- }
- if((retries >= 5))
- {
- #if GTP_SLIDE_WAKEUP
- // reset chip would quit doze mode
- if (DOZE_ENABLED == doze_status)
- {
- return ret;
- }
- #endif
- GTP_DEBUG("I2C communication timeout, resetting chip...");
- gtp_reset_guitar(client, 10);
- }
- return ret;
+ struct goodix_ts_data *ts = i2c_get_clientdata(client);
+ struct i2c_msg msgs[2];
+ int ret = -EIO;
+ int retries = 0;
+
+ GTP_DEBUG_FUNC();
+
+ msgs[0].flags = !I2C_M_RD;
+ msgs[0].addr = client->addr;
+ msgs[0].len = GTP_ADDR_LENGTH;
+ msgs[0].buf = &buf[0];
+
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].addr = client->addr;
+ msgs[1].len = len - GTP_ADDR_LENGTH;
+ msgs[1].buf = &buf[GTP_ADDR_LENGTH];
+
+ while (retries < 5) {
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret == 2)
+ break;
+ retries++;
+ }
+ if (retries >= 5) {
+#if GTP_SLIDE_WAKEUP
+ /* reset chip would quit doze mode */
+ if (doze_status == DOZE_ENABLED)
+ return ret;
+#endif
+ GTP_DEBUG("I2C communication timeout, resetting chip...");
+ if (init_done)
+ gtp_reset_guitar(ts, 10);
+ else
+ dev_warn(&client->dev,
+ "<GTP> gtp_reset_guitar exit init_done=%d:\n",
+ init_done);
+ }
+ return ret;
}
/*******************************************************
Function:
- Write data to the i2c slave device.
+ Write data to the i2c slave device.
Input:
- client: i2c device.
- buf[0~1]: write start address.
- buf[2~len-1]: data buffer
- len: GTP_ADDR_LENGTH + write bytes count
+ client: i2c device.
+ buf[0~1]: write start address.
+ buf[2~len-1]: data buffer
+ len: GTP_ADDR_LENGTH + write bytes count
Output:
- numbers of i2c_msgs to transfer:
- 1: succeed, otherwise: failed
+ numbers of i2c_msgs to transfer:
+ 1: succeed, otherwise: failed
*********************************************************/
-s32 gtp_i2c_write(struct i2c_client *client,u8 *buf,s32 len)
+int gtp_i2c_write(struct i2c_client *client, u8 *buf, int len)
{
- struct i2c_msg msg;
- s32 ret = -1;
- s32 retries = 0;
-
- GTP_DEBUG_FUNC();
-
- msg.flags = !I2C_M_RD;
- msg.addr = client->addr;
- msg.len = len;
- msg.buf = buf;
- //msg.scl_rate = 300 * 1000; // for Rockchip
-
- while(retries < 5)
- {
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret == 1)break;
- retries++;
- }
- if((retries >= 5))
- {
- #if GTP_SLIDE_WAKEUP
- if (DOZE_ENABLED == doze_status)
- {
- return ret;
- }
- #endif
- GTP_DEBUG("I2C communication timeout, resetting chip...");
- gtp_reset_guitar(client, 10);
- }
- return ret;
+ struct goodix_ts_data *ts = i2c_get_clientdata(client);
+ struct i2c_msg msg;
+ int ret = -EIO;
+ int retries = 0;
+
+ GTP_DEBUG_FUNC();
+
+ msg.flags = !I2C_M_RD;
+ msg.addr = client->addr;
+ msg.len = len;
+ msg.buf = buf;
+
+ while (retries < 5) {
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret == 1)
+ break;
+ retries++;
+ }
+	if (retries >= 5) {
+#if GTP_SLIDE_WAKEUP
+ if (doze_status == DOZE_ENABLED)
+ return ret;
+#endif
+ GTP_DEBUG("I2C communication timeout, resetting chip...");
+ if (init_done)
+ gtp_reset_guitar(ts, 10);
+ else
+ dev_warn(&client->dev,
+ "<GTP> gtp_reset_guitar exit init_done=%d:\n",
+ init_done);
+ }
+ return ret;
}
/*******************************************************
Function:
- i2c read twice, compare the results
+ i2c read twice, compare the results
Input:
- client: i2c device
- addr: operate address
- rxbuf: read data to store, if compare successful
- len: bytes to read
+ client: i2c device
+ addr: operate address
+ rxbuf: read data to store, if compare successful
+ len: bytes to read
Output:
- FAIL: read failed
- SUCCESS: read successful
+ FAIL: read failed
+ SUCCESS: read successful
*********************************************************/
-s32 gtp_i2c_read_dbl_check(struct i2c_client *client, u16 addr, u8 *rxbuf, int len)
+int gtp_i2c_read_dbl_check(struct i2c_client *client,
+ u16 addr, u8 *rxbuf, int len)
{
- u8 buf[16] = {0};
- u8 confirm_buf[16] = {0};
- u8 retry = 0;
-
- while (retry++ < 3)
- {
- memset(buf, 0xAA, 16);
- buf[0] = (u8)(addr >> 8);
- buf[1] = (u8)(addr & 0xFF);
- gtp_i2c_read(client, buf, len + 2);
-
- memset(confirm_buf, 0xAB, 16);
- confirm_buf[0] = (u8)(addr >> 8);
- confirm_buf[1] = (u8)(addr & 0xFF);
- gtp_i2c_read(client, confirm_buf, len + 2);
-
- if (!memcmp(buf, confirm_buf, len+2))
- {
- break;
- }
- }
- if (retry < 3)
- {
- memcpy(rxbuf, confirm_buf+2, len);
- return SUCCESS;
- }
- else
- {
- GTP_ERROR("i2c read 0x%04X, %d bytes, double check failed!", addr, len);
- return FAIL;
- }
+ u8 buf[16] = {0};
+ u8 confirm_buf[16] = {0};
+ u8 retry = 0;
+
+ while (retry++ < 3) {
+ memset(buf, 0xAA, 16);
+ buf[0] = (u8)(addr >> 8);
+ buf[1] = (u8)(addr & 0xFF);
+ gtp_i2c_read(client, buf, len + 2);
+
+ memset(confirm_buf, 0xAB, 16);
+ confirm_buf[0] = (u8)(addr >> 8);
+ confirm_buf[1] = (u8)(addr & 0xFF);
+ gtp_i2c_read(client, confirm_buf, len + 2);
+
+ if (!memcmp(buf, confirm_buf, len + 2))
+ break;
+ }
+ if (retry < 3) {
+ memcpy(rxbuf, confirm_buf + 2, len);
+ return SUCCESS;
+ }
+ dev_err(&client->dev,
+ "i2c read 0x%04X, %d bytes, double check failed!", addr, len);
+ return FAIL;
}
/*******************************************************
Function:
- Send config.
+ Send config data.
Input:
- client: i2c device.
+ client: i2c device.
Output:
- result of i2c write operation.
- 1: succeed, otherwise: failed
+ result of i2c write operation.
+ > 0: succeed, otherwise: failed
*********************************************************/
-s32 gtp_send_cfg(struct i2c_client *client)
+static int gtp_send_cfg(struct goodix_ts_data *ts)
{
- s32 ret = 2;
-
+	int ret = 2;
#if GTP_DRIVER_SEND_CFG
- s32 retry = 0;
- struct goodix_ts_data *ts = i2c_get_clientdata(client);
-
- if (ts->fixed_cfg)
- {
- GTP_INFO("Ic fixed config, no config sent!");
- return 2;
- }
- GTP_INFO("driver send config");
- for (retry = 0; retry < 5; retry++)
- {
- ret = gtp_i2c_write(client, config , GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH);
- if (ret > 0)
- {
- break;
- }
- }
+ int retry = 0;
+
+ if (ts->fixed_cfg) {
+ dev_dbg(&ts->client->dev,
+ "Ic fixed config, no config sent!");
+ ret = 2;
+ } else {
+ for (retry = 0; retry < 5; retry++) {
+ ret = gtp_i2c_write(ts->client,
+ ts->config_data,
+ GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH);
+ if (ret > 0)
+ break;
+ }
+ }
#endif
- return ret;
+ return ret;
}
/*******************************************************
Function:
- Disable irq function
+ Disable irq function
Input:
- ts: goodix i2c_client private data
+ ts: goodix i2c_client private data
Output:
- None.
+ None.
*********************************************************/
void gtp_irq_disable(struct goodix_ts_data *ts)
{
- unsigned long irqflags;
+ unsigned long irqflags;
- GTP_DEBUG_FUNC();
+ GTP_DEBUG_FUNC();
- spin_lock_irqsave(&ts->irq_lock, irqflags);
- if (!ts->irq_is_disable)
- {
- ts->irq_is_disable = 1;
- disable_irq_nosync(ts->client->irq);
- }
- spin_unlock_irqrestore(&ts->irq_lock, irqflags);
+ spin_lock_irqsave(&ts->irq_lock, irqflags);
+ if (!ts->irq_is_disabled) {
+ ts->irq_is_disabled = true;
+ disable_irq_nosync(ts->client->irq);
+ }
+ spin_unlock_irqrestore(&ts->irq_lock, irqflags);
}
/*******************************************************
Function:
- Enable irq function
+ Enable irq function
Input:
- ts: goodix i2c_client private data
+ ts: goodix i2c_client private data
Output:
- None.
+ None.
*********************************************************/
void gtp_irq_enable(struct goodix_ts_data *ts)
{
- unsigned long irqflags = 0;
-
- GTP_DEBUG_FUNC();
-
- spin_lock_irqsave(&ts->irq_lock, irqflags);
- if (ts->irq_is_disable)
- {
- enable_irq(ts->client->irq);
- ts->irq_is_disable = 0;
- }
- spin_unlock_irqrestore(&ts->irq_lock, irqflags);
-}
+ unsigned long irqflags = 0;
+
+ GTP_DEBUG_FUNC();
+ spin_lock_irqsave(&ts->irq_lock, irqflags);
+ if (ts->irq_is_disabled) {
+ enable_irq(ts->client->irq);
+ ts->irq_is_disabled = false;
+ }
+ spin_unlock_irqrestore(&ts->irq_lock, irqflags);
+}
/*******************************************************
Function:
- Report touch point event
+ Report touch point event
Input:
- ts: goodix i2c_client private data
- id: trackId
- x: input x coordinate
- y: input y coordinate
- w: input pressure
+ ts: goodix i2c_client private data
+ id: trackId
+ x: input x coordinate
+ y: input y coordinate
+ w: input pressure
Output:
- None.
+ None.
*********************************************************/
-static void gtp_touch_down(struct goodix_ts_data* ts,s32 id,s32 x,s32 y,s32 w)
+static void gtp_touch_down(struct goodix_ts_data *ts, int id, int x, int y,
+ int w)
{
#if GTP_CHANGE_X2Y
- GTP_SWAP(x, y);
+ GTP_SWAP(x, y);
#endif
#if GTP_ICS_SLOT_REPORT
- input_mt_slot(ts->input_dev, id);
- input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
- input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
- input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
- input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_mt_slot(ts->input_dev, id);
+ input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
#else
- input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
- input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
- input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
- input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
- input_mt_sync(ts->input_dev);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, w);
+ input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, id);
+ input_mt_sync(ts->input_dev);
#endif
- GTP_DEBUG("ID:%d, X:%d, Y:%d, W:%d", id, x, y, w);
+ GTP_DEBUG("ID:%d, X:%d, Y:%d, W:%d", id, x, y, w);
}
/*******************************************************
Function:
- Report touch release event
+ Report touch release event
Input:
- ts: goodix i2c_client private data
+ ts: goodix i2c_client private data
Output:
- None.
+ None.
*********************************************************/
-static void gtp_touch_up(struct goodix_ts_data* ts, s32 id)
+static void gtp_touch_up(struct goodix_ts_data *ts, int id)
{
#if GTP_ICS_SLOT_REPORT
- input_mt_slot(ts->input_dev, id);
- input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
- GTP_DEBUG("Touch id[%2d] release!", id);
+ input_mt_slot(ts->input_dev, id);
+ input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
+ GTP_DEBUG("Touch id[%2d] release!", id);
#else
- input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
- input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
- input_mt_sync(ts->input_dev);
+ input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0);
+ input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0);
+ input_mt_sync(ts->input_dev);
#endif
}
+
/*******************************************************
Function:
- Goodix touchscreen work function
+ Goodix touchscreen work function
Input:
- work: work struct of goodix_workqueue
+ work: work struct of goodix_workqueue
Output:
- None.
+ None.
*********************************************************/
static void goodix_ts_work_func(struct work_struct *work)
{
- u8 end_cmd[3] = {GTP_READ_COOR_ADDR >> 8, GTP_READ_COOR_ADDR & 0xFF, 0};
- u8 point_data[2 + 1 + 8 * GTP_MAX_TOUCH + 1]={GTP_READ_COOR_ADDR >> 8, GTP_READ_COOR_ADDR & 0xFF};
- u8 touch_num = 0;
- u8 finger = 0;
- static u16 pre_touch = 0;
- static u8 pre_key = 0;
+ u8 end_cmd[3] = { GTP_READ_COOR_ADDR >> 8,
+ GTP_READ_COOR_ADDR & 0xFF, 0};
+ u8 point_data[2 + 1 + 8 * GTP_MAX_TOUCH + 1] = {
+ GTP_READ_COOR_ADDR >> 8,
+ GTP_READ_COOR_ADDR & 0xFF};
+ u8 touch_num = 0;
+ u8 finger = 0;
+ static u16 pre_touch;
+ static u8 pre_key;
#if GTP_WITH_PEN
- static u8 pre_pen = 0;
+ static u8 pre_pen;
#endif
- u8 key_value = 0;
- u8* coor_data = NULL;
- s32 input_x = 0;
- s32 input_y = 0;
- s32 input_w = 0;
- s32 id = 0;
- s32 i = 0;
- s32 ret = -1;
- struct goodix_ts_data *ts = NULL;
+ u8 key_value = 0;
+ u8 *coor_data = NULL;
+ s32 input_x = 0;
+ s32 input_y = 0;
+ s32 input_w = 0;
+ s32 id = 0;
+ s32 i = 0;
+ int ret = -1;
+ struct goodix_ts_data *ts = NULL;
#if GTP_SLIDE_WAKEUP
- u8 doze_buf[3] = {0x81, 0x4B};
+ u8 doze_buf[3] = {0x81, 0x4B};
+#endif
+
+ GTP_DEBUG_FUNC();
+
+ ts = container_of(work, struct goodix_ts_data, work);
+#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
+ if (ts->enter_update)
+ return;
#endif
- GTP_DEBUG_FUNC();
- ts = container_of(work, struct goodix_ts_data, work);
- if (ts->enter_update)
- {
- return;
- }
#if GTP_SLIDE_WAKEUP
- if (DOZE_ENABLED == doze_status)
- {
- ret = gtp_i2c_read(i2c_connect_client, doze_buf, 3);
- GTP_DEBUG("0x814B = 0x%02X", doze_buf[2]);
- if (ret > 0)
- {
- if (doze_buf[2] == 0xAA)
- {
- GTP_INFO("Slide(0xAA) To Light up the screen!");
- doze_status = DOZE_WAKEUP;
- input_report_key(ts->input_dev, KEY_POWER, 1);
- input_sync(ts->input_dev);
- input_report_key(ts->input_dev, KEY_POWER, 0);
- input_sync(ts->input_dev);
- // clear 0x814B
- doze_buf[2] = 0x00;
- gtp_i2c_write(i2c_connect_client, doze_buf, 3);
- }
- else if (doze_buf[2] == 0xBB)
- {
- GTP_INFO("Slide(0xBB) To Light up the screen!");
- doze_status = DOZE_WAKEUP;
- input_report_key(ts->input_dev, KEY_POWER, 1);
- input_sync(ts->input_dev);
- input_report_key(ts->input_dev, KEY_POWER, 0);
- input_sync(ts->input_dev);
- // clear 0x814B
- doze_buf[2] = 0x00;
- gtp_i2c_write(i2c_connect_client, doze_buf, 3);
- }
- else if (0xC0 == (doze_buf[2] & 0xC0))
- {
- GTP_INFO("double click to light up the screen!");
- doze_status = DOZE_WAKEUP;
- input_report_key(ts->input_dev, KEY_POWER, 1);
- input_sync(ts->input_dev);
- input_report_key(ts->input_dev, KEY_POWER, 0);
- input_sync(ts->input_dev);
- // clear 0x814B
- doze_buf[2] = 0x00;
- gtp_i2c_write(i2c_connect_client, doze_buf, 3);
- }
- else
- {
- gtp_enter_doze(ts);
- }
- }
- if (ts->use_irq)
- {
- gtp_irq_enable(ts);
- }
- return;
- }
+ if (doze_status == DOZE_ENABLED) {
+ ret = gtp_i2c_read(ts->client, doze_buf, 3);
+ GTP_DEBUG("0x814B = 0x%02X", doze_buf[2]);
+ if (ret > 0) {
+ if (doze_buf[2] == 0xAA) {
+ dev_dbg(&ts->client->dev,
+ "Slide(0xAA) To Light up the screen!");
+ doze_status = DOZE_WAKEUP;
+ input_report_key(
+ ts->input_dev, KEY_POWER, 1);
+ input_sync(ts->input_dev);
+ input_report_key(
+ ts->input_dev, KEY_POWER, 0);
+ input_sync(ts->input_dev);
+ /* clear 0x814B */
+ doze_buf[2] = 0x00;
+ gtp_i2c_write(ts->client, doze_buf, 3);
+ } else if (doze_buf[2] == 0xBB) {
+ dev_dbg(&ts->client->dev,
+ "Slide(0xBB) To Light up the screen!");
+ doze_status = DOZE_WAKEUP;
+ input_report_key(ts->input_dev, KEY_POWER, 1);
+ input_sync(ts->input_dev);
+ input_report_key(ts->input_dev, KEY_POWER, 0);
+ input_sync(ts->input_dev);
+ /* clear 0x814B*/
+ doze_buf[2] = 0x00;
+ gtp_i2c_write(ts->client, doze_buf, 3);
+ } else if (0xC0 == (doze_buf[2] & 0xC0)) {
+ dev_dbg(&ts->client->dev,
+ "double click to light up the screen!");
+ doze_status = DOZE_WAKEUP;
+ input_report_key(ts->input_dev, KEY_POWER, 1);
+ input_sync(ts->input_dev);
+ input_report_key(ts->input_dev, KEY_POWER, 0);
+ input_sync(ts->input_dev);
+ /* clear 0x814B */
+ doze_buf[2] = 0x00;
+ gtp_i2c_write(ts->client, doze_buf, 3);
+ } else {
+ gtp_enter_doze(ts);
+ }
+ }
+ if (ts->use_irq)
+ gtp_irq_enable(ts);
+
+ return;
+ }
#endif
- ret = gtp_i2c_read(ts->client, point_data, 12);
- if (ret < 0)
- {
- GTP_ERROR("I2C transfer error. errno:%d\n ", ret);
- goto exit_work_func;
- }
-
- finger = point_data[GTP_ADDR_LENGTH];
- if((finger & 0x80) == 0)
- {
- goto exit_work_func;
- }
-
- touch_num = finger & 0x0f;
- if (touch_num > GTP_MAX_TOUCH)
- {
- goto exit_work_func;
- }
-
- if (touch_num > 1)
- {
- u8 buf[8 * GTP_MAX_TOUCH] = {(GTP_READ_COOR_ADDR + 10) >> 8, (GTP_READ_COOR_ADDR + 10) & 0xff};
-
- ret = gtp_i2c_read(ts->client, buf, 2 + 8 * (touch_num - 1));
- memcpy(&point_data[12], &buf[2], 8 * (touch_num - 1));
- }
+ ret = gtp_i2c_read(ts->client, point_data, 12);
+ if (ret < 0) {
+ dev_err(&ts->client->dev,
+ "I2C transfer error. errno:%d\n ", ret);
+ goto exit_work_func;
+ }
+
+ finger = point_data[GTP_ADDR_LENGTH];
+ if ((finger & 0x80) == 0)
+ goto exit_work_func;
+
+ touch_num = finger & 0x0f;
+ if (touch_num > GTP_MAX_TOUCH)
+ goto exit_work_func;
+
+ if (touch_num > 1) {
+ u8 buf[8 * GTP_MAX_TOUCH] = { (GTP_READ_COOR_ADDR + 10) >> 8,
+ (GTP_READ_COOR_ADDR + 10) & 0xff };
+
+ ret = gtp_i2c_read(ts->client, buf,
+ 2 + 8 * (touch_num - 1));
+ memcpy(&point_data[12], &buf[2], 8 * (touch_num - 1));
+ }
#if GTP_HAVE_TOUCH_KEY
- key_value = point_data[3 + 8 * touch_num];
-
- if(key_value || pre_key)
- {
- for (i = 0; i < GTP_MAX_KEY_NUM; i++)
- {
- #if GTP_DEBUG_ON
- for (ret = 0; ret < 4; ++ret)
- {
- if (key_codes[ret] == touch_key_array[i])
- {
- GTP_DEBUG("Key: %s %s", key_names[ret], (key_value & (0x01 << i)) ? "Down" : "Up");
- break;
- }
- }
- #endif
- input_report_key(ts->input_dev, touch_key_array[i], key_value & (0x01<<i));
- }
- touch_num = 0;
- pre_touch = 0;
- }
+ key_value = point_data[3 + 8 * touch_num];
+
+ if (key_value || pre_key) {
+ for (i = 0; i < ARRAY_SIZE(touch_key_array); i++) {
+#if GTP_DEBUG_ON
+ for (ret = 0; ret < 4; ++ret) {
+ if (key_codes[ret] == touch_key_array[i]) {
+ GTP_DEBUG("Key: %s %s",
+ key_names[ret],
+ (key_value & (0x01 << i))
+ ? "Down" : "Up");
+ break;
+ }
+ }
+#endif
+
+ input_report_key(ts->input_dev,
+ touch_key_array[i], key_value & (0x01<<i));
+ }
+ touch_num = 0;
+ pre_touch = 0;
+ }
#endif
- pre_key = key_value;
+ pre_key = key_value;
- GTP_DEBUG("pre_touch:%02x, finger:%02x.", pre_touch, finger);
+ GTP_DEBUG("pre_touch:%02x, finger:%02x.", pre_touch, finger);
#if GTP_ICS_SLOT_REPORT
+#if GTP_WITH_PEN
+ if (pre_pen && (touch_num == 0)) {
+ GTP_DEBUG("Pen touch UP(Slot)!");
+ input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
+ input_mt_slot(ts->input_dev, 5);
+ input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
+ pre_pen = 0;
+ }
+#endif
+ if (pre_touch || touch_num) {
+ s32 pos = 0;
+ u16 touch_index = 0;
+ coor_data = &point_data[3];
+ if (touch_num) {
+ id = coor_data[pos] & 0x0F;
#if GTP_WITH_PEN
- if (pre_pen && (touch_num == 0))
- {
- GTP_DEBUG("Pen touch UP(Slot)!");
- input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
- input_mt_slot(ts->input_dev, 5);
- input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, -1);
- pre_pen = 0;
- }
+ id = coor_data[pos];
+ if (id == 128) {
+ GTP_DEBUG("Pen touch DOWN(Slot)!");
+ input_x = coor_data[pos + 1]
+ | (coor_data[pos + 2] << 8);
+ input_y = coor_data[pos + 3]
+ | (coor_data[pos + 4] << 8);
+ input_w = coor_data[pos + 5]
+ | (coor_data[pos + 6] << 8);
+
+ input_report_key(ts->input_dev,
+ BTN_TOOL_PEN, 1);
+ input_mt_slot(ts->input_dev, 5);
+ input_report_abs(ts->input_dev,
+ ABS_MT_TRACKING_ID, 5);
+ input_report_abs(ts->input_dev,
+ ABS_MT_POSITION_X, input_x);
+ input_report_abs(ts->input_dev,
+ ABS_MT_POSITION_Y, input_y);
+ input_report_abs(ts->input_dev,
+ ABS_MT_TOUCH_MAJOR, input_w);
+ GTP_DEBUG("Pen/Stylus: (%d, %d)[%d]",
+ input_x, input_y, input_w);
+ pre_pen = 1;
+ pre_touch = 0;
+ }
#endif
- if (pre_touch || touch_num)
- {
- s32 pos = 0;
- u16 touch_index = 0;
-
- coor_data = &point_data[3];
-
- if(touch_num)
- {
- id = coor_data[pos] & 0x0F;
-
- #if GTP_WITH_PEN
- id = coor_data[pos];
- if ((id == 128))
- {
- GTP_DEBUG("Pen touch DOWN(Slot)!");
- input_x = coor_data[pos + 1] | (coor_data[pos + 2] << 8);
- input_y = coor_data[pos + 3] | (coor_data[pos + 4] << 8);
- input_w = coor_data[pos + 5] | (coor_data[pos + 6] << 8);
-
- input_report_key(ts->input_dev, BTN_TOOL_PEN, 1);
- input_mt_slot(ts->input_dev, 5);
- input_report_abs(ts->input_dev, ABS_MT_TRACKING_ID, 5);
- input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
- input_report_abs(ts->input_dev, ABS_MT_POSITION_Y, input_y);
- input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
- GTP_DEBUG("Pen/Stylus: (%d, %d)[%d]", input_x, input_y, input_w);
- pre_pen = 1;
- pre_touch = 0;
- }
- #endif
-
- touch_index |= (0x01<<id);
- }
-
- GTP_DEBUG("id = %d,touch_index = 0x%x, pre_touch = 0x%x\n",id, touch_index,pre_touch);
- for (i = 0; i < GTP_MAX_TOUCH; i++)
- {
- #if GTP_WITH_PEN
- if (pre_pen == 1)
- {
- break;
- }
- #endif
-
- if (touch_index & (0x01<<i))
- {
- input_x = coor_data[pos + 1] | (coor_data[pos + 2] << 8);
- input_y = coor_data[pos + 3] | (coor_data[pos + 4] << 8);
- input_w = coor_data[pos + 5] | (coor_data[pos + 6] << 8);
-
- gtp_touch_down(ts, id, input_x, input_y, input_w);
- pre_touch |= 0x01 << i;
-
- pos += 8;
- id = coor_data[pos] & 0x0F;
- touch_index |= (0x01<<id);
- }
- else
- {
- gtp_touch_up(ts, i);
- pre_touch &= ~(0x01 << i);
- }
- }
- }
+
+ touch_index |= (0x01<<id);
+ }
+
+ GTP_DEBUG("id = %d,touch_index = 0x%x, pre_touch = 0x%x\n",
+ id, touch_index, pre_touch);
+ for (i = 0; i < GTP_MAX_TOUCH; i++) {
+#if GTP_WITH_PEN
+ if (pre_pen == 1)
+ break;
+#endif
+ if (touch_index & (0x01<<i)) {
+ input_x = coor_data[pos + 1] |
+ coor_data[pos + 2] << 8;
+ input_y = coor_data[pos + 3] |
+ coor_data[pos + 4] << 8;
+ input_w = coor_data[pos + 5] |
+ coor_data[pos + 6] << 8;
+
+ gtp_touch_down(ts, id,
+ input_x, input_y, input_w);
+ pre_touch |= 0x01 << i;
+
+ pos += 8;
+ id = coor_data[pos] & 0x0F;
+ touch_index |= (0x01<<id);
+ } else {
+ gtp_touch_up(ts, i);
+ pre_touch &= ~(0x01 << i);
+ }
+ }
+ }
#else
- input_report_key(ts->input_dev, BTN_TOUCH, (touch_num || key_value));
- if (touch_num)
- {
- for (i = 0; i < touch_num; i++)
- {
- coor_data = &point_data[i * 8 + 3];
-
- id = coor_data[0]; // & 0x0F;
- input_x = coor_data[1] | (coor_data[2] << 8);
- input_y = coor_data[3] | (coor_data[4] << 8);
- input_w = coor_data[5] | (coor_data[6] << 8);
-
- #if GTP_WITH_PEN
- if (id == 128)
- {
- GTP_DEBUG("Pen touch DOWN!");
- input_report_key(ts->input_dev, BTN_TOOL_PEN, 1);
- pre_pen = 1;
- id = 0;
- }
- #endif
-
- gtp_touch_down(ts, id, input_x, input_y, input_w);
- }
- }
- else if (pre_touch)
- {
-
- #if GTP_WITH_PEN
- if (pre_pen == 1)
- {
- GTP_DEBUG("Pen touch UP!");
- input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
- pre_pen = 0;
- }
- #endif
-
- GTP_DEBUG("Touch Release!");
- gtp_touch_up(ts, 0);
- }
-
- pre_touch = touch_num;
+ input_report_key(ts->input_dev, BTN_TOUCH, (touch_num || key_value));
+ if (touch_num) {
+ for (i = 0; i < touch_num; i++) {
+ coor_data = &point_data[i * 8 + 3];
+
+ id = coor_data[0];
+ input_x = coor_data[1] | coor_data[2] << 8;
+ input_y = coor_data[3] | coor_data[4] << 8;
+ input_w = coor_data[5] | coor_data[6] << 8;
+#if GTP_WITH_PEN
+ if (id == 128) {
+ GTP_DEBUG("Pen touch DOWN!");
+ input_report_key(ts->input_dev,
+ BTN_TOOL_PEN, 1);
+ pre_pen = 1;
+ id = 0;
+ }
+#endif
+ gtp_touch_down(ts, id, input_x, input_y, input_w);
+ }
+ } else if (pre_touch) {
+#if GTP_WITH_PEN
+ if (pre_pen == 1) {
+ GTP_DEBUG("Pen touch UP!");
+ input_report_key(ts->input_dev, BTN_TOOL_PEN, 0);
+ pre_pen = 0;
+ }
+#endif
+ GTP_DEBUG("Touch Released!");
+ gtp_touch_up(ts, 0);
+ }
+
+ pre_touch = touch_num;
#endif
- input_sync(ts->input_dev);
+ input_sync(ts->input_dev);
exit_work_func:
- if(!ts->gtp_rawdiff_mode)
- {
- ret = gtp_i2c_write(ts->client, end_cmd, 3);
- if (ret < 0)
- {
- GTP_INFO("I2C write end_cmd error!");
- }
- }
- if (ts->use_irq)
- {
- gtp_irq_enable(ts);
- }
+ if (!ts->gtp_rawdiff_mode) {
+ ret = gtp_i2c_write(ts->client, end_cmd, 3);
+ if (ret < 0)
+ dev_warn(&ts->client->dev, "I2C write end_cmd error!\n");
+
+ }
+ if (ts->use_irq)
+ gtp_irq_enable(ts);
+
+ return;
}
/*******************************************************
Function:
- Timer interrupt service routine for polling mode.
+ Timer interrupt service routine for polling mode.
Input:
- timer: timer struct pointer
+ timer: timer struct pointer
Output:
- Timer work mode.
- HRTIMER_NORESTART: no restart mode
+ Timer work mode.
+ HRTIMER_NORESTART: no restart mode
*********************************************************/
static enum hrtimer_restart goodix_ts_timer_handler(struct hrtimer *timer)
{
- struct goodix_ts_data *ts = container_of(timer, struct goodix_ts_data, timer);
+	struct goodix_ts_data *ts =
+			container_of(timer, struct goodix_ts_data, timer);
- GTP_DEBUG_FUNC();
+ GTP_DEBUG_FUNC();
- queue_work(goodix_wq, &ts->work);
- hrtimer_start(&ts->timer, ktime_set(0, (GTP_POLL_TIME+6)*1000000), HRTIMER_MODE_REL);
- return HRTIMER_NORESTART;
+ queue_work(ts->goodix_wq, &ts->work);
+ hrtimer_start(&ts->timer, ktime_set(0, (GTP_POLL_TIME + 6) * 1000000),
+ HRTIMER_MODE_REL);
+ return HRTIMER_NORESTART;
}
/*******************************************************
Function:
- External interrupt service routine for interrupt mode.
+ External interrupt service routine for interrupt mode.
Input:
- irq: interrupt number.
- dev_id: private data pointer
+ irq: interrupt number.
+ dev_id: private data pointer
Output:
- Handle Result.
- IRQ_HANDLED: interrupt handled successfully
+ Handle Result.
+ IRQ_HANDLED: interrupt handled successfully
*********************************************************/
static irqreturn_t goodix_ts_irq_handler(int irq, void *dev_id)
{
- struct goodix_ts_data *ts = dev_id;
+ struct goodix_ts_data *ts = dev_id;
- GTP_DEBUG_FUNC();
-
- gtp_irq_disable(ts);
+ GTP_DEBUG_FUNC();
- queue_work(goodix_wq, &ts->work);
-
- return IRQ_HANDLED;
+ gtp_irq_disable(ts);
+
+ queue_work(ts->goodix_wq, &ts->work);
+
+ return IRQ_HANDLED;
}
/*******************************************************
Function:
- Synchronization.
+ Synchronization.
Input:
- ms: synchronization time in millisecond.
+ ms: synchronization time in millisecond.
Output:
- None.
+ None.
*******************************************************/
-void gtp_int_sync(s32 ms)
+void gtp_int_sync(struct goodix_ts_data *ts, int ms)
{
- GTP_GPIO_OUTPUT(GTP_INT_PORT, 0);
- msleep(ms);
- GTP_GPIO_AS_INT(GTP_INT_PORT);
+ gpio_direction_output(ts->pdata->irq_gpio, 0);
+ msleep(ms);
+ gpio_direction_input(ts->pdata->irq_gpio);
}
/*******************************************************
Function:
- Reset chip.
+ Reset chip.
Input:
- ms: reset time in millisecond
+	ms: reset time in milliseconds, must be > 10 ms
Output:
- None.
+ None.
*******************************************************/
-void gtp_reset_guitar(struct i2c_client *client, s32 ms)
+static void gtp_reset_guitar(struct goodix_ts_data *ts, int ms)
{
- GTP_DEBUG_FUNC();
+ GTP_DEBUG_FUNC();
- GTP_GPIO_OUTPUT(GTP_RST_PORT, 0); // begin select I2C slave addr
- msleep(ms); // T2: > 10ms
- // HIGH: 0x28/0x29, LOW: 0xBA/0xBB
- GTP_GPIO_OUTPUT(GTP_INT_PORT, client->addr == 0x14);
+	/* This reset sequence will select the I2C slave address */
+ gpio_direction_output(ts->pdata->reset_gpio, 0);
+ msleep(ms);
- msleep(2); // T3: > 100us
- GTP_GPIO_OUTPUT(GTP_RST_PORT, 1);
-
- msleep(6); // T4: > 5ms
+ if (ts->client->addr == GTP_I2C_ADDRESS_HIGH)
+ gpio_direction_output(ts->pdata->irq_gpio, 1);
+ else
+ gpio_direction_output(ts->pdata->irq_gpio, 0);
- GTP_GPIO_AS_INPUT(GTP_RST_PORT); // end select I2C slave addr
+ usleep(RESET_DELAY_T3_US);
+ gpio_direction_output(ts->pdata->reset_gpio, 1);
+ msleep(RESET_DELAY_T4);
+
+ gpio_direction_input(ts->pdata->reset_gpio);
+
+ gtp_int_sync(ts, 50);
- gtp_int_sync(50);
-
#if GTP_ESD_PROTECT
- gtp_init_ext_watchdog(client);
+ gtp_init_ext_watchdog(ts->client);
#endif
}
+#ifdef CONFIG_HAS_EARLYSUSPEND
#if GTP_SLIDE_WAKEUP
/*******************************************************
Function:
- Enter doze mode for sliding wakeup.
+ Enter doze mode for sliding wakeup.
Input:
- ts: goodix tp private data
+ ts: goodix tp private data
Output:
- 1: succeed, otherwise failed
+ 1: succeed, otherwise failed
*******************************************************/
static s8 gtp_enter_doze(struct goodix_ts_data *ts)
{
- s8 ret = -1;
- s8 retry = 0;
- u8 i2c_control_buf[3] = {(u8)(GTP_REG_SLEEP >> 8), (u8)GTP_REG_SLEEP, 8};
+ int ret = -1;
+ s8 retry = 0;
+ u8 i2c_control_buf[3] = {
+ (u8)(GTP_REG_SLEEP >> 8),
+ (u8)GTP_REG_SLEEP, 8};
- GTP_DEBUG_FUNC();
+ GTP_DEBUG_FUNC();
#if GTP_DBL_CLK_WAKEUP
- i2c_control_buf[2] = 0x09;
+ i2c_control_buf[2] = 0x09;
#endif
-
- gtp_irq_disable(ts);
-
- GTP_DEBUG("entering doze mode...");
- while(retry++ < 5)
- {
- i2c_control_buf[0] = 0x80;
- i2c_control_buf[1] = 0x46;
- ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
- if (ret < 0)
- {
- GTP_DEBUG("failed to set doze flag into 0x8046, %d", retry);
- continue;
- }
- i2c_control_buf[0] = 0x80;
- i2c_control_buf[1] = 0x40;
- ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
- if (ret > 0)
- {
- doze_status = DOZE_ENABLED;
- GTP_INFO("GTP has been working in doze mode!");
- gtp_irq_enable(ts);
- return ret;
- }
- msleep(10);
- }
- GTP_ERROR("GTP send doze cmd failed.");
- gtp_irq_enable(ts);
- return ret;
+ gtp_irq_disable(ts);
+
+ GTP_DEBUG("entering doze mode...");
+ while (retry++ < 5) {
+ i2c_control_buf[0] = 0x80;
+ i2c_control_buf[1] = 0x46;
+ ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+ if (ret < 0) {
+ GTP_DEBUG(
+ "failed to set doze flag into 0x8046, %d",
+ retry);
+ continue;
+ }
+ i2c_control_buf[0] = 0x80;
+ i2c_control_buf[1] = 0x40;
+ ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+ if (ret > 0) {
+ doze_status = DOZE_ENABLED;
+ dev_dbg(&ts->client->dev,
+ "GTP has been working in doze mode!");
+ gtp_irq_enable(ts);
+ return ret;
+ }
+ msleep(20);
+ }
+ dev_err(&ts->client->dev, "GTP send doze cmd failed.\n");
+ gtp_irq_enable(ts);
+ return ret;
}
-#else
+#else
/*******************************************************
Function:
- Enter sleep mode.
+ Enter sleep mode.
Input:
- ts: private data.
+ ts: private data.
Output:
- Executive outcomes.
- 1: succeed, otherwise failed.
+ Executive outcomes.
+ 1: succeed, otherwise failed.
*******************************************************/
-static s8 gtp_enter_sleep(struct goodix_ts_data * ts)
+static s8 gtp_enter_sleep(struct goodix_ts_data *ts)
{
- s8 ret = -1;
- s8 retry = 0;
- u8 i2c_control_buf[3] = {(u8)(GTP_REG_SLEEP >> 8), (u8)GTP_REG_SLEEP, 5};
-
- GTP_DEBUG_FUNC();
-
- GTP_GPIO_OUTPUT(GTP_INT_PORT, 0);
- msleep(5);
-
- while(retry++ < 5)
- {
- ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
- if (ret > 0)
- {
- GTP_INFO("GTP enter sleep!");
-
- return ret;
- }
- msleep(10);
- }
- GTP_ERROR("GTP send sleep cmd failed.");
- return ret;
+ int ret = -1;
+ s8 retry = 0;
+ u8 i2c_control_buf[3] = {
+ (u8)(GTP_REG_SLEEP >> 8),
+ (u8)GTP_REG_SLEEP, 5};
+
+ GTP_DEBUG_FUNC();
+
+ ret = gpio_direction_output(ts->pdata->irq_gpio, 0);
+ usleep(5000);
+ while (retry++ < 5) {
+ ret = gtp_i2c_write(ts->client, i2c_control_buf, 3);
+ if (ret > 0) {
+ dev_dbg(&ts->client->dev,
+ "GTP enter sleep!");
+ return ret;
+ }
+ msleep(20);
+ }
+ dev_err(&ts->client->dev, "GTP send sleep cmd failed.\n");
+ return ret;
}
-#endif
+#endif
+
/*******************************************************
Function:
- Wakeup from sleep.
+ Wakeup from sleep.
Input:
- ts: private data.
+ ts: private data.
Output:
- Executive outcomes.
- >0: succeed, otherwise: failed.
+ Executive outcomes.
+ >0: succeed, otherwise: failed.
*******************************************************/
-static s8 gtp_wakeup_sleep(struct goodix_ts_data * ts)
+static s8 gtp_wakeup_sleep(struct goodix_ts_data *ts)
{
- u8 retry = 0;
- s8 ret = -1;
-
- GTP_DEBUG_FUNC();
-
+ u8 retry = 0;
+ s8 ret = -1;
+
+ GTP_DEBUG_FUNC();
+
#if GTP_POWER_CTRL_SLEEP
- while(retry++ < 5)
- {
- gtp_reset_guitar(ts->client, 20);
-
- ret = gtp_send_cfg(ts->client);
- if (ret < 0)
- {
- GTP_INFO("Wakeup sleep send config failed!");
- continue;
- }
- GTP_INFO("GTP wakeup sleep");
- return 1;
- }
+ while (retry++ < 5) {
+ gtp_reset_guitar(ts, 20);
+
+ ret = gtp_send_cfg(ts);
+ if (ret > 0) {
+ dev_dbg(&ts->client->dev,
+ "Wakeup sleep send config success.");
+ continue;
+ }
+ dev_dbg(&ts->client->dev, "GTP Wakeup!");
+ return 1;
+ }
#else
- while(retry++ < 10)
- {
- #if GTP_SLIDE_WAKEUP
- if (DOZE_WAKEUP != doze_status) // wakeup not by slide
- {
- gtp_reset_guitar(ts->client, 10);
- }
- else // wakeup by slide
- {
- doze_status = DOZE_DISABLED;
- }
- #else
- if (chip_gt9xxs == 1)
- {
- gtp_reset_guitar(ts->client, 10);
- }
- else
- {
- GTP_GPIO_OUTPUT(GTP_INT_PORT, 1);
- msleep(5);
- }
- #endif
- ret = gtp_i2c_test(ts->client);
- if (ret > 0)
- {
- GTP_INFO("GTP wakeup sleep.");
-
- #if (!GTP_SLIDE_WAKEUP)
- if (chip_gt9xxs == 0)
- {
- gtp_int_sync(25);
- msleep(20);
- #if GTP_ESD_PROTECT
- gtp_init_ext_watchdog(ts->client);
- #endif
- }
- #endif
- return ret;
- }
- gtp_reset_guitar(ts->client, 20);
- }
+ while (retry++ < 10) {
+#if GTP_SLIDE_WAKEUP
+ /* wakeup not by slide */
+ if (doze_status != DOZE_WAKEUP)
+ gtp_reset_guitar(ts, 10);
+ else
+ /* wakeup by slide */
+ doze_status = DOZE_DISABLED;
+#else
+ if (chip_gt9xxs == 1) {
+ gtp_reset_guitar(ts, 10);
+ } else {
+ ret = gpio_direction_output(ts->pdata->irq_gpio, 1);
+ usleep(5000);
+ }
+#endif
+ ret = gtp_i2c_test(ts->client);
+ if (ret > 0) {
+ dev_dbg(&ts->client->dev, "GTP wakeup sleep.");
+#if (!GTP_SLIDE_WAKEUP)
+ if (chip_gt9xxs == 0) {
+ gtp_int_sync(ts, 25);
+ msleep(20);
+#if GTP_ESD_PROTECT
+ gtp_init_ext_watchdog(ts->client);
+#endif
+ }
+#endif
+ return ret;
+ }
+ gtp_reset_guitar(ts, 20);
+ }
#endif
- GTP_ERROR("GTP wakeup sleep failed.");
- return ret;
+ dev_err(&ts->client->dev, "GTP wakeup sleep failed.\n");
+ return ret;
}
+#endif /* CONFIG_HAS_EARLYSUSPEND */
/*******************************************************
Function:
- Initialize gtp.
+ Initialize gtp.
Input:
- ts: goodix private data
+ ts: goodix private data
Output:
- Executive outcomes.
- 0: succeed, otherwise: failed
+ Executive outcomes.
+	>= 0: succeed, otherwise: failed
*******************************************************/
-static s32 gtp_init_panel(struct goodix_ts_data *ts)
+static int gtp_init_panel(struct goodix_ts_data *ts)
{
- s32 ret = -1;
+ struct i2c_client *client = ts->client;
+ unsigned char *config_data;
+ int ret = -EIO;
#if GTP_DRIVER_SEND_CFG
- s32 i;
- u8 check_sum = 0;
- u8 opr_buf[16];
- u8 sensor_id = 0;
-
- u8 cfg_info_group1[] = CTP_CFG_GROUP1;
- u8 cfg_info_group2[] = CTP_CFG_GROUP2;
- u8 cfg_info_group3[] = CTP_CFG_GROUP3;
- u8 cfg_info_group4[] = CTP_CFG_GROUP4;
- u8 cfg_info_group5[] = CTP_CFG_GROUP5;
- u8 cfg_info_group6[] = CTP_CFG_GROUP6;
- u8 *send_cfg_buf[] = {cfg_info_group1, cfg_info_group2, cfg_info_group3,
- cfg_info_group4, cfg_info_group5, cfg_info_group6};
- u8 cfg_info_len[] = { CFG_GROUP_LEN(cfg_info_group1),
- CFG_GROUP_LEN(cfg_info_group2),
- CFG_GROUP_LEN(cfg_info_group3),
- CFG_GROUP_LEN(cfg_info_group4),
- CFG_GROUP_LEN(cfg_info_group5),
- CFG_GROUP_LEN(cfg_info_group6)};
-
- GTP_DEBUG("Config Groups\' Lengths: %d, %d, %d, %d, %d, %d",
- cfg_info_len[0], cfg_info_len[1], cfg_info_len[2], cfg_info_len[3],
- cfg_info_len[4], cfg_info_len[5]);
-
- ret = gtp_i2c_read_dbl_check(ts->client, 0x41E4, opr_buf, 1);
- if (SUCCESS == ret)
- {
- if (opr_buf[0] != 0xBE)
- {
- ts->fw_error = 1;
- GTP_ERROR("Firmware error, no config sent!");
- return -1;
- }
- }
-
- if ((!cfg_info_len[1]) && (!cfg_info_len[2]) &&
- (!cfg_info_len[3]) && (!cfg_info_len[4]) &&
- (!cfg_info_len[5]))
- {
- sensor_id = 0;
- }
- else
- {
- ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_SENSOR_ID, &sensor_id, 1);
- if (SUCCESS == ret)
- {
- if (sensor_id >= 0x06)
- {
- GTP_ERROR("Invalid sensor_id(0x%02X), No Config Sent!", sensor_id);
- return -1;
- }
- }
- else
- {
- GTP_ERROR("Failed to get sensor_id, No config sent!");
- return -1;
- }
- }
- GTP_DEBUG("Sensor_ID: %d", sensor_id);
-
- ts->gtp_cfg_len = cfg_info_len[sensor_id];
-
- if (ts->gtp_cfg_len < GTP_CONFIG_MIN_LENGTH)
- {
- GTP_ERROR("Sensor_ID(%d) matches with NULL or INVALID CONFIG GROUP! NO Config Sent! You need to check you header file CFG_GROUP section!", sensor_id);
- return -1;
- }
-
- ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA, &opr_buf[0], 1);
-
- if (ret == SUCCESS)
- {
- GTP_DEBUG("CFG_GROUP%d Config Version: %d, 0x%02X; IC Config Version: %d, 0x%02X", sensor_id+1,
- send_cfg_buf[sensor_id][0], send_cfg_buf[sensor_id][0], opr_buf[0], opr_buf[0]);
-
- if (opr_buf[0] < 90)
- {
- grp_cfg_version = send_cfg_buf[sensor_id][0]; // backup group config version
- send_cfg_buf[sensor_id][0] = 0x00;
- ts->fixed_cfg = 0;
- }
- else // treated as fixed config, not send config
- {
- GTP_INFO("Ic fixed config with config version(%d, 0x%02X)", opr_buf[0], opr_buf[0]);
- ts->fixed_cfg = 1;
- }
- }
- else
- {
- GTP_ERROR("Failed to get ic config version!No config sent!");
- return -1;
- }
-
- memset(&config[GTP_ADDR_LENGTH], 0, GTP_CONFIG_MAX_LENGTH);
- memcpy(&config[GTP_ADDR_LENGTH], send_cfg_buf[sensor_id], ts->gtp_cfg_len);
+ int i;
+ u8 check_sum = 0;
+ u8 opr_buf[16];
+ u8 sensor_id = 0;
+
+ u8 cfg_info_group1[] = CTP_CFG_GROUP1;
+ u8 cfg_info_group2[] = CTP_CFG_GROUP2;
+ u8 cfg_info_group3[] = CTP_CFG_GROUP3;
+ u8 cfg_info_group4[] = CTP_CFG_GROUP4;
+ u8 cfg_info_group5[] = CTP_CFG_GROUP5;
+ u8 cfg_info_group6[] = CTP_CFG_GROUP6;
+ u8 *send_cfg_buf[] = {cfg_info_group1, cfg_info_group2,
+ cfg_info_group3, cfg_info_group4,
+ cfg_info_group5, cfg_info_group6};
+
+ u8 cfg_info_len[] = {ARRAY_SIZE(cfg_info_group1),
+ ARRAY_SIZE(cfg_info_group2),
+ ARRAY_SIZE(cfg_info_group3),
+ ARRAY_SIZE(cfg_info_group4),
+ ARRAY_SIZE(cfg_info_group5),
+ ARRAY_SIZE(cfg_info_group6)};
+
+ GTP_DEBUG("Config Groups\' Lengths: %d, %d, %d, %d, %d, %d",
+ cfg_info_len[0], cfg_info_len[1], cfg_info_len[2],
+ cfg_info_len[3], cfg_info_len[4], cfg_info_len[5]);
+
+ ret = gtp_i2c_read_dbl_check(ts->client, 0x41E4, opr_buf, 1);
+ if (ret == SUCCESS) {
+ if (opr_buf[0] != 0xBE) {
+ ts->fw_error = 1;
+ dev_err(&client->dev,
+ "Firmware error, no config sent!");
+ return -EINVAL;
+ }
+ }
+ if ((!cfg_info_len[1]) && (!cfg_info_len[2]) && (!cfg_info_len[3])
+ && (!cfg_info_len[4]) && (!cfg_info_len[5])) {
+ sensor_id = 0;
+ } else {
+ ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_SENSOR_ID,
+ &sensor_id, 1);
+ if (ret == SUCCESS) {
+ if (sensor_id >= 0x06) {
+ dev_err(&client->dev,
+ "Invalid sensor_id(0x%02X), No Config Sent!",
+ sensor_id);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(&client->dev,
+ "Failed to get sensor_id, No config sent!");
+ return -EINVAL;
+ }
+ }
+ GTP_DEBUG("Sensor_ID: %d", sensor_id);
+
+ ts->gtp_cfg_len = cfg_info_len[sensor_id];
+
+ if (ts->gtp_cfg_len < GTP_CONFIG_MIN_LENGTH) {
+ dev_err(&client->dev,
+ "Sensor_ID(%d) matches with NULL or INVALID CONFIG GROUP! NO Config Sent! You need to check you header file CFG_GROUP section!\n",
+ sensor_id);
+ return -EINVAL;
+ }
+ ret = gtp_i2c_read_dbl_check(ts->client, GTP_REG_CONFIG_DATA,
+ &opr_buf[0], 1);
+
+ if (ret == SUCCESS) {
+ if (opr_buf[0] < 90) {
+ /* backup group config version */
+ grp_cfg_version = send_cfg_buf[sensor_id][0];
+ send_cfg_buf[sensor_id][0] = 0x00;
+ ts->fixed_cfg = 0;
+ } else {
+ /* treated as fixed config, not send config */
+ dev_warn(&client->dev,
+ "Ic fixed config with config version(%d, 0x%02X)",
+ opr_buf[0], opr_buf[0]);
+ ts->fixed_cfg = 1;
+ }
+ } else {
+ dev_err(&client->dev,
+ "Failed to get ic config version!No config sent!");
+ return -EINVAL;
+ }
+
+ config_data = devm_kzalloc(&client->dev,
+ GTP_CONFIG_MAX_LENGTH + GTP_ADDR_LENGTH,
+ GFP_KERNEL);
+ if (!config_data) {
+ dev_err(&client->dev,
+ "Not enough memory for panel config data\n");
+ return -ENOMEM;
+ }
+
+ ts->config_data = config_data;
+ config_data[0] = GTP_REG_CONFIG_DATA >> 8;
+ config_data[1] = GTP_REG_CONFIG_DATA & 0xff;
+ memset(&config_data[GTP_ADDR_LENGTH], 0, GTP_CONFIG_MAX_LENGTH);
+ memcpy(&config_data[GTP_ADDR_LENGTH], send_cfg_buf[sensor_id],
+ ts->gtp_cfg_len);
#if GTP_CUSTOM_CFG
- config[RESOLUTION_LOC] = (u8)GTP_MAX_WIDTH;
- config[RESOLUTION_LOC + 1] = (u8)(GTP_MAX_WIDTH>>8);
- config[RESOLUTION_LOC + 2] = (u8)GTP_MAX_HEIGHT;
- config[RESOLUTION_LOC + 3] = (u8)(GTP_MAX_HEIGHT>>8);
-
- if (GTP_INT_TRIGGER == 0) //RISING
- {
- config[TRIGGER_LOC] &= 0xfe;
- }
- else if (GTP_INT_TRIGGER == 1) //FALLING
- {
- config[TRIGGER_LOC] |= 0x01;
- }
-#endif // GTP_CUSTOM_CFG
-
- check_sum = 0;
- for (i = GTP_ADDR_LENGTH; i < ts->gtp_cfg_len; i++)
- {
- check_sum += config[i];
- }
- config[ts->gtp_cfg_len] = (~check_sum) + 1;
-
-#else // DRIVER NOT SEND CONFIG
- ts->gtp_cfg_len = GTP_CONFIG_MAX_LENGTH;
- ret = gtp_i2c_read(ts->client, config, ts->gtp_cfg_len + GTP_ADDR_LENGTH);
- if (ret < 0)
- {
- GTP_ERROR("Read Config Failed, Using Default Resolution & INT Trigger!");
- ts->abs_x_max = GTP_MAX_WIDTH;
- ts->abs_y_max = GTP_MAX_HEIGHT;
- ts->int_trigger_type = GTP_INT_TRIGGER;
- }
-#endif // GTP_DRIVER_SEND_CFG
-
- GTP_DEBUG_FUNC();
- if ((ts->abs_x_max == 0) && (ts->abs_y_max == 0))
- {
- ts->abs_x_max = (config[RESOLUTION_LOC + 1] << 8) + config[RESOLUTION_LOC];
- ts->abs_y_max = (config[RESOLUTION_LOC + 3] << 8) + config[RESOLUTION_LOC + 2];
- ts->int_trigger_type = (config[TRIGGER_LOC]) & 0x03;
- }
- ret = gtp_send_cfg(ts->client);
- if (ret < 0)
- {
- GTP_ERROR("Send config error.");
- }
- GTP_DEBUG("X_MAX = %d, Y_MAX = %d, TRIGGER = 0x%02x",
- ts->abs_x_max,ts->abs_y_max,ts->int_trigger_type);
-
- msleep(10);
- return 0;
+ config_data[RESOLUTION_LOC] =
+		(unsigned char)(GTP_MAX_WIDTH & 0xFF);
+ config_data[RESOLUTION_LOC + 1] =
+ (unsigned char)(GTP_MAX_WIDTH >> 8);
+ config_data[RESOLUTION_LOC + 2] =
+		(unsigned char)(GTP_MAX_HEIGHT & 0xFF);
+ config_data[RESOLUTION_LOC + 3] =
+ (unsigned char)(GTP_MAX_HEIGHT >> 8);
+
+ if (GTP_INT_TRIGGER == 0)
+ config_data[TRIGGER_LOC] &= 0xfe;
+ else if (GTP_INT_TRIGGER == 1)
+ config_data[TRIGGER_LOC] |= 0x01;
+#endif /* GTP_CUSTOM_CFG */
+
+ check_sum = 0;
+ for (i = GTP_ADDR_LENGTH; i < ts->gtp_cfg_len; i++)
+ check_sum += config_data[i];
+
+ config_data[ts->gtp_cfg_len] = (~check_sum) + 1;
+
+#else /* DRIVER NOT SEND CONFIG */
+ ts->gtp_cfg_len = GTP_CONFIG_MAX_LENGTH;
+ ret = gtp_i2c_read(ts->client, config_data,
+ ts->gtp_cfg_len + GTP_ADDR_LENGTH);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Read Config Failed, Using DEFAULT Resolution & INT Trigger!\n");
+ ts->abs_x_max = GTP_MAX_WIDTH;
+ ts->abs_y_max = GTP_MAX_HEIGHT;
+ ts->int_trigger_type = GTP_INT_TRIGGER;
+ }
+#endif /* GTP_DRIVER_SEND_CFG */
+
+ GTP_DEBUG_FUNC();
+ if ((ts->abs_x_max == 0) && (ts->abs_y_max == 0)) {
+ ts->abs_x_max = (config_data[RESOLUTION_LOC + 1] << 8)
+ + config_data[RESOLUTION_LOC];
+ ts->abs_y_max = (config_data[RESOLUTION_LOC + 3] << 8)
+ + config_data[RESOLUTION_LOC + 2];
+ ts->int_trigger_type = (config_data[TRIGGER_LOC]) & 0x03;
+ }
+ ret = gtp_send_cfg(ts);
+ if (ret < 0)
+ dev_err(&client->dev, "%s: Send config error.\n", __func__);
+
+ GTP_DEBUG("X_MAX = %d, Y_MAX = %d, TRIGGER = 0x%02x",
+ ts->abs_x_max, ts->abs_y_max,
+ ts->int_trigger_type);
+
+ msleep(20);
+ return ret;
}
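The checksum loop above encodes the usual GT9xx rule: the byte written at config_data[ts->gtp_cfg_len] is the two's complement of the sum of the config payload, so payload plus checksum wraps to zero modulo 256. A minimal host-side check under that assumption (the helper name is illustrative, not part of the driver):

	/* Verify a GT9xx config block: cfg points at the payload (after the
	 * 2-byte register address), len includes the trailing checksum byte.
	 */
	static int gt9xx_cfg_checksum_ok(const unsigned char *cfg, unsigned int len)
	{
		unsigned char sum = 0;
		unsigned int i;

		for (i = 0; i < len; i++)
			sum += cfg[i];

		return sum == 0;	/* payload + checksum wraps to zero */
	}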
/*******************************************************
Function:
- Read chip version.
+ Read chip version.
Input:
- client: i2c device
- version: buffer to keep ic firmware version
+ client: i2c device
+ version: buffer to keep ic firmware version
Output:
- read operation return.
- 2: succeed, otherwise: failed
+ read operation return.
+ 2: succeed, otherwise: failed
*******************************************************/
-s32 gtp_read_version(struct i2c_client *client, u16* version)
+int gtp_read_version(struct i2c_client *client, u16 *version)
{
- s32 ret = -1;
- u8 buf[8] = {GTP_REG_VERSION >> 8, GTP_REG_VERSION & 0xff};
-
- GTP_DEBUG_FUNC();
-
- ret = gtp_i2c_read(client, buf, sizeof(buf));
- if (ret < 0)
- {
- GTP_ERROR("GTP read version failed");
- return ret;
- }
-
- if (version)
- {
- *version = (buf[7] << 8) | buf[6];
- }
-
- if (buf[5] == 0x00)
- {
- GTP_INFO("IC Version: %c%c%c_%02x%02x", buf[2], buf[3], buf[4], buf[7], buf[6]);
- }
- else
- {
- if (buf[5] == 'S' || buf[5] == 's')
- {
- chip_gt9xxs = 1;
- }
- GTP_INFO("IC Version: %c%c%c%c_%02x%02x", buf[2], buf[3], buf[4], buf[5], buf[7], buf[6]);
- }
- return ret;
+ int ret = -EIO;
+ u8 buf[8] = { GTP_REG_VERSION >> 8, GTP_REG_VERSION & 0xff };
+
+ GTP_DEBUG_FUNC();
+
+ ret = gtp_i2c_read(client, buf, sizeof(buf));
+ if (ret < 0) {
+ dev_err(&client->dev, "GTP read version failed.\n");
+ return ret;
+ }
+
+ if (version)
+ *version = (buf[7] << 8) | buf[6];
+
+ if (buf[5] == 0x00) {
+ dev_dbg(&client->dev, "IC Version: %c%c%c_%02x%02x\n", buf[2],
+ buf[3], buf[4], buf[7], buf[6]);
+ } else {
+ if (buf[5] == 'S' || buf[5] == 's')
+ chip_gt9xxs = 1;
+ dev_dbg(&client->dev, "IC Version: %c%c%c%c_%02x%02x\n", buf[2],
+ buf[3], buf[4], buf[5], buf[7], buf[6]);
+ }
+ return ret;
}
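As the prints above show, the 8-byte read at GTP_REG_VERSION carries the 2-byte register address, then the ASCII product id in bytes 2..5 (byte 5 is 0x00 on three-character parts) and the firmware version little-endian in bytes 6..7. A hedged decode sketch with illustrative names:

	/* Decode the buffer filled by gtp_read_version(). */
	static void gt9xx_decode_version(const u8 buf[8], char id[5], u16 *fw_ver)
	{
		id[0] = buf[2];
		id[1] = buf[3];
		id[2] = buf[4];
		id[3] = buf[5];		/* 0x00 here terminates a 3-char id */
		id[4] = '\0';
		*fw_ver = (buf[7] << 8) | buf[6];
	}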
/*******************************************************
Function:
- I2c test Function.
+ I2C test function.
Input:
- client:i2c client.
+ client: i2c client.
Output:
- Executive outcomes.
- 2: succeed, otherwise failed.
+ Executive outcomes.
+ 2: succeed, otherwise failed.
*******************************************************/
-static s8 gtp_i2c_test(struct i2c_client *client)
+static int gtp_i2c_test(struct i2c_client *client)
{
- u8 test[3] = {GTP_REG_CONFIG_DATA >> 8, GTP_REG_CONFIG_DATA & 0xff};
- u8 retry = 0;
- s8 ret = -1;
-
- GTP_DEBUG_FUNC();
-
- while(retry++ < 5)
- {
- ret = gtp_i2c_read(client, test, 3);
- if (ret > 0)
- {
- return ret;
- }
- GTP_ERROR("GTP i2c test failed time %d.",retry);
- msleep(10);
- }
- return ret;
+ u8 buf[3] = { GTP_REG_CONFIG_DATA >> 8, GTP_REG_CONFIG_DATA & 0xff };
+ int retry = 5;
+ int ret = -EIO;
+
+ GTP_DEBUG_FUNC();
+
+ while (retry--) {
+ ret = gtp_i2c_read(client, buf, 3);
+ if (ret > 0)
+ return ret;
+ dev_err(&client->dev, "GTP i2c test failed time %d.\n", retry);
+ msleep(20);
+ }
+ return ret;
}
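gtp_i2c_test() above also illustrates the framing every GT9xx transfer uses: the first GTP_ADDR_LENGTH (2) bytes of the buffer carry the register address big-endian, and data follows from index 2. A small sketch of that prefix for an arbitrary register (the helper name is illustrative):

	/* Place the 16-bit register address ahead of the data bytes. */
	static void gt9xx_set_reg_addr(u8 *buf, u16 reg)
	{
		buf[0] = reg >> 8;	/* e.g. 0x80 for GTP_REG_CONFIG_DATA */
		buf[1] = reg & 0xff;	/* e.g. 0x47 */
	}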
/*******************************************************
Function:
- Request gpio(INT & RST) ports.
+ Request gpio(INT & RST) ports.
Input:
- ts: private data.
+ ts: private data.
Output:
- Executive outcomes.
- >= 0: succeed, < 0: failed
+ Executive outcomes.
+ = 0: succeed, != 0: failed
*******************************************************/
-static s8 gtp_request_io_port(struct goodix_ts_data *ts)
+static int gtp_request_io_port(struct goodix_ts_data *ts)
{
- s32 ret = 0;
-
- ret = GTP_GPIO_REQUEST(GTP_INT_PORT, "GTP_INT_IRQ");
- if (ret < 0)
- {
- GTP_ERROR("Failed to request GPIO:%d, ERRNO:%d", (s32)GTP_INT_PORT, ret);
- ret = -ENODEV;
- }
- else
- {
- GTP_GPIO_AS_INT(GTP_INT_PORT);
- ts->client->irq = GTP_INT_IRQ;
- }
-
- ret = GTP_GPIO_REQUEST(GTP_RST_PORT, "GTP_RST_PORT");
- if (ret < 0)
- {
- GTP_ERROR("Failed to request GPIO:%d, ERRNO:%d",(s32)GTP_RST_PORT,ret);
- ret = -ENODEV;
- }
-
- GTP_GPIO_AS_INPUT(GTP_RST_PORT);
- gtp_reset_guitar(ts->client, 20);
-
-
- if(ret < 0)
- {
- GTP_GPIO_FREE(GTP_RST_PORT);
- GTP_GPIO_FREE(GTP_INT_PORT);
- }
-
- return ret;
+ struct i2c_client *client = ts->client;
+ struct goodix_ts_platform_data *pdata = ts->pdata;
+ int ret;
+
+ if (gpio_is_valid(pdata->irq_gpio)) {
+ ret = gpio_request(pdata->irq_gpio, "goodix_ts_irq_gpio");
+ if (ret) {
+ dev_err(&client->dev, "irq gpio request failed\n");
+ goto pwr_off;
+ }
+ ret = gpio_direction_input(pdata->irq_gpio);
+ if (ret) {
+ dev_err(&client->dev,
+ "set_direction for irq gpio failed\n");
+ goto free_irq_gpio;
+ }
+ } else {
+ dev_err(&client->dev, "irq gpio is invalid!\n");
+ ret = -EINVAL;
+ goto pwr_off;
+ }
+
+ if (gpio_is_valid(pdata->reset_gpio)) {
+ ret = gpio_request(pdata->reset_gpio, "goodix_ts_reset_gpio");
+ if (ret) {
+ dev_err(&client->dev, "reset gpio request failed\n");
+ goto free_irq_gpio;
+ }
+
+ ret = gpio_direction_output(pdata->reset_gpio, 0);
+ if (ret) {
+ dev_err(&client->dev,
+ "set_direction for reset gpio failed\n");
+ goto free_reset_gpio;
+ }
+ } else {
+ dev_err(&client->dev, "reset gpio is invalid!\n");
+ ret = -EINVAL;
+ goto free_irq_gpio;
+ }
+ gpio_direction_input(pdata->reset_gpio);
+
+ return ret;
+
+free_reset_gpio:
+ if (gpio_is_valid(pdata->reset_gpio))
+ gpio_free(pdata->reset_gpio);
+free_irq_gpio:
+ if (gpio_is_valid(pdata->irq_gpio))
+ gpio_free(pdata->irq_gpio);
+pwr_off:
+ return ret;
}
/*******************************************************
Function:
- Request interrupt.
+ Request interrupt.
Input:
- ts: private data.
+ ts: private data.
Output:
- Executive outcomes.
- 0: succeed, -1: failed.
+ Executive outcomes.
+ 0: succeed, -1: failed.
*******************************************************/
-static s8 gtp_request_irq(struct goodix_ts_data *ts)
+static int gtp_request_irq(struct goodix_ts_data *ts)
{
- s32 ret = -1;
- const u8 irq_table[] = GTP_IRQ_TAB;
-
- GTP_DEBUG("INT trigger type:%x", ts->int_trigger_type);
-
- ret = request_irq(ts->client->irq,
- goodix_ts_irq_handler,
- irq_table[ts->int_trigger_type],
- ts->client->name,
- ts);
- if (ret)
- {
- GTP_ERROR("Request IRQ failed!ERRNO:%d.", ret);
- GTP_GPIO_AS_INPUT(GTP_INT_PORT);
- GTP_GPIO_FREE(GTP_INT_PORT);
-
- hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- ts->timer.function = goodix_ts_timer_handler;
- hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
- return -1;
- }
- else
- {
- gtp_irq_disable(ts);
- ts->use_irq = 1;
- return 0;
- }
+ int ret;
+ const u8 irq_table[] = GTP_IRQ_TAB;
+
+ GTP_DEBUG("INT trigger type:%x, irq=%d", ts->int_trigger_type,
+ ts->client->irq);
+
+ ret = request_irq(ts->client->irq, goodix_ts_irq_handler,
+ irq_table[ts->int_trigger_type],
+ ts->client->name, ts);
+ if (ret) {
+ dev_err(&ts->client->dev, "Request IRQ failed!ERRNO:%d.\n",
+ ret);
+ gpio_direction_input(ts->pdata->irq_gpio);
+
+ hrtimer_init(&ts->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ts->timer.function = goodix_ts_timer_handler;
+ hrtimer_start(&ts->timer, ktime_set(1, 0),
+ HRTIMER_MODE_REL);
+ ts->use_irq = false;
+ return ret;
+ }
+ gtp_irq_disable(ts);
+ ts->use_irq = true;
+ return 0;
}
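The lookup above maps the 2-bit trigger type taken from the config (TRIGGER_LOC byte) onto kernel IRQ flags via GTP_IRQ_TAB. The same mapping as a standalone sketch, assuming the table ordering declared in gt9xx.h:

	/* Mirror of irq_table[ts->int_trigger_type] in gtp_request_irq(). */
	static unsigned long gt9xx_irq_flags(u8 int_trigger_type)
	{
		static const unsigned long tab[] = {
			IRQ_TYPE_EDGE_RISING,	/* 0 */
			IRQ_TYPE_EDGE_FALLING,	/* 1 */
			IRQ_TYPE_LEVEL_LOW,	/* 2 */
			IRQ_TYPE_LEVEL_HIGH,	/* 3 */
		};

		return tab[int_trigger_type & 0x03];
	}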
/*******************************************************
Function:
- Request input device Function.
+ Request input device function.
Input:
- ts:private data.
+ ts: private data.
Output:
- Executive outcomes.
- 0: succeed, otherwise: failed.
+ Executive outcomes.
+ 0: succeed, otherwise: failed.
*******************************************************/
-static s8 gtp_request_input_dev(struct goodix_ts_data *ts)
+static int gtp_request_input_dev(struct goodix_ts_data *ts)
{
- s8 ret = -1;
- s8 phys[32];
+ int ret;
+ char phys[PHY_BUF_SIZE];
#if GTP_HAVE_TOUCH_KEY
- u8 index = 0;
+ int index = 0;
#endif
-
- GTP_DEBUG_FUNC();
-
- ts->input_dev = input_allocate_device();
- if (ts->input_dev == NULL)
- {
- GTP_ERROR("Failed to allocate input device.");
- return -ENOMEM;
- }
-
- ts->input_dev->evbit[0] = BIT_MASK(EV_SYN) | BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS) ;
+
+ GTP_DEBUG_FUNC();
+
+ ts->input_dev = input_allocate_device();
+ if (ts->input_dev == NULL) {
+ dev_err(&ts->client->dev,
+ "Failed to allocate input device.\n");
+ return -ENOMEM;
+ }
+
+ ts->input_dev->evbit[0] =
+ BIT_MASK(EV_SYN) | BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
#if GTP_ICS_SLOT_REPORT
- __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
- input_mt_init_slots(ts->input_dev, 10); // in case of "out of memory"
+ __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
+ input_mt_init_slots(ts->input_dev, 10);/* in case of "out of memory" */
#else
- ts->input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ ts->input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
#endif
#if GTP_HAVE_TOUCH_KEY
- for (index = 0; index < GTP_MAX_KEY_NUM; index++)
- {
- input_set_capability(ts->input_dev, EV_KEY, touch_key_array[index]);
- }
+ for (index = 0; index < ARRAY_SIZE(touch_key_array); index++) {
+ input_set_capability(ts->input_dev,
+ EV_KEY, touch_key_array[index]);
+ }
#endif
#if GTP_SLIDE_WAKEUP
- input_set_capability(ts->input_dev, EV_KEY, KEY_POWER);
-#endif
+ input_set_capability(ts->input_dev, EV_KEY, KEY_POWER);
+#endif
#if GTP_WITH_PEN
- // pen support
- __set_bit(BTN_TOOL_PEN, ts->input_dev->keybit);
- __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
- __set_bit(INPUT_PROP_POINTER, ts->input_dev->propbit);
+ /* pen support */
+ __set_bit(BTN_TOOL_PEN, ts->input_dev->keybit);
+ __set_bit(INPUT_PROP_DIRECT, ts->input_dev->propbit);
+ __set_bit(INPUT_PROP_POINTER, ts->input_dev->propbit);
#endif
#if GTP_CHANGE_X2Y
- GTP_SWAP(ts->abs_x_max, ts->abs_y_max);
-#endif
-
- input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X, 0, ts->abs_x_max, 0, 0);
- input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y, 0, ts->abs_y_max, 0, 0);
- input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0);
- input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
- input_set_abs_params(ts->input_dev, ABS_MT_TRACKING_ID, 0, 255, 0, 0);
-
- sprintf(phys, "input/ts");
- ts->input_dev->name = goodix_ts_name;
- ts->input_dev->phys = phys;
- ts->input_dev->id.bustype = BUS_I2C;
- ts->input_dev->id.vendor = 0xDEAD;
- ts->input_dev->id.product = 0xBEEF;
- ts->input_dev->id.version = 10427;
-
- ret = input_register_device(ts->input_dev);
- if (ret)
- {
- GTP_ERROR("Register %s input device failed", ts->input_dev->name);
- return -ENODEV;
- }
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
- ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
- ts->early_suspend.suspend = goodix_ts_early_suspend;
- ts->early_suspend.resume = goodix_ts_late_resume;
- register_early_suspend(&ts->early_suspend);
+ GTP_SWAP(ts->abs_x_max, ts->abs_y_max);
#endif
- return 0;
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_X,
+ 0, ts->abs_x_max, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_POSITION_Y,
+ 0, ts->abs_y_max, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR,
+ 0, 255, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR,
+ 0, 255, 0, 0);
+ input_set_abs_params(ts->input_dev, ABS_MT_TRACKING_ID,
+ 0, 255, 0, 0);
+
+ snprintf(phys, PHY_BUF_SIZE, "input/ts");
+ ts->input_dev->name = GOODIX_DEV_NAME;
+ ts->input_dev->phys = phys;
+ ts->input_dev->id.bustype = BUS_I2C;
+ ts->input_dev->id.vendor = 0xDEAD;
+ ts->input_dev->id.product = 0xBEEF;
+ ts->input_dev->id.version = 10427;
+
+ ret = input_register_device(ts->input_dev);
+ if (ret) {
+ dev_err(&ts->client->dev,
+ "Register %s input device failed.\n",
+ ts->input_dev->name);
+ goto exit_free_inputdev;
+ }
+
+ return 0;
+
+exit_free_inputdev:
+ input_free_device(ts->input_dev);
+ ts->input_dev = NULL;
+ return ret;
}
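With GTP_ICS_SLOT_REPORT set to 0, the axes registered above are consumed by the driver's work function (not shown in this hunk) using the type-A multi-touch protocol. A minimal, illustrative report for one contact, not the driver's actual report path:

	static void gt9xx_report_point(struct input_dev *dev,
				       u8 id, int x, int y, int w)
	{
		input_report_key(dev, BTN_TOUCH, 1);
		input_report_abs(dev, ABS_MT_TRACKING_ID, id);
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
		input_report_abs(dev, ABS_MT_TOUCH_MAJOR, w);
		input_report_abs(dev, ABS_MT_WIDTH_MAJOR, w);
		input_mt_sync(dev);	/* type-A: one SYN_MT_REPORT per contact */
	}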
/*******************************************************
Function:
- I2c probe.
+ I2C probe.
Input:
- client: i2c device struct.
- id: device id.
+ client: i2c device struct.
+ id: device id.
Output:
- Executive outcomes.
- 0: succeed.
+ Executive outcomes.
+ 0: succeed.
*******************************************************/
-static int goodix_ts_probe(struct i2c_client *client, const struct i2c_device_id *id)
+
+static int goodix_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
- s32 ret = -1;
- struct goodix_ts_data *ts;
- u16 version_info;
-
- GTP_DEBUG_FUNC();
-
- //do NOT remove these logs
- GTP_INFO("GTP Driver Version: %s", GTP_DRIVER_VERSION);
- GTP_INFO("GTP Driver Built@%s, %s", __TIME__, __DATE__);
- GTP_INFO("GTP I2C Address: 0x%02x", client->addr);
-
- i2c_connect_client = client;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
- {
- GTP_ERROR("I2C check functionality failed.");
- return -ENODEV;
- }
- ts = kzalloc(sizeof(*ts), GFP_KERNEL);
- if (ts == NULL)
- {
- GTP_ERROR("Alloc GFP_KERNEL memory failed.");
- return -ENOMEM;
- }
-
- memset(ts, 0, sizeof(*ts));
- INIT_WORK(&ts->work, goodix_ts_work_func);
- ts->client = client;
- spin_lock_init(&ts->irq_lock); // 2.6.39 later
- // ts->irq_lock = SPIN_LOCK_UNLOCKED; // 2.6.39 & before
- i2c_set_clientdata(client, ts);
-
- ts->gtp_rawdiff_mode = 0;
-
- ret = gtp_request_io_port(ts);
- if (ret < 0)
- {
- GTP_ERROR("GTP request IO port failed.");
- kfree(ts);
- return ret;
- }
-
- ret = gtp_i2c_test(client);
- if (ret < 0)
- {
- GTP_ERROR("I2C communication ERROR!");
- }
+ struct goodix_ts_data *ts;
+ u16 version_info;
+ int ret;
+
+ dev_dbg(&client->dev, "GTP I2C Address: 0x%02x\n", client->addr);
+
+#if GTP_ESD_PROTECT
+ i2c_connect_client = client;
+#endif
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "GTP I2C not supported\n");
+ return -ENODEV;
+ }
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->client = client;
+ /* For kernel 2.6.39 and later, use spin_lock_init(&ts->irq_lock);
+ * for 2.6.39 and earlier, use ts->irq_lock = SPIN_LOCK_UNLOCKED.
+ */
+ spin_lock_init(&ts->irq_lock);
+ i2c_set_clientdata(client, ts);
+
+ ts->gtp_rawdiff_mode = 0;
+
+ ret = gtp_request_io_port(ts);
+ if (ret) {
+ dev_err(&client->dev, "GTP request IO port failed.\n");
+ goto exit_power_off;
+ }
+
+ gtp_reset_guitar(ts, 20);
+
+ ret = gtp_i2c_test(client);
+ if (ret != 2) {
+ dev_err(&client->dev, "I2C communication ERROR!\n");
+ goto exit_free_io_port;
+ }
#if GTP_AUTO_UPDATE
- ret = gup_init_update_proc(ts);
- if (ret < 0)
- {
- GTP_ERROR("Create update thread error.");
- }
+ ret = gup_init_update_proc(ts);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "GTP Create firmware update thread error.\n");
+ goto exit_free_io_port;
+ }
#endif
-
- ret = gtp_init_panel(ts);
- if (ret < 0)
- {
- GTP_ERROR("GTP init panel failed.");
- ts->abs_x_max = GTP_MAX_WIDTH;
- ts->abs_y_max = GTP_MAX_HEIGHT;
- ts->int_trigger_type = GTP_INT_TRIGGER;
- }
-
- ret = gtp_request_input_dev(ts);
- if (ret < 0)
- {
- GTP_ERROR("GTP request input dev failed");
- }
-
- ret = gtp_request_irq(ts);
- if (ret < 0)
- {
- GTP_INFO("GTP works in polling mode.");
- }
- else
- {
- GTP_INFO("GTP works in interrupt mode.");
- }
-
- ret = gtp_read_version(client, &version_info);
- if (ret < 0)
- {
- GTP_ERROR("Read version failed.");
- }
- if (ts->use_irq)
- {
- gtp_irq_enable(ts);
- }
-
+
+ ret = gtp_init_panel(ts);
+ if (ret < 0) {
+ dev_err(&client->dev, "GTP init panel failed.\n");
+ ts->abs_x_max = GTP_MAX_WIDTH;
+ ts->abs_y_max = GTP_MAX_HEIGHT;
+ ts->int_trigger_type = GTP_INT_TRIGGER;
+ }
+
+ ret = gtp_request_input_dev(ts);
+ if (ret) {
+ dev_err(&client->dev, "GTP request input dev failed.\n");
+ goto exit_free_inputdev;
+ }
+
+ ts->goodix_wq = create_singlethread_workqueue("goodix_wq");
+ INIT_WORK(&ts->work, goodix_ts_work_func);
+
+ ret = gtp_request_irq(ts);
+ if (ret < 0)
+ dev_info(&client->dev, "GTP works in polling mode.\n");
+ else
+ dev_info(&client->dev, "GTP works in interrupt mode.\n");
+
+ ret = gtp_read_version(client, &version_info);
+ if (ret != 2) {
+ dev_err(&client->dev, "Read version failed.\n");
+ goto exit_free_irq;
+ }
+ if (ts->use_irq)
+ gtp_irq_enable(ts);
+
#if GTP_CREATE_WR_NODE
- init_wr_node(client);
+ init_wr_node(client);
#endif
-
+
#if GTP_ESD_PROTECT
- gtp_esd_switch(client, SWITCH_ON);
+ gtp_esd_switch(client, SWITCH_ON);
#endif
- return 0;
+ init_done = true;
+ return 0;
+exit_free_irq:
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ cancel_work_sync(&ts->work);
+ flush_workqueue(ts->goodix_wq);
+ destroy_workqueue(ts->goodix_wq);
+
+ /* input_unregister_device() drops the last reference,
+ * so do not call input_free_device() afterwards.
+ */
+ input_unregister_device(ts->input_dev);
+ ts->input_dev = NULL;
+exit_free_inputdev:
+ kfree(ts->config_data);
+exit_free_io_port:
+exit_power_off:
+ i2c_set_clientdata(client, NULL);
+ kfree(ts);
+ return ret;
}
-
/*******************************************************
Function:
- Goodix touchscreen driver release function.
+ Goodix touchscreen driver release function.
Input:
- client: i2c device struct.
+ client: i2c device struct.
Output:
- Executive outcomes. 0---succeed.
+ Executive outcomes. 0---succeed.
*******************************************************/
static int goodix_ts_remove(struct i2c_client *client)
{
- struct goodix_ts_data *ts = i2c_get_clientdata(client);
-
- GTP_DEBUG_FUNC();
-
+ struct goodix_ts_data *ts = i2c_get_clientdata(client);
+
+ GTP_DEBUG_FUNC();
#ifdef CONFIG_HAS_EARLYSUSPEND
- unregister_early_suspend(&ts->early_suspend);
+ unregister_early_suspend(&ts->early_suspend);
#endif
#if GTP_CREATE_WR_NODE
- uninit_wr_node();
+ uninit_wr_node();
#endif
#if GTP_ESD_PROTECT
- destroy_workqueue(gtp_esd_check_workqueue);
+ cancel_delayed_work_sync(&gtp_esd_check_work);
+ flush_workqueue(gtp_esd_check_workqueue);
+ destroy_workqueue(gtp_esd_check_workqueue);
#endif
- if (ts)
- {
- if (ts->use_irq)
- {
- GTP_GPIO_AS_INPUT(GTP_INT_PORT);
- GTP_GPIO_FREE(GTP_INT_PORT);
- free_irq(client->irq, ts);
- }
- else
- {
- hrtimer_cancel(&ts->timer);
- }
- }
-
- GTP_INFO("GTP driver removing...");
- i2c_set_clientdata(client, NULL);
- input_unregister_device(ts->input_dev);
- kfree(ts);
-
- return 0;
+ if (ts) {
+ if (ts->use_irq)
+ free_irq(client->irq, ts);
+ else
+ hrtimer_cancel(&ts->timer);
+
+ cancel_work_sync(&ts->work);
+ flush_workqueue(ts->goodix_wq);
+ destroy_workqueue(ts->goodix_wq);
+
+ input_unregister_device(ts->input_dev);
+ ts->input_dev = NULL;
+ kfree(ts->config_data);
+
+ if (gpio_is_valid(ts->pdata->reset_gpio))
+ gpio_free(ts->pdata->reset_gpio);
+ if (gpio_is_valid(ts->pdata->irq_gpio))
+ gpio_free(ts->pdata->irq_gpio);
+
+ i2c_set_clientdata(client, NULL);
+ kfree(ts);
+ }
+
+ return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
/*******************************************************
Function:
- Early suspend function.
+ Early suspend function.
Input:
- h: early_suspend struct.
+ h: early_suspend struct.
Output:
- None.
+ None.
*******************************************************/
static void goodix_ts_early_suspend(struct early_suspend *h)
{
- struct goodix_ts_data *ts;
- s8 ret = -1;
- ts = container_of(h, struct goodix_ts_data, early_suspend);
-
- GTP_DEBUG_FUNC();
+ struct goodix_ts_data *ts;
+ s8 ret = -1;
+
+ ts = container_of(h, struct goodix_ts_data, early_suspend);
+
+ GTP_DEBUG_FUNC();
#if GTP_ESD_PROTECT
- ts->gtp_is_suspend = 1;
- gtp_esd_switch(ts->client, SWITCH_OFF);
+ ts->gtp_is_suspend = 1;
+ gtp_esd_switch(ts->client, SWITCH_OFF);
#endif
#if GTP_SLIDE_WAKEUP
- ret = gtp_enter_doze(ts);
+ ret = gtp_enter_doze(ts);
#else
- if (ts->use_irq)
- {
- gtp_irq_disable(ts);
- }
- else
- {
- hrtimer_cancel(&ts->timer);
- }
- ret = gtp_enter_sleep(ts);
-#endif
- if (ret < 0)
- {
- GTP_ERROR("GTP early suspend failed.");
- }
- // to avoid waking up while not sleeping
- // delay 48 + 10ms to ensure reliability
- msleep(58);
+ if (ts->use_irq)
+ gtp_irq_disable(ts);
+ else
+ hrtimer_cancel(&ts->timer);
+ ret = gtp_enter_sleep(ts);
+#endif
+ if (ret < 0)
+ dev_err(&ts->client->dev, "GTP early suspend failed.\n");
+ /* to avoid waking up while not sleeping,
+ * delay 48 + 10ms to ensure reliability
+ */
+ msleep(58);
}
/*******************************************************
Function:
- Late resume function.
+ Late resume function.
Input:
- h: early_suspend struct.
+ h: early_suspend struct.
Output:
- None.
+ None.
*******************************************************/
static void goodix_ts_late_resume(struct early_suspend *h)
{
- struct goodix_ts_data *ts;
- s8 ret = -1;
- ts = container_of(h, struct goodix_ts_data, early_suspend);
-
- GTP_DEBUG_FUNC();
-
- ret = gtp_wakeup_sleep(ts);
+ struct goodix_ts_data *ts;
+ s8 ret = -1;
+
+ ts = container_of(h, struct goodix_ts_data, early_suspend);
+
+ GTP_DEBUG_FUNC();
+
+ ret = gtp_wakeup_sleep(ts);
#if GTP_SLIDE_WAKEUP
- doze_status = DOZE_DISABLED;
+ doze_status = DOZE_DISABLED;
#endif
- if (ret < 0)
- {
- GTP_ERROR("GTP later resume failed.");
- }
+ if (ret < 0)
+ dev_err(&ts->client->dev, "GTP later resume failed.\n");
- if (ts->use_irq)
- {
- gtp_irq_enable(ts);
- }
- else
- {
- hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
- }
+ if (ts->use_irq)
+ gtp_irq_enable(ts);
+ else
+ hrtimer_start(&ts->timer,
+ ktime_set(1, 0), HRTIMER_MODE_REL);
#if GTP_ESD_PROTECT
- ts->gtp_is_suspend = 0;
- gtp_esd_switch(ts->client, SWITCH_ON);
+ ts->gtp_is_suspend = 0;
+ gtp_esd_switch(ts->client, SWITCH_ON);
#endif
}
#endif
@@ -1591,172 +1611,159 @@ static void goodix_ts_late_resume(struct early_suspend *h)
#if GTP_ESD_PROTECT
/*******************************************************
Function:
- switch on & off esd delayed work
+ switch on & off esd delayed work
Input:
- client: i2c device
- on: SWITCH_ON / SWITCH_OFF
+ client: i2c device
+ on: SWITCH_ON / SWITCH_OFF
Output:
- void
+ void
*********************************************************/
-void gtp_esd_switch(struct i2c_client *client, s32 on)
+void gtp_esd_switch(struct i2c_client *client, int on)
{
- struct goodix_ts_data *ts;
-
- ts = i2c_get_clientdata(client);
- if (SWITCH_ON == on) // switch on esd
- {
- if (!ts->esd_running)
- {
- ts->esd_running = 1;
- GTP_INFO("Esd started");
- queue_delayed_work(gtp_esd_check_workqueue, &gtp_esd_check_work, GTP_ESD_CHECK_CIRCLE);
- }
- }
- else // switch off esd
- {
- if (ts->esd_running)
- {
- ts->esd_running = 0;
- GTP_INFO("Esd cancelled");
- cancel_delayed_work_sync(&gtp_esd_check_work);
- }
- }
+ struct goodix_ts_data *ts;
+
+ ts = i2c_get_clientdata(client);
+ if (on == SWITCH_ON) {
+ /* switch on esd */
+ if (!ts->esd_running) {
+ ts->esd_running = 1;
+ dev_dbg(&client->dev, "Esd started\n");
+ queue_delayed_work(gtp_esd_check_workqueue,
+ &gtp_esd_check_work, GTP_ESD_CHECK_CIRCLE);
+ }
+ } else {
+ /* switch off esd */
+ if (ts->esd_running) {
+ ts->esd_running = 0;
+ dev_dbg(&client->dev, "Esd cancelled\n");
+ cancel_delayed_work_sync(&gtp_esd_check_work);
+ }
+ }
}
/*******************************************************
Function:
- Initialize external watchdog for esd protect
+ Initialize external watchdog for esd protect
Input:
- client: i2c device.
+ client: i2c device.
Output:
- result of i2c write operation.
- 1: succeed, otherwise: failed
+ result of i2c write operation.
+ 1: succeed, otherwise: failed
*********************************************************/
-static s32 gtp_init_ext_watchdog(struct i2c_client *client)
+static int gtp_init_ext_watchdog(struct i2c_client *client)
{
- u8 opr_buffer[4] = {0x80, 0x40, 0xAA, 0xAA};
-
- struct i2c_msg msg; // in case of recursively reset by calling gtp_i2c_write
- s32 ret = -1;
- s32 retries = 0;
-
- GTP_DEBUG("Init external watchdog...");
- GTP_DEBUG_FUNC();
-
- msg.flags = !I2C_M_RD;
- msg.addr = client->addr;
- msg.len = 4;
- msg.buf = opr_buffer;
-
- while(retries < 5)
- {
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret == 1)
- {
- return 1;
- }
- retries++;
- }
- if (retries >= 5)
- {
- GTP_ERROR("init external watchdog failed!");
- }
- return 0;
+ /* use a raw i2c_msg to avoid a recursive reset via gtp_i2c_write() */
+ struct i2c_msg msg;
+ u8 opr_buffer[4] = {0x80, 0x40, 0xAA, 0xAA};
+ int ret;
+ int retries = 0;
+
+ GTP_DEBUG("Init external watchdog...");
+ GTP_DEBUG_FUNC();
+
+ msg.flags = 0; /* write transaction */
+ msg.addr = client->addr;
+ msg.len = 4;
+ msg.buf = opr_buffer;
+
+ while (retries < 5) {
+ ret = i2c_transfer(client->adapter, &msg, 1);
+ if (ret == 1)
+ return 1;
+ retries++;
+ }
+ if (retries >= 5)
+ dev_err(&client->dev, "init external watchdog failed!");
+ return 0;
}
/*******************************************************
Function:
- Esd protect function.
- Added external watchdog by meta, 2013/03/07
+ Esd protect function.
+ Added external watchdog by meta, 2013/03/07
Input:
- work: delayed work
+ work: delayed work
Output:
- None.
+ None.
*******************************************************/
static void gtp_esd_check_func(struct work_struct *work)
{
- s32 i;
- s32 ret = -1;
- struct goodix_ts_data *ts = NULL;
- u8 test[4] = {0x80, 0x40};
-
- GTP_DEBUG_FUNC();
-
- ts = i2c_get_clientdata(i2c_connect_client);
-
- if (ts->gtp_is_suspend)
- {
- ts->esd_running = 0;
- GTP_INFO("Esd terminated!");
- return;
- }
-
- for (i = 0; i < 3; i++)
- {
- ret = gtp_i2c_read(ts->client, test, 4);
-
- GTP_DEBUG("0x8040 = 0x%02X, 0x8041 = 0x%02X", test[2], test[3]);
- if ((ret < 0))
- {
- // IIC communication problem
- continue;
- }
- else
- {
- if ((test[2] == 0xAA) || (test[3] != 0xAA))
- {
- // IC works abnormally..
- i = 3;
- break;
- }
- else
- {
- // IC works normally, Write 0x8040 0xAA, feed the dog
- test[2] = 0xAA;
- gtp_i2c_write(ts->client, test, 3);
- break;
- }
- }
- }
- if (i >= 3)
- {
- GTP_ERROR("IC Working ABNORMALLY, Resetting Guitar...");
- gtp_reset_guitar(ts->client, 50);
- }
-
- if(!ts->gtp_is_suspend)
- {
- queue_delayed_work(gtp_esd_check_workqueue, &gtp_esd_check_work, GTP_ESD_CHECK_CIRCLE);
- }
- else
- {
- GTP_INFO("Esd terminated!");
- ts->esd_running = 0;
- }
- return;
+ s32 i;
+ s32 ret = -1;
+ struct goodix_ts_data *ts = NULL;
+ u8 test[4] = {0x80, 0x40};
+
+ GTP_DEBUG_FUNC();
+
+ ts = i2c_get_clientdata(i2c_connect_client);
+
+ if (ts->gtp_is_suspend) {
+ dev_dbg(&ts->client->dev, "Esd terminated!\n");
+ ts->esd_running = 0;
+ return;
+ }
+#ifdef CONFIG_GT9XX_TOUCHPANEL_UPDATE
+ if (ts->enter_update)
+ return;
+#endif
+
+ for (i = 0; i < 3; i++) {
+ ret = gtp_i2c_read(ts->client, test, 4);
+
+ GTP_DEBUG("0x8040 = 0x%02X, 0x8041 = 0x%02X", test[2], test[3]);
+ if (ret < 0) {
+ /* I2C communication problem */
+ continue;
+ } else {
+ if ((test[2] == 0xAA) || (test[3] != 0xAA)) {
+ /* IC works abnormally..*/
+ i = 3;
+ break;
+ }
+ /* IC works normally, Write 0x8040 0xAA*/
+ test[2] = 0xAA;
+ gtp_i2c_write(ts->client, test, 3);
+ break;
+ }
+ }
+ if (i >= 3) {
+ dev_err(&ts->client->dev,
+ "IC Working ABNORMALLY, Resetting Guitar...\n");
+ gtp_reset_guitar(ts, 50);
+ }
+
+ if (!ts->gtp_is_suspend)
+ queue_delayed_work(gtp_esd_check_workqueue,
+ &gtp_esd_check_work, GTP_ESD_CHECK_CIRCLE);
+ else {
+ dev_dbg(&ts->client->dev, "Esd terminated!\n");
+ ts->esd_running = 0;
+ }
}
#endif
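The ESD routine above checks the watchdog bytes at 0x8040/0x8041 and, when the IC looks healthy, writes 0xAA back to 0x8040 to feed the dog. That write in isolation, reusing gtp_i2c_write() and the address-prefix layout shown earlier (the helper name is illustrative):

	/* Re-arm the GT9xx external watchdog register at 0x8040. */
	static int gt9xx_feed_watchdog(struct goodix_ts_data *ts)
	{
		u8 buf[3] = { 0x80, 0x40, 0xAA };	/* addr hi, addr lo, value */

		return gtp_i2c_write(ts->client, buf, sizeof(buf));
	}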
static const struct i2c_device_id goodix_ts_id[] = {
- { GTP_I2C_NAME, 0 },
- { }
+ { GTP_I2C_NAME, 0 },
+ { }
};
static struct i2c_driver goodix_ts_driver = {
- .probe = goodix_ts_probe,
- .remove = goodix_ts_remove,
-#ifndef CONFIG_HAS_EARLYSUSPEND
- .suspend = goodix_ts_early_suspend,
- .resume = goodix_ts_late_resume,
+ .probe = goodix_ts_probe,
+ .remove = goodix_ts_remove,
+#ifdef CONFIG_HAS_EARLYSUSPEND
+ .suspend = goodix_ts_early_suspend,
+ .resume = goodix_ts_late_resume,
#endif
- .id_table = goodix_ts_id,
- .driver = {
- .name = GTP_I2C_NAME,
- .owner = THIS_MODULE,
- },
+ .id_table = goodix_ts_id,
+ .driver = {
+ .name = GTP_I2C_NAME,
+ .owner = THIS_MODULE,
+ },
};
-/*******************************************************
+/*******************************************************
Function:
Driver Install function.
Input:
@@ -1764,43 +1771,31 @@ Input:
Output:
Executive Outcomes. 0---succeed.
********************************************************/
-static int __devinit goodix_ts_init(void)
+static int __init goodix_ts_init(void)
{
- s32 ret;
-
- GTP_DEBUG_FUNC();
- GTP_INFO("GTP driver installing...");
- goodix_wq = create_singlethread_workqueue("goodix_wq");
- if (!goodix_wq)
- {
- GTP_ERROR("Creat workqueue failed.");
- return -ENOMEM;
- }
+ int ret;
+
+ GTP_DEBUG_FUNC();
#if GTP_ESD_PROTECT
- INIT_DELAYED_WORK(&gtp_esd_check_work, gtp_esd_check_func);
- gtp_esd_check_workqueue = create_workqueue("gtp_esd_check");
+ INIT_DELAYED_WORK(&gtp_esd_check_work, gtp_esd_check_func);
+ gtp_esd_check_workqueue = create_workqueue("gtp_esd_check");
#endif
- ret = i2c_add_driver(&goodix_ts_driver);
- return ret;
+ ret = i2c_add_driver(&goodix_ts_driver);
+ return ret;
}
-/*******************************************************
+/*******************************************************
Function:
- Driver uninstall function.
+ Driver uninstall function.
Input:
- None.
+ None.
Output:
- Executive Outcomes. 0---succeed.
+ Executive Outcomes. 0---succeed.
********************************************************/
static void __exit goodix_ts_exit(void)
{
- GTP_DEBUG_FUNC();
- GTP_INFO("GTP driver exited.");
- i2c_del_driver(&goodix_ts_driver);
- if (goodix_wq)
- {
- destroy_workqueue(goodix_wq);
- }
+ GTP_DEBUG_FUNC();
+ i2c_del_driver(&goodix_ts_driver);
}
late_initcall(goodix_ts_init);
diff --git a/drivers/input/touchscreen/gt9xx/gt9xx.h b/drivers/input/touchscreen/gt9xx/gt9xx.h
index e375af530d57..48fa2ad2faca 100644
--- a/drivers/input/touchscreen/gt9xx/gt9xx.h
+++ b/drivers/input/touchscreen/gt9xx/gt9xx.h
@@ -1,241 +1,270 @@
/* drivers/input/touchscreen/gt9xx.h
- *
+ *
+ * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ *
* 2010 - 2013 Goodix Technology.
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * This program is distributed in the hope that it will be a reference
- * to you, when you are integrating the GOODiX's CTP IC into your system,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ *
+ * This program is distributed in the hope that it will be a reference
+ * to you, when you are integrating the GOODiX's CTP IC into your system,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
- *
+ *
*/
#ifndef _GOODIX_GT9XX_H_
#define _GOODIX_GT9XX_H_
#include <linux/kernel.h>
-#include <linux/hrtimer.h>
#include <linux/i2c.h>
+#include <linux/irq.h>
#include <linux/input.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/proc_fs.h>
-#include <linux/string.h>
-#include <asm/uaccess.h>
-#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <mach/gpio.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/firmware.h>
+#include <linux/debugfs.h>
+#if defined(CONFIG_HAS_EARLYSUSPEND)
#include <linux/earlysuspend.h>
+#define GOODIX_SUSPEND_LEVEL 1
+#endif
+struct goodix_ts_platform_data {
+ int irq_gpio;
+ u32 irq_gpio_flags;
+ int reset_gpio;
+ u32 reset_gpio_flags;
+ int ldo_en_gpio;
+ u32 ldo_en_gpio_flags;
+ u32 family_id;
+ u32 x_max;
+ u32 y_max;
+ u32 x_min;
+ u32 y_min;
+ u32 panel_minx;
+ u32 panel_miny;
+ u32 panel_maxx;
+ u32 panel_maxy;
+ bool no_force_update;
+ bool i2c_pull_up;
+};
struct goodix_ts_data {
- spinlock_t irq_lock;
- struct i2c_client *client;
- struct input_dev *input_dev;
- struct hrtimer timer;
- struct work_struct work;
- struct early_suspend early_suspend;
- s32 irq_is_disable;
- s32 use_irq;
- u16 abs_x_max;
- u16 abs_y_max;
- u8 max_touch_num;
- u8 int_trigger_type;
- u8 green_wake_mode;
- u8 chip_type;
- u8 enter_update;
- u8 gtp_is_suspend;
- u8 gtp_rawdiff_mode;
- u8 gtp_cfg_len;
- u8 fixed_cfg;
- u8 esd_running;
- u8 fw_error;
+ spinlock_t irq_lock;
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct goodix_ts_platform_data *pdata;
+ struct hrtimer timer;
+ struct workqueue_struct *goodix_wq;
+ struct work_struct work;
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+ struct early_suspend early_suspend;
+#endif
+ s32 irq_is_disabled;
+ s32 use_irq;
+ u16 abs_x_max;
+ u16 abs_y_max;
+ u8 max_touch_num;
+ u8 int_trigger_type;
+ u8 green_wake_mode;
+ u8 chip_type;
+ u8 *config_data;
+ u8 enter_update;
+ u8 gtp_is_suspend;
+ u8 gtp_rawdiff_mode;
+ u8 gtp_cfg_len;
+ u8 fixed_cfg;
+ u8 esd_running;
+ u8 fw_error;
};
extern u16 show_len;
extern u16 total_len;
-//***************************PART1:ON/OFF define*******************************
-#define GTP_CUSTOM_CFG 0
-#define GTP_CHANGE_X2Y 0
-#define GTP_DRIVER_SEND_CFG 1
-#define GTP_HAVE_TOUCH_KEY 0
-#define GTP_POWER_CTRL_SLEEP 0
-#define GTP_ICS_SLOT_REPORT 0
-
-#define GTP_AUTO_UPDATE 1 // auto updated by .bin file as default
-#define GTP_HEADER_FW_UPDATE 0 // auto updated by head_fw_array in gt9xx_firmware.h, function together with GTP_AUTO_UPDATE
-
-#define GTP_CREATE_WR_NODE 1
-#define GTP_ESD_PROTECT 0
-#define GTP_WITH_PEN 0
-
-#define GTP_SLIDE_WAKEUP 0
-#define GTP_DBL_CLK_WAKEUP 0 // double-click wakeup, function together with GTP_SLIDE_WAKEUP
-
-#define GTP_DEBUG_ON 1
-#define GTP_DEBUG_ARRAY_ON 0
-#define GTP_DEBUG_FUNC_ON 0
-
-//*************************** PART2:TODO define **********************************
-// STEP_1(REQUIRED): Define Configuration Information Group(s)
-// Sensor_ID Map:
+/***************************PART1:ON/OFF define*******************************/
+#define GTP_CUSTOM_CFG 0
+#define GTP_CHANGE_X2Y 0
+#define GTP_DRIVER_SEND_CFG 1
+#define GTP_HAVE_TOUCH_KEY 1
+#define GTP_POWER_CTRL_SLEEP 1
+#define GTP_ICS_SLOT_REPORT 0
+
+/* auto updated by .bin file as default */
+#define GTP_AUTO_UPDATE 0
+/* auto updated by head_fw_array in gt9xx_firmware.h,
+ * function together with GTP_AUTO_UPDATE
+ */
+#define GTP_HEADER_FW_UPDATE 0
+
+#define GTP_CREATE_WR_NODE 0
+#define GTP_ESD_PROTECT 0
+#define GTP_WITH_PEN 0
+
+#define GTP_SLIDE_WAKEUP 0
+/* double-click wakeup, function together with GTP_SLIDE_WAKEUP */
+#define GTP_DBL_CLK_WAKEUP 0
+
+#define GTP_DEBUG_ON 1
+#define GTP_DEBUG_ARRAY_ON 0
+#define GTP_DEBUG_FUNC_ON 0
+
+/*************************** PART2:TODO define *******************************/
+/* STEP_1(REQUIRED): Define Configuration Information Group(s) */
+/* Sensor_ID Map: */
/* sensor_opt1 sensor_opt2 Sensor_ID
- GND GND 0
- VDDIO GND 1
- NC GND 2
- GND NC/300K 3
- VDDIO NC/300K 4
- NC NC/300K 5
+ * GND GND 0
+ * VDDIO GND 1
+ * NC GND 2
+ * GND NC/300K 3
+ * VDDIO NC/300K 4
+ * NC NC/300K 5
*/
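The Sensor_ID strapped per this table is read back from GTP_REG_SENSOR_ID and used by gtp_init_panel() in gt9xx.c above to pick one of the CTP_CFG_GROUP1..6 blocks (send_cfg_buf[sensor_id]). A hedged sketch of that read; the low-3-bit mask is an assumption, only IDs 0..5 are meaningful:

	static int gt9xx_read_sensor_id(struct i2c_client *client, u8 *sensor_id)
	{
		u8 buf[3] = { GTP_REG_SENSOR_ID >> 8, GTP_REG_SENSOR_ID & 0xff };
		int ret;

		ret = gtp_i2c_read(client, buf, sizeof(buf));
		if (ret < 0)
			return ret;

		*sensor_id = buf[GTP_ADDR_LENGTH] & 0x07;	/* IDs 0..5 */
		return 0;
	}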
-// TODO: define your own default or for Sensor_ID == 0 config here.
-// The predefined one is just a sample config, which is not suitable for your tp in most cases.
+/* Define your own default or for Sensor_ID == 0 config here */
+/* The predefined one is just a sample config,
+ * which is not suitable for your touch panel in most cases.
+ */
#define CTP_CFG_GROUP1 {\
- 0x41,0x1C,0x02,0xC0,0x03,0x0A,0x05,0x01,0x01,0x0F,\
- 0x23,0x0F,0x5F,0x41,0x03,0x05,0x00,0x00,0x00,0x00,\
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x91,0x00,0x0A,\
- 0x28,0x00,0xB8,0x0B,0x00,0x00,0x00,0x9A,0x03,0x25,\
- 0x00,0x00,0x00,0x00,0x00,0x03,0x64,0x32,0x00,0x00,\
- 0x00,0x32,0x8C,0x94,0x05,0x01,0x05,0x00,0x00,0x96,\
- 0x0C,0x22,0xD8,0x0E,0x23,0x56,0x11,0x25,0xFF,0x13,\
- 0x28,0xA7,0x15,0x2E,0x00,0x00,0x10,0x30,0x48,0x00,\
- 0x56,0x4A,0x3A,0xFF,0xFF,0x16,0x00,0x00,0x00,0x00,\
- 0x00,0x01,0x1B,0x14,0x0D,0x19,0x00,0x00,0x01,0x00,\
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
- 0x00,0x00,0x1A,0x18,0x16,0x14,0x12,0x10,0x0E,0x0C,\
- 0x0A,0x08,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,\
- 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,\
- 0xFF,0xFF,0x1D,0x1E,0x1F,0x20,0x22,0x24,0x28,0x29,\
- 0x0C,0x0A,0x08,0x00,0x02,0x04,0x05,0x06,0x0E,0xFF,\
- 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,\
- 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,\
- 0xFF,0xFF,0xFF,0xFF,0x00,0x00,0x00,0x00,0x00,0x00,\
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
- 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,\
- 0x00,0x00,0x00,0x00,0x00,0x00,0x91,0x01\
- }
-
-// TODO: define your config for Sensor_ID == 1 here, if needed
+ 0x41, 0x1C, 0x02, 0xC0, 0x03, 0x0A, 0x05, 0x01, 0x01, 0x0F,\
+ 0x23, 0x0F, 0x5F, 0x41, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x00, 0x0A,\
+ 0x28, 0x00, 0xB8, 0x0B, 0x00, 0x00, 0x00, 0x9A, 0x03, 0x25,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x64, 0x32, 0x00, 0x00,\
+ 0x00, 0x32, 0x8C, 0x94, 0x05, 0x01, 0x05, 0x00, 0x00, 0x96,\
+ 0x0C, 0x22, 0xD8, 0x0E, 0x23, 0x56, 0x11, 0x25, 0xFF, 0x13,\
+ 0x28, 0xA7, 0x15, 0x2E, 0x00, 0x00, 0x10, 0x30, 0x48, 0x00,\
+ 0x56, 0x4A, 0x3A, 0xFF, 0xFF, 0x16, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x01, 0x1B, 0x14, 0x0D, 0x19, 0x00, 0x00, 0x01, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x1A, 0x18, 0x16, 0x14, 0x12, 0x10, 0x0E, 0x0C,\
+ 0x0A, 0x08, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
+ 0xFF, 0xFF, 0x1D, 0x1E, 0x1F, 0x20, 0x22, 0x24, 0x28, 0x29,\
+ 0x0C, 0x0A, 0x08, 0x00, 0x02, 0x04, 0x05, 0x06, 0x0E, 0xFF,\
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0x01\
+ }
+
+/* Define your config for Sensor_ID == 1 here, if needed */
#define CTP_CFG_GROUP2 {\
- }
-// TODO: define your config for Sensor_ID == 2 here, if needed
+ }
+
+/* Define your config for Sensor_ID == 2 here, if needed */
#define CTP_CFG_GROUP3 {\
- }
+ }
-// TODO: define your config for Sensor_ID == 3 here, if needed
+/* Define your config for Sensor_ID == 3 here, if needed */
#define CTP_CFG_GROUP4 {\
- }
+ }
-// TODO: define your config for Sensor_ID == 4 here, if needed
+/* Define your config for Sensor_ID == 4 here, if needed */
#define CTP_CFG_GROUP5 {\
- }
+ }
-// TODO: define your config for Sensor_ID == 5 here, if needed
+/* Define your config for Sensor_ID == 5 here, if needed */
#define CTP_CFG_GROUP6 {\
- }
-
-// STEP_2(REQUIRED): Customize your I/O ports & I/O operations
-#define GTP_RST_PORT S5PV210_GPJ3(6)
-#define GTP_INT_PORT S5PV210_GPH1(3)
-#define GTP_INT_IRQ gpio_to_irq(GTP_INT_PORT)
-#define GTP_INT_CFG S3C_GPIO_SFN(0xF)
-
-#define GTP_GPIO_AS_INPUT(pin) do{\
- gpio_direction_input(pin);\
- s3c_gpio_setpull(pin, S3C_GPIO_PULL_NONE);\
- }while(0)
-#define GTP_GPIO_AS_INT(pin) do{\
- GTP_GPIO_AS_INPUT(pin);\
- s3c_gpio_cfgpin(pin, GTP_INT_CFG);\
- }while(0)
-#define GTP_GPIO_GET_VALUE(pin) gpio_get_value(pin)
-#define GTP_GPIO_OUTPUT(pin,level) gpio_direction_output(pin,level)
-#define GTP_GPIO_REQUEST(pin, label) gpio_request(pin, label)
-#define GTP_GPIO_FREE(pin) gpio_free(pin)
-#define GTP_IRQ_TAB {IRQ_TYPE_EDGE_RISING, IRQ_TYPE_EDGE_FALLING, IRQ_TYPE_LEVEL_LOW, IRQ_TYPE_LEVEL_HIGH}
-
-// STEP_3(optional): Specify your special config info if needed
+ }
+
+#define GTP_IRQ_TAB {\
+ IRQ_TYPE_EDGE_RISING,\
+ IRQ_TYPE_EDGE_FALLING,\
+ IRQ_TYPE_LEVEL_LOW,\
+ IRQ_TYPE_LEVEL_HIGH\
+ }
+
+/* STEP_3(optional): Specify your special config info if needed */
+#define GTP_IRQ_TAB_RISING 0
+#define GTP_IRQ_TAB_FALLING 1
#if GTP_CUSTOM_CFG
- #define GTP_MAX_HEIGHT 800
- #define GTP_MAX_WIDTH 480
- #define GTP_INT_TRIGGER 0 // 0: Rising 1: Falling
+#define GTP_MAX_HEIGHT 800
+#define GTP_MAX_WIDTH 480
+#define GTP_INT_TRIGGER GTP_IRQ_TAB_RISING
#else
- #define GTP_MAX_HEIGHT 4096
- #define GTP_MAX_WIDTH 4096
- #define GTP_INT_TRIGGER 1
+#define GTP_MAX_HEIGHT 4096
+#define GTP_MAX_WIDTH 4096
+#define GTP_INT_TRIGGER GTP_IRQ_TAB_FALLING
#endif
+
#define GTP_MAX_TOUCH 5
-#define GTP_ESD_CHECK_CIRCLE 2000 // jiffy: ms
+#define GTP_ESD_CHECK_CIRCLE 2000 /* jiffy: ms */
-// STEP_4(optional): If keys are available and reported as keys, config your key info here
-#if GTP_HAVE_TOUCH_KEY
- #define GTP_KEY_TAB {KEY_MENU, KEY_HOME, KEY_BACK}
-#endif
+/***************************PART3:OTHER define*********************************/
+#define GTP_DRIVER_VERSION "V1.8<2013/06/08>"
+#define GTP_I2C_NAME "Goodix-TS"
+#define GTP_POLL_TIME 10 /* jiffy: ms*/
+#define GTP_ADDR_LENGTH 2
+#define GTP_CONFIG_MIN_LENGTH 186
+#define GTP_CONFIG_MAX_LENGTH 240
+#define FAIL 0
+#define SUCCESS 1
+#define SWITCH_OFF 0
+#define SWITCH_ON 1
+
+/* Registers define */
+#define GTP_READ_COOR_ADDR 0x814E
+#define GTP_REG_SLEEP 0x8040
+#define GTP_REG_SENSOR_ID 0x814A
+#define GTP_REG_CONFIG_DATA 0x8047
+#define GTP_REG_VERSION 0x8140
+
+#define RESOLUTION_LOC 3
+#define TRIGGER_LOC 8
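RESOLUTION_LOC and TRIGGER_LOC index the same config_data buffer that gtp_init_panel() builds (2-byte register address followed by the payload): the X/Y maxima are stored little-endian and the interrupt trigger sits in the low two bits of the TRIGGER_LOC byte. The unpack side as a sketch, matching the driver code above:

	static void gt9xx_decode_limits(const u8 *config_data,
					u16 *x_max, u16 *y_max, u8 *trigger)
	{
		*x_max = (config_data[RESOLUTION_LOC + 1] << 8) |
			  config_data[RESOLUTION_LOC];
		*y_max = (config_data[RESOLUTION_LOC + 3] << 8) |
			  config_data[RESOLUTION_LOC + 2];
		*trigger = config_data[TRIGGER_LOC] & 0x03;
	}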
-//***************************PART3:OTHER define*********************************
-#define GTP_DRIVER_VERSION "V1.8<2013/06/08>"
-#define GTP_I2C_NAME "Goodix-TS"
-#define GTP_POLL_TIME 10 // jiffy: ms
-#define GTP_ADDR_LENGTH 2
-#define GTP_CONFIG_MIN_LENGTH 186
-#define GTP_CONFIG_MAX_LENGTH 240
-#define FAIL 0
-#define SUCCESS 1
-#define SWITCH_OFF 0
-#define SWITCH_ON 1
-
-// Registers define
-#define GTP_READ_COOR_ADDR 0x814E
-#define GTP_REG_SLEEP 0x8040
-#define GTP_REG_SENSOR_ID 0x814A
-#define GTP_REG_CONFIG_DATA 0x8047
-#define GTP_REG_VERSION 0x8140
-
-#define RESOLUTION_LOC 3
-#define TRIGGER_LOC 8
-
-#define CFG_GROUP_LEN(p_cfg_grp) (sizeof(p_cfg_grp) / sizeof(p_cfg_grp[0]))
-// Log define
-#define GTP_INFO(fmt,arg...) printk("<<-GTP-INFO->> "fmt"\n",##arg)
-#define GTP_ERROR(fmt,arg...) printk("<<-GTP-ERROR->> "fmt"\n",##arg)
-#define GTP_DEBUG(fmt,arg...) do{\
- if(GTP_DEBUG_ON)\
- printk("<<-GTP-DEBUG->> [%d]"fmt"\n",__LINE__, ##arg);\
- }while(0)
-#define GTP_DEBUG_ARRAY(array, num) do{\
- s32 i;\
- u8* a = array;\
- if(GTP_DEBUG_ARRAY_ON)\
- {\
- printk("<<-GTP-DEBUG-ARRAY->>\n");\
- for (i = 0; i < (num); i++)\
- {\
- printk("%02x ", (a)[i]);\
- if ((i + 1 ) %10 == 0)\
- {\
- printk("\n");\
- }\
- }\
- printk("\n");\
- }\
- }while(0)
-#define GTP_DEBUG_FUNC() do{\
- if(GTP_DEBUG_FUNC_ON)\
- printk("<<-GTP-FUNC->> Func:%s@Line:%d\n",__func__,__LINE__);\
- }while(0)
-#define GTP_SWAP(x, y) do{\
- typeof(x) z = x;\
- x = y;\
- y = z;\
- }while (0)
-
-//*****************************End of Part III********************************
+/* Log define */
+#define GTP_DEBUG(fmt, arg...) do {\
+ if (GTP_DEBUG_ON) {\
+ pr_debug("<<-GTP-DEBUG->> [%d]"fmt"\n",\
+ __LINE__, ##arg); } \
+ } while (0)
+#define GTP_DEBUG_ARRAY(array, num) do {\
+ s32 i; \
+ u8 *a = array; \
+ if (GTP_DEBUG_ARRAY_ON) {\
+ pr_debug("<<-GTP-DEBUG-ARRAY->>\n");\
+ for (i = 0; i < (num); i++) { \
+ pr_debug("%02x ", (a)[i]);\
+ if ((i + 1) % 10 == 0) { \
+ pr_debug("\n");\
+ } \
+ } \
+ pr_debug("\n");\
+ } \
+ } while (0)
+
+#define GTP_DEBUG_FUNC() do {\
+ if (GTP_DEBUG_FUNC_ON)\
+ pr_debug("<<-GTP-FUNC->> Func:%s@Line:%d\n",\
+ __func__, __LINE__);\
+ } while (0)
+
+#define GTP_SWAP(x, y) do {\
+ typeof(x) z = x;\
+ x = y;\
+ y = z;\
+ } while (0)
+/*****************************End of Part III********************************/
+
+void gtp_esd_switch(struct i2c_client *client, int on);
+
+#if GTP_CREATE_WR_NODE
+extern s32 init_wr_node(struct i2c_client *client);
+extern void uninit_wr_node(void);
+#endif
+
+#if GTP_AUTO_UPDATE
+extern u8 gup_init_update_proc(struct goodix_ts_data *ts);
+#endif
#endif /* _GOODIX_GT9XX_H_ */
diff --git a/drivers/input/touchscreen/it7258_ts_i2c.c b/drivers/input/touchscreen/it7258_ts_i2c.c
index 1fa28f1f0d9d..c60a2b5a94b0 100644
--- a/drivers/input/touchscreen/it7258_ts_i2c.c
+++ b/drivers/input/touchscreen/it7258_ts_i2c.c
@@ -128,6 +128,7 @@
#define IT_I2C_VTG_MIN_UV 2600000
#define IT_I2C_VTG_MAX_UV 3300000
#define IT_I2C_ACTIVE_LOAD_UA 10000
+#define DELAY_VTG_REG_EN 170
#define PINCTRL_STATE_ACTIVE "pmx_ts_active"
#define PINCTRL_STATE_SUSPEND "pmx_ts_suspend"
@@ -167,6 +168,7 @@ struct IT7260_ts_platform_data {
unsigned int disp_maxy;
unsigned num_of_fingers;
unsigned int reset_delay;
+ unsigned int avdd_lpm_cur;
bool low_reset;
};
@@ -1492,6 +1494,14 @@ static int IT7260_parse_dt(struct device *dev,
return rc;
}
+ rc = of_property_read_u32(np, "ite,avdd-lpm-cur", &temp_val);
+ if (!rc) {
+ pdata->avdd_lpm_cur = temp_val;
+ } else if (rc && (rc != -EINVAL)) {
+ dev_err(dev, "Unable to read avdd lpm current value %d\n", rc);
+ return rc;
+ }
+
pdata->low_reset = of_property_read_bool(np, "ite,low-reset");
rc = IT7260_get_dt_coords(dev, "ite,display-coords", pdata);
@@ -1619,6 +1629,12 @@ static int IT7260_ts_probe(struct i2c_client *client,
goto err_power_device;
}
+ /*
+ * After enabling regulators, controller needs a delay to come to
+ * an active state.
+ */
+ msleep(DELAY_VTG_REG_EN);
+
ret = IT7260_ts_pinctrl_init(gl_ts);
if (!ret && gl_ts->ts_pinctrl) {
/*
@@ -1865,6 +1881,15 @@ static int IT7260_ts_resume(struct device *dev)
if (device_may_wakeup(dev)) {
if (gl_ts->device_needs_wakeup) {
+ /* Set active current for the avdd regulator */
+ if (gl_ts->pdata->avdd_lpm_cur) {
+ retval = reg_set_optimum_mode_check(gl_ts->avdd,
+ IT_I2C_ACTIVE_LOAD_UA);
+ if (retval < 0)
+ dev_err(dev, "Regulator avdd set_opt failed at resume rc=%d\n",
+ retval);
+ }
+
gl_ts->device_needs_wakeup = false;
disable_irq_wake(gl_ts->client->irq);
}
@@ -1903,6 +1928,15 @@ static int IT7260_ts_suspend(struct device *dev)
/* put the device in low power idle mode */
IT7260_ts_chipLowPowerMode(PWR_CTL_LOW_POWER_MODE);
+ /* Set lpm current for avdd regulator */
+ if (gl_ts->pdata->avdd_lpm_cur) {
+ retval = reg_set_optimum_mode_check(gl_ts->avdd,
+ gl_ts->pdata->avdd_lpm_cur);
+ if (retval < 0)
+ dev_err(dev, "Regulator avdd set_opt failed at suspend rc=%d\n",
+ retval);
+ }
+
gl_ts->device_needs_wakeup = true;
enable_irq_wake(gl_ts->client->irq);
}
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 4a26ab920016..cbe77022404b 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -672,6 +672,9 @@ static int get_hfi_extradata_index(enum hal_extradata_id index)
case HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
ret = HFI_PROPERTY_PARAM_VDEC_CONTENT_LIGHT_LEVEL_SEI_EXTRADATA;
break;
+ case HAL_EXTRADATA_PQ_INFO:
+ ret = HFI_PROPERTY_PARAM_VENC_OVERRIDE_QP_EXTRADATA;
+ break;
default:
dprintk(VIDC_WARN, "Extradata index not found: %d\n", index);
break;
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index d8c6e30204d1..0c26cc7debaf 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -1086,13 +1086,6 @@ int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
return -EINVAL;
}
- rc = msm_comm_try_get_bufreqs(inst);
- if (rc) {
- dprintk(VIDC_ERR, "Getting buffer requirements failed: %d\n",
- rc);
- return rc;
- }
-
hdev = inst->core->device;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
fmt = inst->fmts[CAPTURE_PORT];
@@ -1135,13 +1128,6 @@ int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
stride = inst->prop.width[CAPTURE_PORT];
scanlines = inst->prop.height[CAPTURE_PORT];
- rc = msm_comm_try_get_bufreqs(inst);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s: Failed : Buffer requirements\n", __func__);
- goto exit;
- }
-
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
plane_sizes = &inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[0];
for (i = 0; i < fmt->num_planes; ++i) {
@@ -1176,10 +1162,10 @@ int msm_vdec_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
scanlines = VENUS_Y_SCANLINES(color_format,
inst->prop.height[CAPTURE_PORT]);
- bufreq = get_buff_req_buffer(inst,
- msm_comm_get_hal_output_buffer(inst));
f->fmt.pix_mp.plane_fmt[0].sizeimage =
- bufreq ? bufreq->buffer_size : 0;
+ fmt->get_frame_size(0,
+ inst->prop.height[CAPTURE_PORT],
+ inst->prop.width[CAPTURE_PORT]);
extra_idx = EXTRADATA_IDX(fmt->num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
@@ -1316,28 +1302,6 @@ static int update_output_buffer_size(struct msm_vidc_inst *inst,
goto exit;
}
- /* Query buffer requirements from firmware */
- rc = msm_comm_try_get_bufreqs(inst);
- if (rc)
- dprintk(VIDC_WARN,
- "Failed to get buf req, %d\n", rc);
-
- /* Read back updated firmware size */
- for (i = 0; i < num_planes; ++i) {
- enum hal_buffer type = msm_comm_get_hal_output_buffer(inst);
-
- if (EXTRADATA_IDX(num_planes) &&
- i == EXTRADATA_IDX(num_planes)) {
- type = HAL_BUFFER_EXTRADATA_OUTPUT;
- }
-
- bufreq = get_buff_req_buffer(inst, type);
- f->fmt.pix_mp.plane_fmt[i].sizeimage = bufreq ?
- bufreq->buffer_size : 0;
- dprintk(VIDC_DBG,
- "updated buffer size for plane[%d] = %d\n",
- i, f->fmt.pix_mp.plane_fmt[i].sizeimage);
- }
exit:
return rc;
}
@@ -1377,10 +1341,12 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
{
struct msm_vidc_format *fmt = NULL;
struct hal_frame_size frame_sz;
+ unsigned int extra_idx = 0;
int rc = 0;
int ret = 0;
int i;
int max_input_size = 0;
+ struct hal_buffer_requirements *bufreq;
if (!inst || !f) {
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -1425,23 +1391,22 @@ int msm_vdec_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
HAL_PARAM_FRAME_SIZE, &frame_sz);
}
- ret = ret || msm_comm_try_get_bufreqs(inst);
- if (ret) {
- for (i = 0; i < fmt->num_planes; ++i) {
- f->fmt.pix_mp.plane_fmt[i].sizeimage =
- get_frame_size(inst, fmt, f->type, i);
- }
- } else {
- rc = update_output_buffer_size(inst, f,
- fmt->num_planes);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s - failed to update buffer size: %d\n",
- __func__, rc);
- goto err_invalid_fmt;
- }
+ f->fmt.pix_mp.plane_fmt[0].sizeimage =
+ fmt->get_frame_size(0,
+ f->fmt.pix_mp.height, f->fmt.pix_mp.width);
+
+ extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ if (extra_idx && extra_idx < VIDEO_MAX_PLANES) {
+ bufreq = get_buff_req_buffer(inst,
+ HAL_BUFFER_EXTRADATA_OUTPUT);
+ f->fmt.pix_mp.plane_fmt[extra_idx].sizeimage =
+ bufreq ? bufreq->buffer_size : 0;
}
+ for (i = 0; i < fmt->num_planes; ++i)
+ inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
+ f->fmt.pix_mp.plane_fmt[i].sizeimage;
+
f->fmt.pix_mp.num_planes = fmt->num_planes;
for (i = 0; i < fmt->num_planes; ++i) {
inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[i] =
@@ -1619,6 +1584,13 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
return -EINVAL;
}
+ rc = msm_comm_try_get_bufreqs(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: Failed : Buffer requirements\n", __func__);
+ goto exit;
+ }
+
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
*num_planes = inst->fmts[OUTPUT_PORT]->num_planes;
@@ -1693,7 +1665,7 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
inst->buff_req.buffer[1].buffer_count_actual,
inst->buff_req.buffer[1].buffer_size,
inst->buff_req.buffer[1].buffer_alignment);
- sizes[0] = bufreq->buffer_size;
+ sizes[0] = inst->bufq[CAPTURE_PORT].vb2_bufq.plane_sizes[0];
/*
* Set actual buffer count to firmware for DPB buffers.
@@ -1734,6 +1706,7 @@ static int msm_vdec_queue_setup(struct vb2_queue *q,
rc = -EINVAL;
break;
}
+exit:
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 5c7408740e95..55e54f7eb008 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -725,7 +725,7 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.name = "Extradata Type",
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDC_EXTRADATA_NONE,
- .maximum = V4L2_MPEG_VIDC_EXTRADATA_ROI_QP,
+ .maximum = V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO,
.default_value = V4L2_MPEG_VIDC_EXTRADATA_NONE,
.menu_skip_mask = ~(
(1 << V4L2_MPEG_VIDC_EXTRADATA_NONE) |
@@ -747,7 +747,8 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
(1 << V4L2_MPEG_VIDC_EXTRADATA_LTR) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI) |
(1 << V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS)|
- (1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP)
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_ROI_QP) |
+ (1 << V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO)
),
.qmenu = mpeg_video_vidc_extradata,
},
@@ -1512,6 +1513,7 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
+ case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
*num_planes = *num_planes + 1;
break;
default:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 40be56e874c3..8541e06d997a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -76,6 +76,7 @@ const char *const mpeg_video_vidc_extradata[] = {
"Extradata output crop",
"Extradata display colour SEI",
"Extradata light level SEI",
+ "Extradata PQ Info",
};
struct getprop_buf {
@@ -4688,6 +4689,9 @@ enum hal_extradata_id msm_comm_get_hal_extradata_index(
case V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI:
ret = HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI;
break;
+ case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
+ ret = HAL_EXTRADATA_PQ_INFO;
+ break;
default:
dprintk(VIDC_WARN, "Extradata not found: %d\n", index);
break;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index cbb4e3569b13..330710631211 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -253,6 +253,8 @@ struct hfi_extradata_header {
(HFI_PROPERTY_PARAM_VENC_OX_START + 0x007)
#define HFI_PROPERTY_PARAM_VENC_ROI_QP_EXTRADATA \
(HFI_PROPERTY_PARAM_VENC_OX_START + 0x008)
+#define HFI_PROPERTY_PARAM_VENC_OVERRIDE_QP_EXTRADATA \
+ (HFI_PROPERTY_PARAM_VENC_OX_START + 0x009)
#define HFI_PROPERTY_CONFIG_VENC_OX_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_OX_OFFSET + 0x6000)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index aba1d04726e4..36df3a1d45a1 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -124,6 +124,7 @@ enum hal_extradata_id {
HAL_EXTRADATA_OUTPUT_CROP,
HAL_EXTRADATA_MASTERING_DISPLAY_COLOUR_SEI,
HAL_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI,
+ HAL_EXTRADATA_PQ_INFO,
};
enum hal_property {
diff --git a/drivers/mfd/wcd9xxx-utils.c b/drivers/mfd/wcd9xxx-utils.c
index 22d61d96a11d..38286831a02c 100644
--- a/drivers/mfd/wcd9xxx-utils.c
+++ b/drivers/mfd/wcd9xxx-utils.c
@@ -298,6 +298,7 @@ struct wcd9xxx_pdata *wcd9xxx_populate_dt_data(struct device *dev)
struct wcd9xxx_pdata *pdata;
u32 dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
u32 mad_dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
+ u32 ecpp_dmic_sample_rate = WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED;
u32 dmic_clk_drive = WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED;
u32 prop_val;
@@ -358,6 +359,15 @@ struct wcd9xxx_pdata *wcd9xxx_populate_dt_data(struct device *dev)
pdata->mclk_rate,
"mad_dmic_rate");
+ if (!(wcd9xxx_read_of_property_u32(dev, "qcom,cdc-ecpp-dmic-rate",
+ &prop_val)))
+ ecpp_dmic_sample_rate = prop_val;
+
+ pdata->ecpp_dmic_sample_rate = wcd9xxx_validate_dmic_sample_rate(dev,
+ ecpp_dmic_sample_rate,
+ pdata->mclk_rate,
+ "ecpp_dmic_rate");
+
if (!(of_property_read_u32(dev->of_node,
"qcom,cdc-dmic-clk-drv-strength",
&prop_val)))
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index ca790d684be7..779994a1c9dd 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -2100,7 +2100,7 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
pr_debug("Do not unload keymaster app from tz\n");
- return 0;
+ goto unload_exit;
}
__qseecom_cleanup_app(data);
@@ -2152,7 +2152,7 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
pr_err("scm_call to unload app (id = %d) failed\n",
req.app_id);
ret = -EFAULT;
- goto not_release_exit;
+ goto unload_exit;
} else {
pr_warn("App id %d now unloaded\n", req.app_id);
}
@@ -2160,7 +2160,7 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
pr_err("app (%d) unload_failed!!\n",
data->client.app_id);
ret = -EFAULT;
- goto not_release_exit;
+ goto unload_exit;
}
if (resp.result == QSEOS_RESULT_SUCCESS)
pr_debug("App (%d) is unloaded!!\n",
@@ -2170,7 +2170,7 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
if (ret) {
pr_err("process_incomplete_cmd fail err: %d\n",
ret);
- goto not_release_exit;
+ goto unload_exit;
}
}
}
@@ -2200,7 +2200,6 @@ static int qseecom_unload_app(struct qseecom_dev_handle *data,
unload_exit:
qseecom_unmap_ion_allocated_memory(data);
data->released = true;
-not_release_exit:
return ret;
}
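The hunk above folds the old not_release_exit label into unload_exit, so every error path in qseecom_unload_app() now falls through to the same cleanup that unmaps the client's ION allocation and marks the handle released. A minimal standalone sketch of that single-exit idiom, with hypothetical names (unload_app, release_mapping, fail_scm) standing in for the qseecom specifics:

	#include <stdio.h>

	static void release_mapping(void)
	{
		/* stands in for qseecom_unmap_ion_allocated_memory() + released flag */
		printf("mapping released\n");
	}

	static int unload_app(int fail_scm)
	{
		int ret = 0;

		if (fail_scm) {
			ret = -1;	/* previously: goto not_release_exit, skipping cleanup */
			goto unload_exit;
		}
		/* normal unload work would go here */

	unload_exit:
		release_mapping();	/* now runs on success and on every error path */
		return ret;
	}

	int main(void)
	{
		unload_app(0);
		unload_app(1);
		return 0;
	}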
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 918f8c82acdd..9b962a63c3d8 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -107,36 +107,14 @@ static void nqx_disable_irq(struct nqx_dev *nqx_dev)
spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
}
-static void nqx_enable_irq(struct nqx_dev *nqx_dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
- if (!nqx_dev->irq_enabled) {
- nqx_dev->irq_enabled = true;
- enable_irq(nqx_dev->client->irq);
- }
- spin_unlock_irqrestore(&nqx_dev->irq_enabled_lock, flags);
-}
-
static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
{
struct nqx_dev *nqx_dev = dev_id;
unsigned long flags;
- int ret;
if (device_may_wakeup(&nqx_dev->client->dev))
pm_wakeup_event(&nqx_dev->client->dev, WAKEUP_SRC_TIMEOUT);
- ret = gpio_get_value(nqx_dev->irq_gpio);
- if (!ret) {
-#ifdef NFC_KERNEL_BU
- dev_info(&nqx_dev->client->dev,
- "nqx nfc : nqx_dev_irq_handler error = %d\n", ret);
-#endif
- return IRQ_HANDLED;
- }
-
nqx_disable_irq(nqx_dev);
spin_lock_irqsave(&nqx_dev->irq_enabled_lock, flags);
nqx_dev->count_irq++;
@@ -175,15 +153,24 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
ret = -EAGAIN;
goto err;
}
- if (!nqx_dev->irq_enabled) {
- enable_irq(nqx_dev->client->irq);
- nqx_dev->irq_enabled = true;
+ while (1) {
+ ret = 0;
+ if (!nqx_dev->irq_enabled) {
+ nqx_dev->irq_enabled = true;
+ enable_irq(nqx_dev->client->irq);
+ }
+ if (!gpio_get_value(nqx_dev->irq_gpio)) {
+ ret = wait_event_interruptible(nqx_dev->read_wq,
+ !nqx_dev->irq_enabled);
+ }
+ if (ret)
+ goto err;
+ nqx_disable_irq(nqx_dev);
+
+ if (gpio_get_value(nqx_dev->irq_gpio))
+ break;
+ dev_err_ratelimited(&nqx_dev->client->dev, "gpio is low, no need to read data\n");
}
- ret = wait_event_interruptible(nqx_dev->read_wq,
- gpio_get_value(nqx_dev->irq_gpio));
- if (ret)
- goto err;
- nqx_disable_irq(nqx_dev);
}
tmp = nqx_dev->kbuf;
@@ -393,7 +380,6 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
/* hardware dependent delay */
msleep(100);
} else if (arg == 1) {
- nqx_enable_irq(nqx_dev);
dev_dbg(&nqx_dev->client->dev,
"gpio_set_value enable: %s: info: %p\n",
__func__, nqx_dev);
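The rework above replaces the one-shot wait in nfc_read() with a loop that re-arms the interrupt, sleeps until the IRQ handler disables it again, and only trusts a wakeup when the IRQ GPIO is actually high; nqx_enable_irq() and the GPIO check in the handler become unnecessary. A condensed sketch of that loop, using the same driver fields and kernel helpers as the hunk; it is not standalone, and the helper name wait_for_nfc_data() is hypothetical:

	static int wait_for_nfc_data(struct nqx_dev *nqx_dev)
	{
		int ret;

		for (;;) {
			if (!nqx_dev->irq_enabled) {
				nqx_dev->irq_enabled = true;
				enable_irq(nqx_dev->client->irq);
			}
			if (!gpio_get_value(nqx_dev->irq_gpio)) {
				/* the handler clears irq_enabled, so this wakes on a real IRQ */
				ret = wait_event_interruptible(nqx_dev->read_wq,
						!nqx_dev->irq_enabled);
				if (ret)
					return ret;	/* interrupted by a signal */
			}
			nqx_disable_irq(nqx_dev);
			if (gpio_get_value(nqx_dev->irq_gpio))
				return 0;	/* controller really has data pending */
			/* spurious wakeup: the line is low, re-arm and wait again */
		}
	}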
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 7a9307294a6d..09d1166e29a6 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/ipa_uc_offload.h>
#include "ipa_api.h"
#define DRV_NAME "ipa"
@@ -2821,6 +2822,35 @@ void ipa_recycle_wan_skb(struct sk_buff *skb)
}
EXPORT_SYMBOL(ipa_recycle_wan_skb);
+/**
+ * ipa_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_setup_uc_ntn_pipes, inp,
+ notify, priv, hdr_len, outp);
+
+ return ret;
+}
+
+/**
+ * ipa_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_tear_down_uc_offload_pipes, ipa_ep_idx_ul,
+ ipa_ep_idx_dl);
+
+ return ret;
+}
+
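The two wrappers above follow the file's existing pattern of dispatching through the ipa_api_controller function pointers installed by whichever HW backend (IPA v2 or v3) probed. Roughly what the dispatch amounts to once IPA_API_DISPATCH_RETURN expands; the exact macro body, its error handling, and the backend assignment lines below are assumptions sketched for illustration only:

	/* inside ipa_setup_uc_ntn_pipes(), conceptually: */
	ret = -EPERM;
	if (ipa_api_ctrl && ipa_api_ctrl->ipa_setup_uc_ntn_pipes)
		ret = ipa_api_ctrl->ipa_setup_uc_ntn_pipes(inp, notify, priv,
							   hdr_len, outp);

	/* and in a backend's controller init, something like: */
	api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes;
	api_ctrl->ipa_tear_down_uc_offload_pipes = ipa2_tear_down_uc_offload_pipes;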
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 3c2471dd11dd..eab048323bd5 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -11,6 +11,7 @@
*/
#include <linux/ipa_mhi.h>
+#include <linux/ipa_uc_offload.h>
#include "ipa_common_i.h"
#ifndef _IPA_API_H_
@@ -362,6 +363,12 @@ struct ipa_api_controller {
void (*ipa_recycle_wan_skb)(struct sk_buff *skb);
+ int (*ipa_setup_uc_ntn_pipes)(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *);
+
+ int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl);
};
#ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
index aac473f62751..61cef2d71960 100644
--- a/drivers/platform/msm/ipa/ipa_clients/Makefile
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o
-obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o
\ No newline at end of file
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
+obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
new file mode 100644
index 000000000000..069f0a2e3fee
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -0,0 +1,597 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_uc_offload.h>
+#include <linux/msm_ipa.h>
+#include "../ipa_common_i.h"
+
+#define IPA_NTN_DMA_POOL_ALIGNMENT 8
+#define OFFLOAD_DRV_NAME "ipa_uc_offload"
+#define IPA_UC_OFFLOAD_DBG(fmt, args...) \
+ do { \
+ pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_LOW(fmt, args...) \
+ do { \
+ pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_ERR(fmt, args...) \
+ do { \
+ pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ } while (0)
+
+#define IPA_UC_OFFLOAD_INFO(fmt, args...) \
+ do { \
+ pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+ OFFLOAD_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ } while (0)
+
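For reference, a call like the one used later in this file and the shape of the line it produces; the exact prefix depends on dynamic debug being enabled, and the function name and line number shown are illustrative:

	IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n", "eth0");
	/*
	 * pr_debug output:
	 *   ipa_uc_offload ipa_uc_offload_ntn_reg_intf:113 register interface for netdev eth0
	 * with the same message also pushed into the IPA IPC log buffers
	 */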
+enum ipa_uc_offload_state {
+ IPA_UC_OFFLOAD_STATE_INVALID,
+ IPA_UC_OFFLOAD_STATE_INITIALIZED,
+ IPA_UC_OFFLOAD_STATE_UP,
+ IPA_UC_OFFLOAD_STATE_DOWN,
+};
+
+struct ipa_uc_offload_ctx {
+ enum ipa_uc_offload_proto proto;
+ enum ipa_uc_offload_state state;
+ void *priv;
+ u8 hdr_len;
+ u32 partial_hdr_hdl[IPA_IP_MAX];
+ char netdev_name[IPA_RESOURCE_NAME_MAX];
+ ipa_notify_cb notify;
+ struct completion ntn_completion;
+};
+
+static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
+
+static int ipa_commit_partial_hdr(
+ struct ipa_ioc_add_hdr *hdr,
+ const char *netdev_name,
+ struct ipa_hdr_info *hdr_info)
+{
+ int i;
+
+ if (hdr == NULL || hdr_info == NULL) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ hdr->commit = 1;
+ hdr->num_hdrs = 2;
+
+ snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+ "%s_ipv4", netdev_name);
+ snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+ "%s_ipv6", netdev_name);
+ for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+ hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+ memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+ hdr->hdr[i].type = hdr_info[i].hdr_type;
+ hdr->hdr[i].is_partial = 1;
+ hdr->hdr[i].is_eth2_ofst_valid = 1;
+ hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+ }
+
+ if (ipa_add_hdr(hdr)) {
+ IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int ipa_uc_offload_ntn_reg_intf(
+ struct ipa_uc_offload_intf_params *inp,
+ struct ipa_uc_offload_out_params *outp,
+ struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ struct ipa_ioc_add_hdr *hdr;
+ struct ipa_tx_intf tx;
+ struct ipa_rx_intf rx;
+ struct ipa_ioc_tx_intf_prop tx_prop[2];
+ struct ipa_ioc_rx_intf_prop rx_prop[2];
+ u32 len;
+ int ret = 0;
+
+ IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
+ inp->netdev_name);
+
+ memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
+ ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
+ ntn_ctx->notify = inp->notify;
+ ntn_ctx->priv = inp->priv;
+
+ /* add partial header */
+ len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+ hdr = kzalloc(len, GFP_KERNEL);
+ if (hdr == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+ return -ENOMEM;
+ }
+
+ if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
+ IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ /* populate tx prop */
+ tx.num_props = 2;
+ tx.prop = tx_prop;
+
+ memset(tx_prop, 0, sizeof(tx_prop));
+ tx_prop[0].ip = IPA_IP_v4;
+ tx_prop[0].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+ memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
+ sizeof(tx_prop[0].hdr_name));
+
+ tx_prop[1].ip = IPA_IP_v6;
+ tx_prop[1].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+ tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+ memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+ sizeof(tx_prop[1].hdr_name));
+
+ /* populate rx prop */
+ rx.num_props = 2;
+ rx.prop = rx_prop;
+
+ memset(rx_prop, 0, sizeof(rx_prop));
+ rx_prop[0].ip = IPA_IP_v4;
+ rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+ if (inp->is_meta_data_valid) {
+ rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_prop[0].attrib.meta_data = inp->meta_data;
+ rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask;
+ }
+
+ rx_prop[1].ip = IPA_IP_v6;
+ rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+ rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+ if (inp->is_meta_data_valid) {
+ rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+ rx_prop[1].attrib.meta_data = inp->meta_data;
+ rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
+ }
+
+ if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
+ IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
+ memset(ntn_ctx, 0, sizeof(*ntn_ctx));
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+ ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+ init_completion(&ntn_ctx->ntn_completion);
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
+
+fail:
+ kfree(hdr);
+ return ret;
+}
+
+int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *inp,
+ struct ipa_uc_offload_out_params *outp)
+{
+ struct ipa_uc_offload_ctx *ctx;
+ int ret = 0;
+
+ if (inp == NULL || outp == NULL) {
+ IPA_UC_OFFLOAD_ERR("invalid params in=%p out=%p\n", inp, outp);
+ return -EINVAL;
+ }
+
+ if (inp->proto <= IPA_UC_INVALID ||
+ inp->proto >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto);
+ return -EINVAL;
+ }
+
+ if (!ipa_uc_offload_ctx[inp->proto]) {
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n");
+ return -EFAULT;
+ }
+ ipa_uc_offload_ctx[inp->proto] = ctx;
+ ctx->proto = inp->proto;
+ } else
+ ctx = ipa_uc_offload_ctx[inp->proto];
+
+ if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) {
+ IPA_UC_OFFLOAD_ERR("Already Initialized\n");
+ return -EINVAL;
+ }
+
+ if (ctx->proto == IPA_UC_NTN) {
+ ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
+ if (!ret)
+ outp->clnt_hndl = IPA_UC_NTN;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
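A hedged sketch of how an Ethernet client would call the registration API exported above. The field names match the code in this file; the exact layout of ipa_uc_offload_intf_params, the notify-callback signature, and values such as ETH_HLEN and IPA_HDR_L2_ETHERNET_II are assumptions about linux/ipa_uc_offload.h and linux/msm_ipa.h, and my_reg_ntn_intf()/my_ntn_notify() are hypothetical:

	static void my_ntn_notify(void *priv, enum ipa_dp_evt_type evt,
				  unsigned long data)
	{
		/* exception/RX packets delivered by IPA for the offloaded pipes */
	}

	static int my_reg_ntn_intf(void *my_priv, u8 *eth_hdr_v4, u8 *eth_hdr_v6)
	{
		struct ipa_uc_offload_intf_params in;
		struct ipa_uc_offload_out_params out;

		memset(&in, 0, sizeof(in));
		in.proto = IPA_UC_NTN;
		in.notify = my_ntn_notify;
		in.priv = my_priv;
		strlcpy(in.netdev_name, "eth0", sizeof(in.netdev_name));

		in.hdr_info[IPA_IP_v4].hdr = eth_hdr_v4;	/* partial L2 header, dst MAC filled later */
		in.hdr_info[IPA_IP_v4].hdr_len = ETH_HLEN;
		in.hdr_info[IPA_IP_v4].hdr_type = IPA_HDR_L2_ETHERNET_II;
		in.hdr_info[IPA_IP_v4].dst_mac_addr_offset = 0;
		in.hdr_info[IPA_IP_v6] = in.hdr_info[IPA_IP_v4];
		in.hdr_info[IPA_IP_v6].hdr = eth_hdr_v6;

		if (ipa_uc_offload_reg_intf(&in, &out))
			return -EFAULT;

		/* out.clnt_hndl (IPA_UC_NTN here) keys all later offload calls */
		return 0;
	}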
+
+static int ipa_uc_ntn_cons_release(void)
+{
+ return 0;
+}
+
+static int ipa_uc_ntn_cons_request(void)
+{
+ int ret = 0;
+ struct ipa_uc_offload_ctx *ntn_ctx;
+
+ ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN];
+ if (!ntn_ctx) {
+ IPA_UC_OFFLOAD_ERR("NTN is not initialized\n");
+ ret = -EFAULT;
+ } else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+ IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state);
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+
+ offload_ctx = (struct ipa_uc_offload_ctx *)user_data;
+ if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID &&
+ offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) {
+ IPA_UC_OFFLOAD_ERR("Invalid user data\n");
+ return;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED)
+ IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state);
+
+ switch (event) {
+ case IPA_RM_RESOURCE_GRANTED:
+ complete_all(&offload_ctx->ntn_completion);
+ break;
+
+ case IPA_RM_RESOURCE_RELEASED:
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event);
+ break;
+ }
+}
+
+int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
+ struct ipa_ntn_conn_out_params *outp,
+ struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ struct ipa_rm_create_params param;
+ int result = 0;
+
+ if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+ inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+ IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
+ return -EINVAL;
+ }
+ if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+ inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+ IPA_UC_OFFLOAD_ERR("alignment failure on RX\n");
+ return -EINVAL;
+ }
+
+ memset(&param, 0, sizeof(param));
+ param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ param.reg_params.user_data = ntn_ctx;
+ param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
+ param.floor_voltage = IPA_VOLTAGE_SVS;
+ result = ipa_rm_create_resource(&param);
+ if (result) {
+ IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
+ return -EFAULT;
+ }
+
+ memset(&param, 0, sizeof(param));
+ param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ param.request_resource = ipa_uc_ntn_cons_request;
+ param.release_resource = ipa_uc_ntn_cons_release;
+ result = ipa_rm_create_resource(&param);
+ if (result) {
+ IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
+ goto fail_create_rm_cons;
+ }
+
+ if (ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to add rm dependency\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
+ ntn_ctx->priv, ntn_ctx->hdr_len, outp)) {
+ IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
+ result = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+ if (result == -EINPROGRESS) {
+ if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
+ 10*HZ) == 0) {
+ IPA_UC_OFFLOAD_ERR("ODU PROD resource req time out\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ } else if (result != 0) {
+ IPA_UC_OFFLOAD_ERR("fail to request resource\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+fail_create_rm_cons:
+ ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+
+ return result;
+}
+
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
+ struct ipa_uc_offload_conn_out_params *outp)
+{
+ int ret = 0;
+ struct ipa_uc_offload_ctx *offload_ctx;
+
+ if (!(inp && outp)) {
+ IPA_UC_OFFLOAD_ERR("bad parm. in=%p out=%p\n", inp, outp);
+ return -EINVAL;
+ }
+
+ if (inp->clnt_hndl <= IPA_UC_INVALID ||
+ inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("invalid client handle %d\n",
+ inp->clnt_hndl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
+ IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
+ return -EPERM;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn,
+ offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
+
+int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+ struct ipa_rm_perf_profile rm_profile;
+ enum ipa_rm_resource_name resource_name;
+
+ if (profile == NULL) {
+ IPA_UC_OFFLOAD_ERR("Invalid input\n");
+ return -EINVAL;
+ }
+
+ rm_profile.max_supported_bandwidth_mbps =
+ profile->max_supported_bw_mbps;
+
+ if (profile->client == IPA_CLIENT_ODU_PROD) {
+ resource_name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+ } else if (profile->client == IPA_CLIENT_ODU_TETH_CONS) {
+ resource_name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+ } else {
+ IPA_UC_OFFLOAD_ERR("not supported\n");
+ return -EINVAL;
+ }
+
+ if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
+ IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ipa_set_perf_profile);
+
+static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ int ipa_ep_idx_ul, ipa_ep_idx_dl;
+
+ ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
+ if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+ IPA_RM_RESOURCE_APPS_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete rm dependency\n");
+ return -EFAULT;
+ }
+
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
+ return -EFAULT;
+ }
+
+ if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+ return -EFAULT;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ODU_PROD);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ODU_TETH_CONS);
+ if (ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl)) {
+ IPA_UC_OFFLOAD_ERR("fail to tear down uc offload pipes\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+ int ret = 0;
+
+ if (clnt_hdl <= IPA_UC_INVALID ||
+ clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid client Handle\n");
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+ IPA_UC_OFFLOAD_ERR("Invalid state\n");
+ return -EINVAL;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
+
+static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+ int len, result = 0;
+ struct ipa_ioc_del_hdr *hdr;
+
+ len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
+ hdr = kzalloc(len, GFP_KERNEL);
+ if (hdr == NULL) {
+ IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+ return -ENOMEM;
+ }
+
+ hdr->commit = 1;
+ hdr->num_hdls = 2;
+ hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
+ hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
+
+ if (ipa_del_hdr(hdr)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
+ IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+fail:
+ kfree(hdr);
+ return result;
+}
+
+int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+ struct ipa_uc_offload_ctx *offload_ctx;
+ int ret = 0;
+
+ if (clnt_hdl <= IPA_UC_INVALID ||
+ clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+ if (!offload_ctx) {
+ IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+ return -EINVAL;
+ }
+
+ if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+ IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
+ return -EINVAL;
+ }
+
+ switch (offload_ctx->proto) {
+ case IPA_UC_NTN:
+ ret = ipa_uc_ntn_cleanup(offload_ctx);
+ break;
+
+ default:
+ IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ kfree(offload_ctx);
+ offload_ctx = NULL;
+ ipa_uc_offload_ctx[clnt_hdl] = NULL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_cleanup);
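Taken together, the state checks in the connect, disconnect and cleanup paths above imply a fixed call order for a client. A condensed sketch of that lifecycle, with error handling elided and the ring/buffer programming left to the client; the conn_in and profile field names are the ones used earlier in this file:

	struct ipa_uc_offload_conn_in_params conn_in;
	struct ipa_uc_offload_conn_out_params conn_out;
	struct ipa_perf_profile profile;

	ipa_uc_offload_reg_intf(&in, &out);		/* state: INITIALIZED */

	conn_in.clnt_hndl = out.clnt_hndl;
	/* fill conn_in.u.ntn with ring/buffer-pool physical addresses here */
	ipa_uc_offload_conn_pipes(&conn_in, &conn_out);	/* state: UP */

	profile.client = IPA_CLIENT_ODU_PROD;
	profile.max_supported_bw_mbps = 1000;
	ipa_set_perf_profile(&profile);			/* bandwidth vote, optional */

	ipa_uc_offload_disconn_pipes(out.clnt_hndl);	/* state: DOWN */
	ipa_uc_offload_cleanup(out.clnt_hndl);		/* frees the context */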
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 060613281e4c..115348251d17 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -17,6 +17,7 @@
#define _IPA_COMMON_I_H_
#include <linux/ipc_logging.h>
#include <linux/ipa.h>
+#include <linux/ipa_uc_offload.h>
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
@@ -342,6 +343,11 @@ int ipa_uc_state_check(void);
void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb);
void ipa_set_tag_process_before_gating(bool val);
bool ipa_has_open_aggr_frame(enum ipa_client_type client);
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
u8 *ipa_write_64(u64 w, u8 *dest);
u8 *ipa_write_32(u32 w, u8 *dest);
diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
new file mode 100644
index 000000000000..ae6cfc4fcd50
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#ifndef _IPA_UC_OFFLOAD_COMMON_I_H_
+#define _IPA_UC_OFFLOAD_COMMON_I_H_
+
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/Makefile b/drivers/platform/msm/ipa/ipa_v2/Makefile
index 435acbf1cab8..69b8a4c94461 100644
--- a/drivers/platform/msm/ipa/ipa_v2/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v2/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_IPA) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
- ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index e94b144457ce..fc3d9f355da6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -4052,6 +4052,12 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
else
IPADBG(":wdi init ok\n");
+ result = ipa_ntn_init();
+ if (result)
+ IPAERR(":ntn init failed (%d)\n", -result);
+ else
+ IPADBG(":ntn init ok\n");
+
ipa_ctx->q6_proxy_clk_vote_valid = true;
ipa_register_panic_hdlr();
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index ca3c6d0a1c1a..0eab77d27760 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -110,6 +110,7 @@ static struct dentry *dfile_ip6_flt;
static struct dentry *dfile_stats;
static struct dentry *dfile_wstats;
static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
static struct dentry *dfile_dbg_cnt;
static struct dentry *dfile_msg;
static struct dentry *dfile_ip4_nat;
@@ -1100,6 +1101,110 @@ nxt_clnt_cons:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
+static ssize_t ipa_read_ntn(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ struct IpaHwStatsNTNInfoData_t stats;
+ int nbytes;
+ int cnt = 0;
+
+ if (!ipa2_get_ntn_stats(&stats)) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "TX num_pkts_processed=%u\n"
+ "TX tail_ptr_val=%u\n"
+ "TX num_db_fired=%u\n"
+ "TX ringFull=%u\n"
+ "TX ringEmpty=%u\n"
+ "TX ringUsageHigh=%u\n"
+ "TX ringUsageLow=%u\n"
+ "TX RingUtilCount=%u\n"
+ "TX bamFifoFull=%u\n"
+ "TX bamFifoEmpty=%u\n"
+ "TX bamFifoUsageHigh=%u\n"
+ "TX bamFifoUsageLow=%u\n"
+ "TX bamUtilCount=%u\n"
+ "TX num_db=%u\n"
+ "TX num_unexpected_db=%u\n"
+ "TX num_bam_int_handled=%u\n"
+ "TX num_bam_int_in_non_running_state=%u\n"
+ "TX num_qmb_int_handled=%u\n"
+ "TX num_bam_int_handled_while_wait_for_bam=%u\n"
+ "TX num_bam_int_handled_while_not_in_bam=%u\n",
+ TX_STATS(num_pkts_processed),
+ TX_STATS(tail_ptr_val),
+ TX_STATS(num_db_fired),
+ TX_STATS(tx_comp_ring_stats.ringFull),
+ TX_STATS(tx_comp_ring_stats.ringEmpty),
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+ TX_STATS(tx_comp_ring_stats.ringUsageLow),
+ TX_STATS(tx_comp_ring_stats.RingUtilCount),
+ TX_STATS(bam_stats.bamFifoFull),
+ TX_STATS(bam_stats.bamFifoEmpty),
+ TX_STATS(bam_stats.bamFifoUsageHigh),
+ TX_STATS(bam_stats.bamFifoUsageLow),
+ TX_STATS(bam_stats.bamUtilCount),
+ TX_STATS(num_db),
+ TX_STATS(num_unexpected_db),
+ TX_STATS(num_bam_int_handled),
+ TX_STATS(num_bam_int_in_non_running_state),
+ TX_STATS(num_qmb_int_handled),
+ TX_STATS(num_bam_int_handled_while_wait_for_bam),
+ TX_STATS(num_bam_int_handled_while_not_in_bam));
+ cnt += nbytes;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "RX max_outstanding_pkts=%u\n"
+ "RX num_pkts_processed=%u\n"
+ "RX rx_ring_rp_value=%u\n"
+ "RX ringFull=%u\n"
+ "RX ringEmpty=%u\n"
+ "RX ringUsageHigh=%u\n"
+ "RX ringUsageLow=%u\n"
+ "RX RingUtilCount=%u\n"
+ "RX bamFifoFull=%u\n"
+ "RX bamFifoEmpty=%u\n"
+ "RX bamFifoUsageHigh=%u\n"
+ "RX bamFifoUsageLow=%u\n"
+ "RX bamUtilCount=%u\n"
+ "RX num_bam_int_handled=%u\n"
+ "RX num_db=%u\n"
+ "RX num_unexpected_db=%u\n"
+ "RX num_pkts_in_dis_uninit_state=%u\n"
+ "num_ic_inj_vdev_change=%u\n"
+ "num_ic_inj_fw_desc_change=%u\n",
+ RX_STATS(max_outstanding_pkts),
+ RX_STATS(num_pkts_processed),
+ RX_STATS(rx_ring_rp_value),
+ RX_STATS(rx_ind_ring_stats.ringFull),
+ RX_STATS(rx_ind_ring_stats.ringEmpty),
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+ RX_STATS(rx_ind_ring_stats.ringUsageLow),
+ RX_STATS(rx_ind_ring_stats.RingUtilCount),
+ RX_STATS(bam_stats.bamFifoFull),
+ RX_STATS(bam_stats.bamFifoEmpty),
+ RX_STATS(bam_stats.bamFifoUsageHigh),
+ RX_STATS(bam_stats.bamFifoUsageLow),
+ RX_STATS(bam_stats.bamUtilCount),
+ RX_STATS(num_bam_int_handled),
+ RX_STATS(num_db),
+ RX_STATS(num_unexpected_db),
+ RX_STATS(num_pkts_in_dis_uninit_state),
+ RX_STATS(num_bam_int_handled_while_not_in_bam),
+ RX_STATS(num_bam_int_handled_while_in_bam_state));
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Fail to read NTN stats\n");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
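The function above backs a debugfs node (created as "ntn" further down in this patch), so the counters can be dumped from userspace. A small reader, assuming the usual /sys/kernel/debug mount point and the "ipa" directory this file uses for its other nodes:

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/kernel/debug/ipa/ntn", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* "TX num_pkts_processed=..." etc. */
		fclose(f);
		return 0;
	}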
static ssize_t ipa_read_wdi(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -1745,6 +1850,10 @@ const struct file_operations ipa_wdi_ops = {
.read = ipa_read_wdi,
};
+const struct file_operations ipa_ntn_ops = {
+ .read = ipa_read_ntn,
+};
+
const struct file_operations ipa_msg_ops = {
.read = ipa_read_msg,
};
@@ -1907,6 +2016,13 @@ void ipa_debugfs_init(void)
goto fail;
}
+ dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+ &ipa_ntn_ops);
+ if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+ IPAERR("fail to create file for debug_fs ntn stats\n");
+ goto fail;
+ }
+
dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
&ipa_dbg_cnt_ops);
if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 6a5b779b24f8..5ea7a08b3135 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -26,12 +26,14 @@
#include <linux/platform_device.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
+#include <linux/ipa_uc_offload.h>
#include "ipa_hw_defs.h"
#include "ipa_ram_mmap.h"
#include "ipa_reg.h"
#include "ipa_qmi_service.h"
#include "../ipa_api.h"
#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
@@ -544,7 +546,7 @@ struct ipa_ep_context {
bool skip_ep_cfg;
bool keep_ipa_awake;
struct ipa_wlan_stats wstats;
- u32 wdi_state;
+ u32 uc_offload_state;
u32 rx_replenish_threshold;
bool disconnect_in_progress;
u32 qmi_request_sent;
@@ -817,134 +819,6 @@ struct ipa_tag_completion {
struct ipa_controller;
/**
- * @brief Enum value determined based on the feature it
- * corresponds to
- * +----------------+----------------+
- * | 3 bits | 5 bits |
- * +----------------+----------------+
- * | HW_FEATURE | OPCODE |
- * +----------------+----------------+
- *
- */
-#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
-#define EXTRACT_UC_FEATURE(value) (value >> 5)
-
-#define IPA_HW_NUM_FEATURES 0x8
-
-/**
- * enum ipa_hw_features - Values that represent the features supported in IPA HW
- * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
- * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
- * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
-*/
-enum ipa_hw_features {
- IPA_HW_FEATURE_COMMON = 0x0,
- IPA_HW_FEATURE_MHI = 0x1,
- IPA_HW_FEATURE_WDI = 0x3,
- IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
-};
-
-/**
- * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
- * section in 128B shared memory located in offset zero of SW Partition in IPA
- * SRAM.
- * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
- * @cmdParams : CPU->HW command parameter. The parameter filed can hold 32 bits
- * of parameters (immediate parameters) and point on structure in system memory
- * (in such case the address must be accessible for HW)
- * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
- * @responseParams : HW->CPU response parameter. The parameter filed can hold 32
- * bits of parameters (immediate parameters) and point on structure in system
- * memory
- * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
- * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits of
- * parameters (immediate parameters) and point on structure in system memory
- * @firstErrorAddress : Contains the address of first error-source on SNOC
- * @hwState : State of HW. The state carries information regarding the error type.
- * @warningCounter : The warnings counter. The counter carries information regarding
- * non fatal errors in HW
- * @interfaceVersionCommon : The Common interface version as reported by HW
- *
- * The shared memory is used for communication between IPA HW and CPU.
- */
-struct IpaHwSharedMemCommonMapping_t {
- u8 cmdOp;
- u8 reserved_01;
- u16 reserved_03_02;
- u32 cmdParams;
- u8 responseOp;
- u8 reserved_09;
- u16 reserved_0B_0A;
- u32 responseParams;
- u8 eventOp;
- u8 reserved_11;
- u16 reserved_13_12;
- u32 eventParams;
- u32 reserved_1B_18;
- u32 firstErrorAddress;
- u8 hwState;
- u8 warningCounter;
- u16 reserved_23_22;
- u16 interfaceVersionCommon;
- u16 reserved_27_26;
-} __packed;
-
-/**
- * union IpaHwFeatureInfoData_t - parameters for stats/config blob
- *
- * @offset : Location of a feature within the EventInfoData
- * @size : Size of the feature
- */
-union IpaHwFeatureInfoData_t {
- struct IpaHwFeatureInfoParams_t {
- u32 offset:16;
- u32 size:16;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
- * struct IpaHwEventInfoData_t - Structure holding the parameters for
- * statistics and config info
- *
- * @baseAddrOffset : Base Address Offset of the statistics or config
- * structure from IPA_WRAPPER_BASE
- * @IpaHwFeatureInfoData_t : Location and size of each feature within
- * the statistics or config structure
- *
- * @note Information about each feature in the featureInfo[]
- * array is populated at predefined indices per the IPA_HW_FEATURES
- * enum definition
- */
-struct IpaHwEventInfoData_t {
- u32 baseAddrOffset;
- union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
-} __packed;
-
-/**
- * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
- * IPA_HW_2_CPU_EVENT_LOG_INFO Event
- *
- * @featureMask : Mask indicating the features enabled in HW.
- * Refer IPA_HW_FEATURE_MASK
- * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
- * Log Buffer structure
- * @statsInfo : Statistics related information
- * @configInfo : Configuration related information
- *
- * @note The offset location of this structure from IPA_WRAPPER_BASE
- * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
- * Event
- */
-struct IpaHwEventLogInfoData_t {
- u32 featureMask;
- u32 circBuffBaseAddrOffset;
- struct IpaHwEventInfoData_t statsInfo;
- struct IpaHwEventInfoData_t configInfo;
-
-} __packed;
-
-/**
* struct ipa_uc_hdlrs - IPA uC callback functions
* @ipa_uc_loaded_hdlr: Function handler when uC is loaded
* @ipa_uc_event_hdlr: Event handler function
@@ -1226,6 +1100,7 @@ struct ipa_context {
struct ipa_uc_ctx uc_ctx;
struct ipa_uc_wdi_ctx uc_wdi_ctx;
+ struct ipa_uc_ntn_ctx uc_ntn_ctx;
u32 wan_rx_ring_size;
bool skip_uc_pipe_reset;
bool smmu_present;
@@ -1604,6 +1479,11 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl);
int ipa2_suspend_wdi_pipe(u32 clnt_hdl);
int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa2_get_smem_restr_bytes(void);
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
/*
* To retrieve doorbell physical address of
* wlan pipes
@@ -1949,4 +1829,8 @@ int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
int ipa2_rx_poll(u32 clnt_hdl, int budget);
void ipa2_recycle_wan_skb(struct sk_buff *skb);
+int ipa_ntn_init(void);
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats);
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *),
+ void *user_data);
#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
new file mode 100644
index 000000000000..08ed47f3cacf
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -0,0 +1,438 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+static void ipa_uc_ntn_event_handler(
+ struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio)
+{
+ union IpaHwNTNErrorEventData_t ntn_evt;
+
+ if (uc_sram_mmio->eventOp == IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+ ntn_evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+ ntn_evt.params.ntn_error_type,
+ ntn_evt.params.ipa_pipe_number,
+ ntn_evt.params.ntn_ch_err_type);
+ }
+}
+
+static void ipa_uc_ntn_event_log_info_handler(
+ struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+ IPAERR("NTN feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+ params.size != sizeof(struct IpaHwStatsNTNInfoData_t)) {
+ IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct IpaHwStatsNTNInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.size);
+ return;
+ }
+
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+ IPAERR("NTN stats ofst=0x%x\n", ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ if (ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+ sizeof(struct IpaHwStatsNTNInfoData_t) >=
+ ipa_ctx->ctrl->ipa_reg_base_ofst +
+ IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+ ipa_ctx->smem_sz) {
+ IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ return;
+ }
+
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+ ioremap(ipa_ctx->ipa_wrapper_base +
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+ sizeof(struct IpaHwStatsNTNInfoData_t));
+ if (!ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc ntn stats\n");
+ return;
+ }
+}
+
+/**
+ * ipa2_get_ntn_stats() - Query NTN statistics from uc
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ if (unlikely(!ipa_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!stats || !ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("bad parms stats=%p ntn_stats=%p\n",
+ stats,
+ ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ TX_STATS(num_pkts_processed);
+ TX_STATS(tail_ptr_val);
+ TX_STATS(num_db_fired);
+ TX_STATS(tx_comp_ring_stats.ringFull);
+ TX_STATS(tx_comp_ring_stats.ringEmpty);
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+ TX_STATS(tx_comp_ring_stats.ringUsageLow);
+ TX_STATS(tx_comp_ring_stats.RingUtilCount);
+ TX_STATS(bam_stats.bamFifoFull);
+ TX_STATS(bam_stats.bamFifoEmpty);
+ TX_STATS(bam_stats.bamFifoUsageHigh);
+ TX_STATS(bam_stats.bamFifoUsageLow);
+ TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(num_db);
+ TX_STATS(num_unexpected_db);
+ TX_STATS(num_bam_int_handled);
+ TX_STATS(num_bam_int_in_non_running_state);
+ TX_STATS(num_qmb_int_handled);
+ TX_STATS(num_bam_int_handled_while_wait_for_bam);
+ TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+ RX_STATS(max_outstanding_pkts);
+ RX_STATS(num_pkts_processed);
+ RX_STATS(rx_ring_rp_value);
+ RX_STATS(rx_ind_ring_stats.ringFull);
+ RX_STATS(rx_ind_ring_stats.ringEmpty);
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+ RX_STATS(rx_ind_ring_stats.ringUsageLow);
+ RX_STATS(rx_ind_ring_stats.RingUtilCount);
+ RX_STATS(bam_stats.bamFifoFull);
+ RX_STATS(bam_stats.bamFifoEmpty);
+ RX_STATS(bam_stats.bamFifoUsageHigh);
+ RX_STATS(bam_stats.bamFifoUsageLow);
+ RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(num_bam_int_handled);
+ RX_STATS(num_db);
+ RX_STATS(num_unexpected_db);
+ RX_STATS(num_pkts_in_dis_uninit_state);
+ RX_STATS(num_bam_int_handled_while_not_in_bam);
+ RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
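For readability, one expansion of the copy macros defined at the top of ipa2_get_ntn_stats() above:

	TX_STATS(num_db);
	/* expands to:
	 *   stats->tx_ch_stats[0].num_db =
	 *       ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].num_db;
	 */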
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
+{
+ int ret;
+
+ ret = ipa2_uc_state_check();
+ if (ret) {
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
+ ipa_ctx->uc_ntn_ctx.priv = user_data;
+ return 0;
+ }
+
+ return -EEXIST;
+}
+
+static void ipa_uc_ntn_loaded_handler(void)
+{
+ if (!ipa_ctx) {
+ IPAERR("IPA ctx is null\n");
+ return;
+ }
+
+ if (ipa_ctx->uc_ntn_ctx.uc_ready_cb) {
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb(
+ ipa_ctx->uc_ntn_ctx.priv);
+
+ ipa_ctx->uc_ntn_ctx.uc_ready_cb =
+ NULL;
+ ipa_ctx->uc_ntn_ctx.priv = NULL;
+ }
+}
+
+int ipa_ntn_init(void)
+{
+ struct ipa_uc_hdlrs uc_ntn_cbs = { 0 };
+
+ uc_ntn_cbs.ipa_uc_event_hdlr = ipa_uc_ntn_event_handler;
+ uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+ ipa_uc_ntn_event_log_info_handler;
+ uc_ntn_cbs.ipa_uc_loaded_hdlr =
+ ipa_uc_ntn_loaded_handler;
+
+ ipa_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+ return 0;
+}
+
+static int ipa2_uc_send_ntn_setup_pipe_cmd(
+ struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+ int ipa_ep_idx;
+ int result = 0;
+ struct ipa_mem_buffer cmd;
+ struct IpaHwNtnSetUpCmdData_t *Ntn_params;
+ struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+ if (ntn_info == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to get ep idx.\n");
+ return -EFAULT;
+ }
+
+ IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+ IPADBG("ring_base_pa = 0x%pa\n",
+ &ntn_info->ring_base_pa);
+ IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+ IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+ IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+ IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+ IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+ Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+ Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+ Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+ Ntn_params->num_buffers = ntn_info->num_buffers;
+ Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+ Ntn_params->data_buff_size = ntn_info->data_buff_size;
+ Ntn_params->ipa_pipe_number = ipa_ep_idx;
+ Ntn_params->dir = dir;
+
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result)
+ result = -EFAULT;
+
+ dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ return result;
+}
+
+/**
+ * ipa2_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ int ipa_ep_idx_ul, ipa_ep_idx_dl;
+ struct ipa_ep_context *ep_ul, *ep_dl;
+ int result = 0;
+
+ if (in == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+ if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+ IPAERR("fail to alloc EP.\n");
+ return -EFAULT;
+ }
+
+ ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->valid || ep_dl->valid) {
+ IPAERR("EP already allocated ul:%d dl:%d\n",
+ ep_ul->valid, ep_dl->valid);
+ return -EFAULT;
+ }
+
+ memset(ep_ul, 0, offsetof(struct ipa_ep_context, sys));
+ memset(ep_dl, 0, offsetof(struct ipa_ep_context, sys));
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ /* setup ul ep cfg */
+ ep_ul->valid = 1;
+ ep_ul->client = in->ul.client;
+ result = ipa_enable_data_path(ipa_ep_idx_ul);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_ul);
+ return -EFAULT;
+ }
+ ep_ul->client_notify = notify;
+ ep_ul->priv = priv;
+
+ memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+ ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+ ep_ul->cfg.hdr.hdr_len = hdr_len;
+ ep_ul->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa2_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+ IPAERR("fail to setup ul pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+ IPAERR("fail to send cmd to uc for ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa_install_dflt_flt_rules(ipa_ep_idx_ul);
+ outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+ ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPAERR("client %d (ep: %d) connected\n", in->ul.client,
+ ipa_ep_idx_ul);
+
+ /* setup dl ep cfg */
+ ep_dl->valid = 1;
+ ep_dl->client = in->dl.client;
+ result = ipa_enable_data_path(ipa_ep_idx_dl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_dl);
+ result = -EFAULT;
+ goto fail;
+ }
+
+ memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg));
+ ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+ ep_dl->cfg.hdr.hdr_len = hdr_len;
+ ep_dl->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa2_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+ IPAERR("fail to setup dl pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+ IPAERR("fail to send cmd to uc for dl pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+ ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPAERR("client %d (ep: %d) connected\n", in->dl.client,
+ ipa_ep_idx_dl);
+
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
+
+/**
+ * ipa2_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ struct ipa_mem_buffer cmd;
+ struct ipa_ep_context *ep_ul, *ep_dl;
+ struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+ union IpaHwNtnCommonChCmdData_t *tear;
+ int result = 0;
+
+ IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+ IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+ ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+ ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+ IPAERR("channel bad state: ul %d dl %d\n",
+ ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+ return -EFAULT;
+ }
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ /* teardown the UL pipe */
+ cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+ tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa_disable_data_path(ipa_ep_idx_ul);
+ ipa_delete_dflt_flt_rules(ipa_ep_idx_ul);
+ memset(&ipa_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa_ep_context));
+ IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+ /* teardown the DL pipe */
+ tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+ result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa_disable_data_path(ipa_ep_idx_dl);
+ memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
+ IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+ dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
new file mode 100644
index 000000000000..3bec471b4656
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
@@ -0,0 +1,514 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ * @brief Enum value determined based on the feature it
+ * corresponds to
+ * +----------------+----------------+
+ * | 3 bits | 5 bits |
+ * +----------------+----------------+
+ * | HW_FEATURE | OPCODE |
+ * +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa_hw_features - Values that represent the features supported in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to offload operation in IPA HW
+*/
+enum ipa_hw_features {
+ IPA_HW_FEATURE_COMMON = 0x0,
+ IPA_HW_FEATURE_MHI = 0x1,
+ IPA_HW_FEATURE_WDI = 0x3,
+ IPA_HW_FEATURE_NTN = 0x4,
+ IPA_HW_FEATURE_OFFLOAD = 0x5,
+ IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
+};
+
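A worked example of the 3-bit-feature / 5-bit-opcode packing described above, using the NTN error event defined later in this header:

	/*
	 * IPA_HW_2_CPU_EVENT_NTN_ERROR
	 *   = FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0)
	 *   = (0x4 << 5) | 0x0 = 0x80
	 * and EXTRACT_UC_FEATURE(0x80) = 0x80 >> 5 = 0x4 = IPA_HW_FEATURE_NTN
	 */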
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter. The parameter filed can hold 32 bits
+ * of parameters (immediate parameters) and point on structure in
+ * system memory (in such case the address must be accessible
+ * for HW)
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter filed can hold
+ * 32 bits of parameters (immediate parameters) and point
+ * on structure in system memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits
+ * of parameters (immediate parameters) and point on
+ * structure in system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the error
+ * type.
+ * @warningCounter : The warnings counter. The counter carries information
+ * regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+ u8 cmdOp;
+ u8 reserved_01;
+ u16 reserved_03_02;
+ u32 cmdParams;
+ u8 responseOp;
+ u8 reserved_09;
+ u16 reserved_0B_0A;
+ u32 responseParams;
+ u8 eventOp;
+ u8 reserved_11;
+ u16 reserved_13_12;
+ u32 eventParams;
+ u32 reserved_1B_18;
+ u32 firstErrorAddress;
+ u8 hwState;
+ u8 warningCounter;
+ u16 reserved_23_22;
+ u16 interfaceVersionCommon;
+ u16 reserved_27_26;
+} __packed;
+
+/**
+ * union IpaHwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union IpaHwFeatureInfoData_t {
+ struct IpaHwFeatureInfoParams_t {
+ u32 offset:16;
+ u32 size:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * struct IpaHwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @IpaHwFeatureInfoData_t : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct IpaHwEventInfoData_t {
+ u32 baseAddrOffset;
+ union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+ u32 featureMask;
+ u32 circBuffBaseAddrOffset;
+ struct IpaHwEventInfoData_t statsInfo;
+ struct IpaHwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa_uc_ntn_ctx {
+ u32 ntn_uc_stats_ofst;
+ struct IpaHwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa_hw_2_cpu_ntn_events - Values that represent HW event
+ * to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ * detected an error in NTN
+ *
+ */
+enum ipa_hw_2_cpu_ntn_events {
+ IPA_HW_2_CPU_EVENT_NTN_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error persists
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_ntn_errors {
+ IPA_HW_NTN_ERROR_NONE = 0,
+ IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_states {
+ IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_NTN_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ * num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_errors {
+ IPA_HW_NTN_CH_ERR_NONE = 0,
+ IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_NTN_TX_FSM_ERROR = 2,
+ IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+ IPA_HW_NTN_RX_FSM_ERROR = 5,
+ IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6,
+ IPA_HW_NTN_CH_ERR_RESERVED = 0xFF
+};
+
+
+/**
+ * struct IpaHwNtnSetUpCmdData_t - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ * ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ * Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ * Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of each data buffer allocated in
+ * DDR
+ */
+struct IpaHwNtnSetUpCmdData_t {
+ u32 ring_base_pa;
+ u32 buff_pool_base_pa;
+ u16 ntn_ring_size;
+ u16 num_buffers;
+ u32 ntn_reg_base_ptr_pa;
+ u8 ipa_pipe_number;
+ u8 dir;
+ u16 data_buff_size;
+
+} __packed;
+
+/**
+ * union IpaHwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear down command
+ *
+ *@ipa_pipe_number: IPA pipe number. This could be a Tx or an Rx pipe
+ */
+union IpaHwNtnCommonChCmdData_t {
+ struct IpaHwNtnCommonChCmdParams_t {
+ u32 ipa_pipe_number :8;
+ u32 reserved :24;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union IpaHwNTNErrorEventData_t - Structure holding the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ *@ntn_error_type: type of NTN error (see enum ipa_hw_ntn_errors)
+ *@ipa_pipe_number: IPA pipe number on which error has happened
+ * Applicable only if error type indicates channel error
+ *@ntn_ch_err_type: Information about the channel error (if
+ * available)
+ */
+union IpaHwNTNErrorEventData_t {
+ struct IpaHwNTNErrorEventParams_t {
+ u32 ntn_error_type :8;
+ u32 reserved :8;
+ u32 ipa_pipe_number :8;
+ u32 ntn_ch_err_type :8;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTNRxInfoData_t - Structure holding the NTN Rx channel
+ * information
+ *
+ *@max_outstanding_pkts: Number of outstanding packets in Rx
+ * Ring
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@rx_ring_rp_value: Read pointer last advertised to the WLAN FW
+ *
+ *@rx_ind_ring_stats:
+ *@bam_stats:
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_pkts_in_dis_uninit_state:
+ *@num_bam_int_handled_while_not_in_bam: Number of Bam
+ * Interrupts handled by FW
+ *@num_bam_int_handled_while_in_bam_state: Number of Bam
+ * Interrupts handled by FW
+ */
+struct NTNRxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_bam_int_handled_while_not_in_bam;
+ u32 num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * information. Ensure that this is always word aligned
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@tail_ptr_val: Latest value of doorbell written to copy engine
+ *@num_db_fired: Number of DB from uC FW to Copy engine
+ *
+ *@tx_comp_ring_stats:
+ *@bam_stats:
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_bam_int_in_non_running_state: Number of Bam interrupts
+ * while not in Running state
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@num_bam_int_handled_while_wait_for_bam: Number of times the
+ * Imm Cmd is injected due to fw_desc change
+ */
+struct NTNTxInfoData_t {
+ u32 num_pkts_processed;
+ u32 tail_ptr_val;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_running_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+ u32 num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct IpaHwStatsNTNInfoData_t - Structure holding the NTN Rx/Tx
+ * channel statistics. Ensure that this is always word aligned
+ *
+ */
+struct IpaHwStatsNTNInfoData_t {
+ struct NTNRxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+ struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands - Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ * Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ * Offload protocol's Tx/Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is initialized
+ * but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use
+ * in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa_hw_offload_channel_states {
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF
+};
+
+
+/**
+ * enum ipa_hw_2_cpu_offload_cmd_resp_status - Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_offload_cmd_resp_status {
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+ IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+ IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+ IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+ IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+ IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+ IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - Channel setup command parameters, one
+ * member per supported offload protocol
+ * @NtnSetupCh_params: NTN channel setup parameters
+ */
+union IpaHwSetUpCmd {
+ struct IpaHwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - Structure holding the parameters
+ * for the IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP command
+ * @protocol: offload protocol identifier
+ * @SetupCh_params: protocol-specific channel setup parameters
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+ u8 protocol;
+ union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ *
+ *
+ */
+union IpaHwCommonChCmd {
+ union IpaHwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+ u8 protocol;
+ union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
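All of the command, event and response identifiers in this header are built with the 3-bit feature / 5-bit opcode packing performed by FEATURE_ENUM_VAL(). The standalone user-space sketch below repeats the macros and the two feature values (taken from the ipa3_hw_features definition later in this patch) locally so it compiles on its own; it illustrates the encoding and is not driver code.

/* Illustration only: mirrors FEATURE_ENUM_VAL()/EXTRACT_UC_FEATURE(). */
#include <stdio.h>

#define FEATURE_ENUM_VAL(feature, opcode) (((feature) << 5) | (opcode))
#define EXTRACT_UC_FEATURE(value)         ((value) >> 5)

enum { IPA_HW_FEATURE_NTN = 0x5, IPA_HW_FEATURE_OFFLOAD = 0x6 };

int main(void)
{
	/* NTN error event: feature 0x5, opcode 0 -> 0xa0 */
	unsigned int ntn_err = FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0);
	/* Offload channel set up: feature 0x6, opcode 1 -> 0xc1 */
	unsigned int ch_setup = FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1);

	printf("NTN error event          = 0x%02x (feature %u)\n",
	       ntn_err, EXTRACT_UC_FEATURE(ntn_err));
	printf("Offload channel set up   = 0x%02x (feature %u)\n",
	       ch_setup, EXTRACT_UC_FEATURE(ch_setup));
	return 0;
}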
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index a45b51ad7b7b..a1072638b281 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -963,7 +963,7 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
- ep->wdi_state |= IPA_WDI_CONNECTED;
+ ep->uc_offload_state |= IPA_WDI_CONNECTED;
IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
return 0;
@@ -1001,7 +1001,7 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1013,8 +1013,8 @@ int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1067,7 +1067,7 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1079,8 +1079,8 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1105,7 +1105,7 @@ int ipa2_enable_wdi_pipe(u32 clnt_hdl)
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- ep->wdi_state |= IPA_WDI_ENABLED;
+ ep->uc_offload_state |= IPA_WDI_ENABLED;
IPADBG("client (ep: %d) enabled\n", clnt_hdl);
uc_timeout:
@@ -1135,7 +1135,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1147,8 +1147,8 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1206,7 +1206,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_ENABLED;
+ ep->uc_offload_state &= ~IPA_WDI_ENABLED;
IPADBG("client (ep: %d) disabled\n", clnt_hdl);
uc_timeout:
@@ -1235,7 +1235,7 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1247,8 +1247,8 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1273,7 +1273,7 @@ int ipa2_resume_wdi_pipe(u32 clnt_hdl)
else
IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
- ep->wdi_state |= IPA_WDI_RESUMED;
+ ep->uc_offload_state |= IPA_WDI_RESUMED;
IPADBG("client (ep: %d) resumed\n", clnt_hdl);
uc_timeout:
@@ -1302,7 +1302,7 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1314,9 +1314,9 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
ep = &ipa_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
IPA_WDI_RESUMED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1369,7 +1369,7 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
ipa_ctx->tag_process_before_gating = true;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_RESUMED;
+ ep->uc_offload_state &= ~IPA_WDI_RESUMED;
IPADBG("client (ep: %d) suspended\n", clnt_hdl);
uc_timeout:
@@ -1384,7 +1384,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1396,8 +1396,8 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
ep = &ipa_ctx->ep[clnt_hdl];
- if (!(ep->wdi_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
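The rename from wdi_state to uc_offload_state in the hunks above keeps the same discipline: the field is a bitmask that accumulates CONNECTED, ENABLED and RESUMED as the pipe moves through its lifecycle, and each entry point checks for the exact combination it requires. Below is a minimal user-space sketch of that flag discipline; the flag values are assumed to be the distinct BIT(0..2) flags the OR-ed checks imply (illustration only, not driver code).

#include <stdio.h>
#include <stdbool.h>

/* Assumed to mirror the IPA_UC_OFFLOAD_CONNECTED/ENABLED/RESUMED bits. */
#define UC_OFFLOAD_CONNECTED (1u << 0)
#define UC_OFFLOAD_ENABLED   (1u << 1)
#define UC_OFFLOAD_RESUMED   (1u << 2)

/* Suspend is only legal once the pipe is connected, enabled and resumed. */
static bool can_suspend(unsigned int state)
{
	return state == (UC_OFFLOAD_CONNECTED | UC_OFFLOAD_ENABLED |
			 UC_OFFLOAD_RESUMED);
}

int main(void)
{
	unsigned int state = 0;

	state |= UC_OFFLOAD_CONNECTED;	/* connect */
	state |= UC_OFFLOAD_ENABLED;	/* enable  */
	state |= UC_OFFLOAD_RESUMED;	/* resume  */
	printf("can suspend now: %d\n", can_suspend(state));	/* 1 */

	state &= ~UC_OFFLOAD_RESUMED;	/* suspend clears RESUMED */
	printf("can suspend again: %d\n", can_suspend(state));	/* 0 */
	return 0;
}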
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 1d88082352c6..b627cd1fc833 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -5084,6 +5084,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev;
api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info;
api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel;
+ api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb;
api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks;
api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks;
api_ctrl->ipa_inc_client_enable_clks_no_block =
@@ -5098,6 +5099,9 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
api_ctrl->ipa_rx_poll = ipa2_rx_poll;
api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
+ api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes;
+ api_ctrl->ipa_tear_down_uc_offload_pipes =
+ ipa2_tear_down_uc_offload_pipes;
return 0;
}
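The two hunks above follow the usual IPA API-controller pattern: version-specific entry points (here the new NTN setup/teardown and the ready-callback registration) are bound once into a table of function pointers, and common code dispatches through that table. A condensed standalone sketch of the pattern, with illustrative names rather than the driver's real types:

#include <stdio.h>

/* Illustrative function-pointer table, loosely modeled on the API controller. */
struct api_ctrl {
	int (*setup_uc_ntn_pipes)(int ul_client, int dl_client);
	int (*tear_down_uc_offload_pipes)(int ul_ep, int dl_ep);
};

static int v2_setup(int ul, int dl)
{
	printf("v2 setup ul=%d dl=%d\n", ul, dl);
	return 0;
}

static int v2_teardown(int ul, int dl)
{
	printf("v2 teardown ul=%d dl=%d\n", ul, dl);
	return 0;
}

/* Analogue of ipa2_bind_api_controller(): fill the table once. */
static void bind_v2(struct api_ctrl *ctrl)
{
	ctrl->setup_uc_ntn_pipes = v2_setup;
	ctrl->tear_down_uc_offload_pipes = v2_teardown;
}

int main(void)
{
	struct api_ctrl ctrl;

	bind_v2(&ctrl);
	ctrl.setup_uc_ntn_pipes(1, 2);		/* callers dispatch via the table */
	ctrl.tear_down_uc_offload_pipes(1, 2);
	return 0;
}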
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index 9653dd6d27f2..a4faaea715a8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_IPA3) += ipahal/
obj-$(CONFIG_IPA3) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
- ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 33066e8b9c19..4db07bad7d93 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -3714,6 +3714,12 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
else
IPADBG(":wdi init ok\n");
+ result = ipa3_ntn_init();
+ if (result)
+ IPAERR(":ntn init failed (%d)\n", -result);
+ else
+ IPADBG(":ntn init ok\n");
+
ipa3_register_panic_hdlr();
ipa3_ctx->q6_proxy_clk_vote_valid = true;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 0319c5c78b0d..c3c5ae38ec14 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -95,6 +95,7 @@ static struct dentry *dfile_ip6_flt_hw;
static struct dentry *dfile_stats;
static struct dentry *dfile_wstats;
static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
static struct dentry *dfile_dbg_cnt;
static struct dentry *dfile_msg;
static struct dentry *dfile_ip4_nat;
@@ -1184,6 +1185,110 @@ nxt_clnt_cons:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
+static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ struct Ipa3HwStatsNTNInfoData_t stats;
+ int nbytes;
+ int cnt = 0;
+
+ if (!ipa3_get_ntn_stats(&stats)) {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "TX num_pkts_processed=%u\n"
+ "TX tail_ptr_val=%u\n"
+ "TX num_db_fired=%u\n"
+ "TX ringFull=%u\n"
+ "TX ringEmpty=%u\n"
+ "TX ringUsageHigh=%u\n"
+ "TX ringUsageLow=%u\n"
+ "TX RingUtilCount=%u\n"
+ "TX bamFifoFull=%u\n"
+ "TX bamFifoEmpty=%u\n"
+ "TX bamFifoUsageHigh=%u\n"
+ "TX bamFifoUsageLow=%u\n"
+ "TX bamUtilCount=%u\n"
+ "TX num_db=%u\n"
+ "TX num_unexpected_db=%u\n"
+ "TX num_bam_int_handled=%u\n"
+ "TX num_bam_int_in_non_running_state=%u\n"
+ "TX num_qmb_int_handled=%u\n"
+ "TX num_bam_int_handled_while_wait_for_bam=%u\n"
+ "TX num_bam_int_handled_while_not_in_bam=%u\n",
+ TX_STATS(num_pkts_processed),
+ TX_STATS(tail_ptr_val),
+ TX_STATS(num_db_fired),
+ TX_STATS(tx_comp_ring_stats.ringFull),
+ TX_STATS(tx_comp_ring_stats.ringEmpty),
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+ TX_STATS(tx_comp_ring_stats.ringUsageLow),
+ TX_STATS(tx_comp_ring_stats.RingUtilCount),
+ TX_STATS(bam_stats.bamFifoFull),
+ TX_STATS(bam_stats.bamFifoEmpty),
+ TX_STATS(bam_stats.bamFifoUsageHigh),
+ TX_STATS(bam_stats.bamFifoUsageLow),
+ TX_STATS(bam_stats.bamUtilCount),
+ TX_STATS(num_db),
+ TX_STATS(num_unexpected_db),
+ TX_STATS(num_bam_int_handled),
+ TX_STATS(num_bam_int_in_non_running_state),
+ TX_STATS(num_qmb_int_handled),
+ TX_STATS(num_bam_int_handled_while_wait_for_bam),
+ TX_STATS(num_bam_int_handled_while_not_in_bam));
+ cnt += nbytes;
+ nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+ "RX max_outstanding_pkts=%u\n"
+ "RX num_pkts_processed=%u\n"
+ "RX rx_ring_rp_value=%u\n"
+ "RX ringFull=%u\n"
+ "RX ringEmpty=%u\n"
+ "RX ringUsageHigh=%u\n"
+ "RX ringUsageLow=%u\n"
+ "RX RingUtilCount=%u\n"
+ "RX bamFifoFull=%u\n"
+ "RX bamFifoEmpty=%u\n"
+ "RX bamFifoUsageHigh=%u\n"
+ "RX bamFifoUsageLow=%u\n"
+ "RX bamUtilCount=%u\n"
+ "RX num_bam_int_handled=%u\n"
+ "RX num_db=%u\n"
+ "RX num_unexpected_db=%u\n"
+ "RX num_pkts_in_dis_uninit_state=%u\n"
+ "num_ic_inj_vdev_change=%u\n"
+ "num_ic_inj_fw_desc_change=%u\n",
+ RX_STATS(max_outstanding_pkts),
+ RX_STATS(num_pkts_processed),
+ RX_STATS(rx_ring_rp_value),
+ RX_STATS(rx_ind_ring_stats.ringFull),
+ RX_STATS(rx_ind_ring_stats.ringEmpty),
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+ RX_STATS(rx_ind_ring_stats.ringUsageLow),
+ RX_STATS(rx_ind_ring_stats.RingUtilCount),
+ RX_STATS(bam_stats.bamFifoFull),
+ RX_STATS(bam_stats.bamFifoEmpty),
+ RX_STATS(bam_stats.bamFifoUsageHigh),
+ RX_STATS(bam_stats.bamFifoUsageLow),
+ RX_STATS(bam_stats.bamUtilCount),
+ RX_STATS(num_bam_int_handled),
+ RX_STATS(num_db),
+ RX_STATS(num_unexpected_db),
+ RX_STATS(num_pkts_in_dis_uninit_state),
+ RX_STATS(num_bam_int_handled_while_not_in_bam),
+ RX_STATS(num_bam_int_handled_while_in_bam_state));
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "Fail to read NTN stats\n");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -1747,6 +1852,10 @@ const struct file_operations ipa3_wdi_ops = {
.read = ipa3_read_wdi,
};
+const struct file_operations ipa3_ntn_ops = {
+ .read = ipa3_read_ntn,
+};
+
const struct file_operations ipa3_msg_ops = {
.read = ipa3_read_msg,
};
@@ -1931,6 +2040,13 @@ void ipa3_debugfs_init(void)
goto fail;
}
+ dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+ &ipa3_ntn_ops);
+ if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+ IPAERR("fail to create file for debug_fs ntn stats\n");
+ goto fail;
+ }
+
dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
&ipa3_dbg_cnt_ops);
if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
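Once this hunk is in place, the NTN counters produced by ipa3_read_ntn() become readable from user space through the new debugfs node. A hedged example reader follows; the path assumes debugfs is mounted at /sys/kernel/debug and that the IPA directory keeps its usual name, so verify both on the target.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Assumed path; adjust to the actual debugfs mount and IPA dir name. */
	const char *path = "/sys/kernel/debug/ipa/ntn";
	char buf[4096];
	size_t n;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	/* Dump the formatted TX/RX counter block to stdout. */
	while ((n = fread(buf, 1, sizeof(buf) - 1, f)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	fclose(f);
	return EXIT_SUCCESS;
}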
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 97a3117d44e9..806510ea8867 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -35,6 +35,7 @@
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"
#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
@@ -546,7 +547,7 @@ struct ipa3_ep_context {
bool skip_ep_cfg;
bool keep_ipa_awake;
struct ipa3_wlan_stats wstats;
- u32 wdi_state;
+ u32 uc_offload_state;
bool disconnect_in_progress;
u32 qmi_request_sent;
bool napi_enabled;
@@ -869,200 +870,6 @@ struct ipa3_tag_completion {
struct ipa3_controller;
/**
- * @brief Enum value determined based on the feature it
- * corresponds to
- * +----------------+----------------+
- * | 3 bits | 5 bits |
- * +----------------+----------------+
- * | HW_FEATURE | OPCODE |
- * +----------------+----------------+
- *
- */
-#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
-#define EXTRACT_UC_FEATURE(value) (value >> 5)
-
-#define IPA_HW_NUM_FEATURES 0x8
-
-/**
- * enum ipa3_hw_features - Values that represent the features supported in IPA HW
- * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
- * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
- * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
- * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
- * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW
-*/
-enum ipa3_hw_features {
- IPA_HW_FEATURE_COMMON = 0x0,
- IPA_HW_FEATURE_MHI = 0x1,
- IPA_HW_FEATURE_POWER_COLLAPSE = 0x2,
- IPA_HW_FEATURE_WDI = 0x3,
- IPA_HW_FEATURE_ZIP = 0x4,
- IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
-};
-
-/**
- * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
- * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
- * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
- * device
- * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
- */
-enum ipa3_hw_2_cpu_events {
- IPA_HW_2_CPU_EVENT_NO_OP =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
- IPA_HW_2_CPU_EVENT_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
- IPA_HW_2_CPU_EVENT_LOG_INFO =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
-};
-
-/**
- * enum ipa3_hw_errors - Common error types.
- * @IPA_HW_ERROR_NONE : No error persists
- * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
- * @IPA_HW_DMA_ERROR : Unexpected DMA error
- * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
- * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
- * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
- * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
- */
-enum ipa3_hw_errors {
- IPA_HW_ERROR_NONE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
- IPA_HW_INVALID_DOORBELL_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
- IPA_HW_DMA_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
- IPA_HW_FATAL_SYSTEM_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
- IPA_HW_INVALID_OPCODE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
- IPA_HW_INVALID_PARAMS =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
- IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
- IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
- IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
-};
-
-/**
- * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
- * section in 128B shared memory located in offset zero of SW Partition in IPA
- * SRAM.
- * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
- * @cmdParams : CPU->HW command parameter lower 32bit.
- * @cmdParams_hi : CPU->HW command parameter higher 32bit.
- * of parameters (immediate parameters) and point on structure in system memory
- * (in such case the address must be accessible for HW)
- * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
- * @responseParams : HW->CPU response parameter. The parameter filed can hold 32
- * bits of parameters (immediate parameters) and point on structure in system
- * memory
- * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
- * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits of
- * parameters (immediate parameters) and point on structure in system memory
- * @firstErrorAddress : Contains the address of first error-source on SNOC
- * @hwState : State of HW. The state carries information regarding the error type.
- * @warningCounter : The warnings counter. The counter carries information regarding
- * non fatal errors in HW
- * @interfaceVersionCommon : The Common interface version as reported by HW
- *
- * The shared memory is used for communication between IPA HW and CPU.
- */
-struct IpaHwSharedMemCommonMapping_t {
- u8 cmdOp;
- u8 reserved_01;
- u16 reserved_03_02;
- u32 cmdParams;
- u32 cmdParams_hi;
- u8 responseOp;
- u8 reserved_0D;
- u16 reserved_0F_0E;
- u32 responseParams;
- u8 eventOp;
- u8 reserved_15;
- u16 reserved_17_16;
- u32 eventParams;
- u32 firstErrorAddress;
- u8 hwState;
- u8 warningCounter;
- u16 reserved_23_22;
- u16 interfaceVersionCommon;
- u16 reserved_27_26;
-} __packed;
-
-/**
- * union IpaHwFeatureInfoData_t - parameters for stats/config blob
- *
- * @offset : Location of a feature within the EventInfoData
- * @size : Size of the feature
- */
-union IpaHwFeatureInfoData_t {
- struct IpaHwFeatureInfoParams_t {
- u32 offset:16;
- u32 size:16;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
- * union IpaHwErrorEventData_t - HW->CPU Common Events
- * @errorType : Entered when a system error is detected by the HW. Type of
- * error is specified by IPA_HW_ERRORS
- * @reserved : Reserved
- */
-union IpaHwErrorEventData_t {
- struct IpaHwErrorEventParams_t {
- u32 errorType:8;
- u32 reserved:24;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
- * struct IpaHwEventInfoData_t - Structure holding the parameters for
- * statistics and config info
- *
- * @baseAddrOffset : Base Address Offset of the statistics or config
- * structure from IPA_WRAPPER_BASE
- * @IpaHwFeatureInfoData_t : Location and size of each feature within
- * the statistics or config structure
- *
- * @note Information about each feature in the featureInfo[]
- * array is populated at predefined indices per the IPA_HW_FEATURES
- * enum definition
- */
-struct IpaHwEventInfoData_t {
- u32 baseAddrOffset;
- union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
-} __packed;
-
-/**
- * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
- * IPA_HW_2_CPU_EVENT_LOG_INFO Event
- *
- * @featureMask : Mask indicating the features enabled in HW.
- * Refer IPA_HW_FEATURE_MASK
- * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
- * Log Buffer structure
- * @statsInfo : Statistics related information
- * @configInfo : Configuration related information
- *
- * @note The offset location of this structure from IPA_WRAPPER_BASE
- * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
- * Event
- */
-struct IpaHwEventLogInfoData_t {
- u32 featureMask;
- u32 circBuffBaseAddrOffset;
- struct IpaHwEventInfoData_t statsInfo;
- struct IpaHwEventInfoData_t configInfo;
-
-} __packed;
-
-/**
* struct ipa3_uc_hdlrs - IPA uC callback functions
* @ipa_uc_loaded_hdlr: Function handler when uC is loaded
* @ipa_uc_event_hdlr: Event handler function
@@ -1393,6 +1200,7 @@ struct ipa3_context {
struct ipa3_uc_ctx uc_ctx;
struct ipa3_uc_wdi_ctx uc_wdi_ctx;
+ struct ipa3_uc_ntn_ctx uc_ntn_ctx;
u32 wan_rx_ring_size;
bool skip_uc_pipe_reset;
enum ipa_transport_type transport_prototype;
@@ -1854,6 +1662,11 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl);
int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp);
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
/*
* To retrieve doorbell physical address of
* wlan pipes
@@ -2197,4 +2010,6 @@ void ipa3_recycle_wan_skb(struct sk_buff *skb);
int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map);
int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr,
u32 size, bool map);
+int ipa3_ntn_init(void);
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
new file mode 100644
index 000000000000..7b891843028d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+static void ipa3_uc_ntn_event_handler(struct IpaHwSharedMemCommonMapping_t
+ *uc_sram_mmio)
+
+{
+ union Ipa3HwNTNErrorEventData_t ntn_evt;
+
+ if (uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+ ntn_evt.raw32b = uc_sram_mmio->eventParams;
+ IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+ ntn_evt.params.ntn_error_type,
+ ntn_evt.params.ipa_pipe_number,
+ ntn_evt.params.ntn_ch_err_type);
+ }
+}
+
+static void ipa3_uc_ntn_event_log_info_handler(
+struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+
+{
+ if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+ IPAERR("NTN feature missing 0x%x\n",
+ uc_event_top_mmio->featureMask);
+ return;
+ }
+
+ if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+ params.size != sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
+ IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+ sizeof(struct Ipa3HwStatsNTNInfoData_t),
+ uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.size);
+ return;
+ }
+
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+ statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+ featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+ IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+ sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+ ipa3_ctx->smem_sz) {
+ IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+ return;
+ }
+
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+ ioremap(ipa3_ctx->ipa_wrapper_base +
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+ sizeof(struct Ipa3HwStatsNTNInfoData_t));
+ if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("fail to ioremap uc ntn stats\n");
+ return;
+ }
+}
+
+/**
+ * ipa3_get_ntn_stats() - Query NTN statistics from uC
+ * @stats: [inout] stats blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+ if (unlikely(!ipa3_ctx)) {
+ IPAERR("IPA driver was not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+ IPAERR("bad parms stats=%p ntn_stats=%p\n",
+ stats,
+ ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+ return -EINVAL;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ TX_STATS(num_pkts_processed);
+ TX_STATS(tail_ptr_val);
+ TX_STATS(num_db_fired);
+ TX_STATS(tx_comp_ring_stats.ringFull);
+ TX_STATS(tx_comp_ring_stats.ringEmpty);
+ TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+ TX_STATS(tx_comp_ring_stats.ringUsageLow);
+ TX_STATS(tx_comp_ring_stats.RingUtilCount);
+ TX_STATS(bam_stats.bamFifoFull);
+ TX_STATS(bam_stats.bamFifoEmpty);
+ TX_STATS(bam_stats.bamFifoUsageHigh);
+ TX_STATS(bam_stats.bamFifoUsageLow);
+ TX_STATS(bam_stats.bamUtilCount);
+ TX_STATS(num_db);
+ TX_STATS(num_unexpected_db);
+ TX_STATS(num_bam_int_handled);
+ TX_STATS(num_bam_int_in_non_running_state);
+ TX_STATS(num_qmb_int_handled);
+ TX_STATS(num_bam_int_handled_while_wait_for_bam);
+ TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+ RX_STATS(max_outstanding_pkts);
+ RX_STATS(num_pkts_processed);
+ RX_STATS(rx_ring_rp_value);
+ RX_STATS(rx_ind_ring_stats.ringFull);
+ RX_STATS(rx_ind_ring_stats.ringEmpty);
+ RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+ RX_STATS(rx_ind_ring_stats.ringUsageLow);
+ RX_STATS(rx_ind_ring_stats.RingUtilCount);
+ RX_STATS(bam_stats.bamFifoFull);
+ RX_STATS(bam_stats.bamFifoEmpty);
+ RX_STATS(bam_stats.bamFifoUsageHigh);
+ RX_STATS(bam_stats.bamFifoUsageLow);
+ RX_STATS(bam_stats.bamUtilCount);
+ RX_STATS(num_bam_int_handled);
+ RX_STATS(num_db);
+ RX_STATS(num_unexpected_db);
+ RX_STATS(num_pkts_in_dis_uninit_state);
+ RX_STATS(num_bam_int_handled_while_not_in_bam);
+ RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+ return 0;
+}
+
+int ipa3_ntn_init(void)
+{
+ struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
+
+ uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler;
+ uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+ ipa3_uc_ntn_event_log_info_handler;
+
+ ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+ return 0;
+}
+
+static int ipa3_uc_send_ntn_setup_pipe_cmd(
+ struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+ int ipa_ep_idx;
+ int result = 0;
+ struct ipa_mem_buffer cmd;
+ struct Ipa3HwNtnSetUpCmdData_t *Ntn_params;
+ struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+ if (ntn_info == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+ if (ipa_ep_idx == -1) {
+ IPAERR("fail to get ep idx.\n");
+ return -EFAULT;
+ }
+
+ IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+ IPADBG("ring_base_pa = 0x%pa\n",
+ &ntn_info->ring_base_pa);
+ IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+ IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+ IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+ IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+ IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+ Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+ Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+ Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+ Ntn_params->num_buffers = ntn_info->num_buffers;
+ Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+ Ntn_params->data_buff_size = ntn_info->data_buff_size;
+ Ntn_params->ipa_pipe_number = ipa_ep_idx;
+ Ntn_params->dir = dir;
+
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result)
+ result = -EFAULT;
+
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ return result;
+}
+
+/**
+ * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+ ipa_notify_cb notify, void *priv, u8 hdr_len,
+ struct ipa_ntn_conn_out_params *outp)
+{
+ struct ipa3_ep_context *ep_ul;
+ struct ipa3_ep_context *ep_dl;
+ int ipa_ep_idx_ul;
+ int ipa_ep_idx_dl;
+ int result = 0;
+
+ if (in == NULL) {
+ IPAERR("invalid input\n");
+ return -EINVAL;
+ }
+
+ ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+ ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+ if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+ IPAERR("fail to alloc EP.\n");
+ return -EFAULT;
+ }
+
+ ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->valid || ep_dl->valid) {
+ IPAERR("EP already allocated.\n");
+ return -EFAULT;
+ }
+
+ memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys));
+ memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys));
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ /* setup ul ep cfg */
+ ep_ul->valid = 1;
+ ep_ul->client = in->ul.client;
+ result = ipa3_enable_data_path(ipa_ep_idx_ul);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_ul);
+ result = -EFAULT;
+ goto fail;
+ }
+ ep_ul->client_notify = notify;
+ ep_ul->priv = priv;
+
+ memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+ ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+ ep_ul->cfg.hdr.hdr_len = hdr_len;
+ ep_ul->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+ IPAERR("fail to setup ul pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+ IPAERR("fail to send cmd to uc for ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
+ outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+ ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->ul.client,
+ ipa_ep_idx_ul);
+
+ /* setup dl ep cfg */
+ ep_dl->valid = 1;
+ ep_dl->client = in->dl.client;
+ result = ipa3_enable_data_path(ipa_ep_idx_dl);
+ if (result) {
+ IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+ ipa_ep_idx_dl);
+ result = -EFAULT;
+ goto fail;
+ }
+
+ memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg));
+ ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+ ep_dl->cfg.hdr.hdr_len = hdr_len;
+ ep_dl->cfg.mode.mode = IPA_BASIC;
+
+ if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+ IPAERR("fail to setup dl pipe cfg\n");
+ result = -EFAULT;
+ goto fail;
+ }
+
+ if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+ IPAERR("fail to send cmd to uc for dl pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+ ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+ IPADBG("client %d (ep: %d) connected\n", in->dl.client,
+ ipa_ep_idx_dl);
+
+fail:
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
+
+/**
+ * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+ int ipa_ep_idx_dl)
+{
+ struct ipa_mem_buffer cmd;
+ struct ipa3_ep_context *ep_ul, *ep_dl;
+ struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+ union Ipa3HwNtnCommonChCmdData_t *tear;
+ int result = 0;
+
+ IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+ IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+ ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+ ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+ if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+ ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+ IPAERR("channel bad state: ul %d dl %d\n",
+ ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+ return -EFAULT;
+ }
+
+ cmd.size = sizeof(*cmd_data);
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ IPAERR("fail to get DMA memory.\n");
+ return -ENOMEM;
+ }
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ /* teardown the UL pipe */
+ cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+ cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+ tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+ tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa3_disable_data_path(ipa_ep_idx_ul);
+ ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
+ memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
+ IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+ /* teardown the DL pipe */
+ tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+ result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10*HZ);
+ if (result) {
+ IPAERR("fail to tear down ul pipe\n");
+ result = -EFAULT;
+ goto fail;
+ }
+ ipa3_disable_data_path(ipa_ep_idx_dl);
+ memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+ IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ return result;
+}
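Because the set-up and tear-down payloads above are handed to the uC as __packed DMA buffers, their byte layout is fixed entirely by field order. The standalone sketch below mirrors struct Ipa3HwNtnSetUpCmdData_t locally (illustration only, not the driver's header) and checks the expected 20-byte packed size and tail-field offsets.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Local mirror of struct Ipa3HwNtnSetUpCmdData_t, for illustration only. */
struct ntn_setup_cmd {
	uint32_t ring_base_pa;
	uint32_t buff_pool_base_pa;
	uint16_t ntn_ring_size;
	uint16_t num_buffers;
	uint32_t ntn_reg_base_ptr_pa;
	uint8_t  ipa_pipe_number;
	uint8_t  dir;
	uint16_t data_buff_size;
} __attribute__((packed));

int main(void)
{
	/* 4 + 4 + 2 + 2 + 4 + 1 + 1 + 2 = 20 bytes when packed */
	printf("packed size            = %zu\n", sizeof(struct ntn_setup_cmd));
	printf("ipa_pipe_number offset = %zu\n",
	       offsetof(struct ntn_setup_cmd, ipa_pipe_number));
	printf("data_buff_size offset  = %zu\n",
	       offsetof(struct ntn_setup_cmd, data_buff_size));
	return 0;
}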
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
new file mode 100644
index 000000000000..946fc7e31fb9
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -0,0 +1,580 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ * @brief Enum value determined based on the feature it
+ * corresponds to
+ * +----------------+----------------+
+ * | 3 bits | 5 bits |
+ * +----------------+----------------+
+ * | HW_FEATURE | OPCODE |
+ * +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa3_hw_features - Values that represent the features supported
+ * in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to uC offload operation in IPA HW
+*/
+enum ipa3_hw_features {
+ IPA_HW_FEATURE_COMMON = 0x0,
+ IPA_HW_FEATURE_MHI = 0x1,
+ IPA_HW_FEATURE_POWER_COLLAPSE = 0x2,
+ IPA_HW_FEATURE_WDI = 0x3,
+ IPA_HW_FEATURE_ZIP = 0x4,
+ IPA_HW_FEATURE_NTN = 0x5,
+ IPA_HW_FEATURE_OFFLOAD = 0x6,
+ IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
+};
+
+/**
+ * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
+ * device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ */
+enum ipa3_hw_2_cpu_events {
+ IPA_HW_2_CPU_EVENT_NO_OP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_2_CPU_EVENT_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_2_CPU_EVENT_LOG_INFO =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa3_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error persists
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
+ * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
+ */
+enum ipa3_hw_errors {
+ IPA_HW_ERROR_NONE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_INVALID_DOORBELL_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_DMA_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_HW_FATAL_SYSTEM_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+ IPA_HW_INVALID_OPCODE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+ IPA_HW_INVALID_PARAMS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+ IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+ IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+ IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter lower 32bit.
+ * @cmdParams_hi : CPU->HW command parameter higher 32bit. The parameter
+ * field can hold 32 bits of parameters (immediate parameters) and point
+ * on structure in system memory (in such case the address must be
+ * accessible for HW)
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter field can hold 32
+ * bits of parameters (immediate parameters) and point on structure in system
+ * memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter field can hold 32
+ * bits of parameters (immediate parameters) and point on
+ * structure in system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the
+ * error type.
+ * @warningCounter : The warnings counter. The counter carries information
+ * regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+ u8 cmdOp;
+ u8 reserved_01;
+ u16 reserved_03_02;
+ u32 cmdParams;
+ u32 cmdParams_hi;
+ u8 responseOp;
+ u8 reserved_0D;
+ u16 reserved_0F_0E;
+ u32 responseParams;
+ u8 eventOp;
+ u8 reserved_15;
+ u16 reserved_17_16;
+ u32 eventParams;
+ u32 firstErrorAddress;
+ u8 hwState;
+ u8 warningCounter;
+ u16 reserved_23_22;
+ u16 interfaceVersionCommon;
+ u16 reserved_27_26;
+} __packed;
+
+/**
+ * union Ipa3HwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union Ipa3HwFeatureInfoData_t {
+ struct IpaHwFeatureInfoParams_t {
+ u32 offset:16;
+ u32 size:16;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+ struct IpaHwErrorEventParams_t {
+ u32 errorType:8;
+ u32 reserved:24;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
+ * struct Ipa3HwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @featureInfo : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct Ipa3HwEventInfoData_t {
+ u32 baseAddrOffset;
+ union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+ u32 featureMask;
+ u32 circBuffBaseAddrOffset;
+ struct Ipa3HwEventInfoData_t statsInfo;
+ struct Ipa3HwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa3_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa3_uc_ntn_ctx {
+ u32 ntn_uc_stats_ofst;
+ struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+ void *priv;
+ ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa3_hw_2_cpu_ntn_events - Values that represent HW event
+ * to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ * detected an error in NTN
+ *
+ */
+enum ipa3_hw_2_cpu_ntn_events {
+ IPA_HW_2_CPU_EVENT_NTN_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa3_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error persists
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa3_hw_ntn_errors {
+ IPA_HW_NTN_ERROR_NONE = 0,
+ IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_states {
+ IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_NTN_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Tx ring
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ * num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ * failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ * transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_errors {
+ IPA_HW_NTN_CH_ERR_NONE = 0,
+ IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+ IPA_HW_NTN_TX_FSM_ERROR = 2,
+ IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3,
+ IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+ IPA_HW_NTN_RX_FSM_ERROR = 5,
+ IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6,
+ IPA_HW_NTN_CH_ERR_RESERVED = 0xFF
+};
+
+
+/**
+ * struct Ipa3HwNtnSetUpCmdData_t - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ * ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ * Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ * Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of each data buffer allocated in
+ * DDR
+ */
+struct Ipa3HwNtnSetUpCmdData_t {
+ u32 ring_base_pa;
+ u32 buff_pool_base_pa;
+ u16 ntn_ring_size;
+ u16 num_buffers;
+ u32 ntn_reg_base_ptr_pa;
+ u8 ipa_pipe_number;
+ u8 dir;
+ u16 data_buff_size;
+
+} __packed;
+
+/**
+ * union Ipa3HwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear down command
+ *
+ *@ipa_pipe_number: IPA pipe number. This could be a Tx or an Rx pipe
+ */
+union Ipa3HwNtnCommonChCmdData_t {
+ struct IpaHwNtnCommonChCmdParams_t {
+ u32 ipa_pipe_number :8;
+ u32 reserved :24;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union Ipa3HwNTNErrorEventData_t - Structure holding the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ *@ntn_error_type: type of NTN error (ipa3_hw_ntn_errors)
+ *@ipa_pipe_number: IPA pipe number on which error has happened
+ * Applicable only if error type indicates channel error
+ *@ntn_ch_err_type: Information about the channel error (if
+ * available)
+ */
+union Ipa3HwNTNErrorEventData_t {
+ struct IpaHwNTNErrorEventParams_t {
+ u32 ntn_error_type :8;
+ u32 reserved :8;
+ u32 ipa_pipe_number :8;
+ u32 ntn_ch_err_type :8;
+ } __packed params;
+ uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTN3RxInfoData_t - Structure holding the NTN Rx channel
+ * information
+ *
+ *@max_outstanding_pkts: Number of outstanding packets in Rx
+ * Ring
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@rx_ring_rp_value: Read pointer last advertised to the WLAN FW
+ *
+ *@rx_ind_ring_stats:
+ *@bam_stats:
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_pkts_in_dis_uninit_state:
+ *@num_bam_int_handled_while_not_in_bam: Number of Bam
+ * Interrupts handled by FW
+ *@num_bam_int_handled_while_in_bam_state: Number of Bam
+ * Interrupts handled by FW
+ */
+struct NTN3RxInfoData_t {
+ u32 max_outstanding_pkts;
+ u32 num_pkts_processed;
+ u32 rx_ring_rp_value;
+ struct IpaHwRingStats_t rx_ind_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_bam_int_handled;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_pkts_in_dis_uninit_state;
+ u32 num_bam_int_handled_while_not_in_bam;
+ u32 num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * information. Ensure that this is always word aligned
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@tail_ptr_val: Latest value of doorbell written to copy engine
+ *@num_db_fired: Number of DB from uC FW to Copy engine
+ *
+ *@tx_comp_ring_stats:
+ *@bam_stats:
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_bam_int_in_non_running_state: Number of Bam interrupts
+ * while not in Running state
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@num_bam_int_handled_while_wait_for_bam: Number of times the
+ * Imm Cmd is injected due to fw_desc change
+ */
+struct NTNTxInfoData_t {
+ u32 num_pkts_processed;
+ u32 tail_ptr_val;
+ u32 num_db_fired;
+ struct IpaHwRingStats_t tx_comp_ring_stats;
+ struct IpaHwBamStats_t bam_stats;
+ u32 num_db;
+ u32 num_unexpected_db;
+ u32 num_bam_int_handled;
+ u32 num_bam_int_in_non_running_state;
+ u32 num_qmb_int_handled;
+ u32 num_bam_int_handled_while_wait_for_bam;
+ u32 num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Rx
+ * and Tx channel statistics. Ensure that this is always word
+ * aligned
+ *
+ *@rx_ch_stats: Per-channel NTN Rx statistics
+ *@tx_ch_stats: Per-channel NTN Tx statistics
+ */
+struct Ipa3HwStatsNTNInfoData_t {
+ struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+ struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands - Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ * Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ * Offload protocol's Tx/Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+ IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa3_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is
+ * initialized but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running.
+ * Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be used in an operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa3_hw_offload_channel_states {
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3,
+ IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF
+};
+
+
+/**
+ * enum ipa3_hw_2_cpu_offload_cmd_resp_status - Values that
+ * represent offload related command response status to be sent
+ * to CPU.
+ */
+enum ipa3_hw_2_cpu_offload_cmd_resp_status {
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+ IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+ IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+ IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+ IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+ IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+ IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+ IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+ IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+ IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - Setup command parameters for the
+ * supported offload protocols
+ *
+ *@NtnSetupCh_params: NTN channel setup parameters
+ */
+union IpaHwSetUpCmd {
+ struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - Structure holding the
+ * parameters for IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP
+ *
+ *@protocol: Offload protocol for which the channel is set up
+ *@SetupCh_params: Protocol specific channel setup parameters
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+ u8 protocol;
+ union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ *
+ *@NtnCommonCh_params: NTN channel tear-down parameters
+ */
+union IpaHwCommonChCmd {
+ union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+ u8 protocol;
+ union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
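/*
 * A minimal stand-alone sketch of building an
 * IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP payload from the structures
 * declared in this header. The mirror types are simplified stand-ins
 * for Ipa3HwNtnSetUpCmdData_t / IpaHwOffloadSetUpCmdData_t, and every
 * field value below is a placeholder, not a value taken from hardware.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ntn_setup_sketch {
	uint32_t ring_base_pa;
	uint32_t buff_pool_base_pa;
	uint16_t ntn_ring_size;
	uint16_t num_buffers;
	uint32_t ntn_reg_base_ptr_pa;
	uint8_t  ipa_pipe_number;
	uint8_t  dir;			/* assumed encoding: 0 = Rx, 1 = Tx */
	uint16_t data_buff_size;
} __attribute__((packed));

struct offload_setup_cmd_sketch {
	uint8_t protocol;		/* hypothetical protocol id */
	struct ntn_setup_sketch ch;
} __attribute__((packed));

int main(void)
{
	struct offload_setup_cmd_sketch cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.protocol = 1;			/* placeholder */
	cmd.ch.ring_base_pa = 0x80001000;	/* placeholder addresses */
	cmd.ch.buff_pool_base_pa = 0x80002000;
	cmd.ch.ntn_ring_size = 256;
	cmd.ch.num_buffers = 256;
	cmd.ch.ntn_reg_base_ptr_pa = 0x80003000;
	cmd.ch.ipa_pipe_number = 5;
	cmd.ch.dir = 1;
	cmd.ch.data_buff_size = 2048;

	printf("setup command payload is %zu bytes\n", sizeof(cmd));
	return 0;
}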
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 1caccddf5834..e0f32bdcbb3d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1190,7 +1190,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
- ep->wdi_state |= IPA_WDI_CONNECTED;
+ ep->uc_offload_state |= IPA_WDI_CONNECTED;
IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
return 0;
@@ -1222,7 +1222,7 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1234,8 +1234,8 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1283,7 +1283,7 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1295,8 +1295,8 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != IPA_WDI_CONNECTED) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
@@ -1319,7 +1319,7 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ep->wdi_state |= IPA_WDI_ENABLED;
+ ep->uc_offload_state |= IPA_WDI_ENABLED;
IPADBG("client (ep: %d) enabled\n", clnt_hdl);
uc_timeout:
@@ -1345,7 +1345,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1379,8 +1379,8 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
@@ -1436,7 +1436,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_ENABLED;
+ ep->uc_offload_state &= ~IPA_WDI_ENABLED;
IPADBG("client (ep: %d) disabled\n", clnt_hdl);
uc_timeout:
@@ -1460,7 +1460,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1472,8 +1472,8 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
@@ -1497,7 +1497,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
else
IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
- ep->wdi_state |= IPA_WDI_RESUMED;
+ ep->uc_offload_state |= IPA_WDI_RESUMED;
IPADBG("client (ep: %d) resumed\n", clnt_hdl);
uc_timeout:
@@ -1521,7 +1521,7 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1533,9 +1533,9 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (ep->wdi_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+ if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
IPA_WDI_RESUMED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
@@ -1588,7 +1588,7 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
ipa3_ctx->tag_process_before_gating = true;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ep->wdi_state &= ~IPA_WDI_RESUMED;
+ ep->uc_offload_state &= ~IPA_WDI_RESUMED;
IPADBG("client (ep: %d) suspended\n", clnt_hdl);
uc_timeout:
@@ -1603,7 +1603,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm.\n");
+ IPAERR("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1615,8 +1615,8 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
ep = &ipa3_ctx->ep[clnt_hdl];
- if (!(ep->wdi_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->wdi_state);
+ if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+ IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
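/*
 * A minimal stand-alone sketch of the bit-flag state machine that the
 * wdi_state -> uc_offload_state rename in the hunk above keeps intact
 * (connect -> enable -> resume, with suspend requiring all three).
 * The SK_* flags are assumptions mirroring IPA_WDI_CONNECTED/ENABLED/
 * RESUMED, not the driver's definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_CONNECTED (1u << 0)
#define SK_ENABLED   (1u << 1)
#define SK_RESUMED   (1u << 2)

static int can_suspend(uint32_t state)
{
	/* suspend is only legal when connected, enabled and resumed */
	return state == (SK_CONNECTED | SK_ENABLED | SK_RESUMED);
}

int main(void)
{
	uint32_t state = 0;

	state |= SK_CONNECTED;			/* after connect */
	state |= SK_ENABLED;			/* after enable */
	printf("can suspend before resume: %d\n", can_suspend(state));
	state |= SK_RESUMED;			/* after resume */
	printf("can suspend after resume:  %d\n", can_suspend(state));
	state &= ~SK_RESUMED;			/* suspend clears RESUMED */
	return 0;
}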
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 5499eba92b1c..b89dcfe18925 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -3196,6 +3196,9 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
api_ctrl->ipa_rx_poll = ipa3_rx_poll;
api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
+ api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes;
+ api_ctrl->ipa_tear_down_uc_offload_pipes =
+ ipa3_tear_down_uc_offload_pipes;
return 0;
}
diff --git a/drivers/regulator/cpr3-mmss-regulator.c b/drivers/regulator/cpr3-mmss-regulator.c
index e5055708a871..232bcf8fcf31 100644
--- a/drivers/regulator/cpr3-mmss-regulator.c
+++ b/drivers/regulator/cpr3-mmss-regulator.c
@@ -50,6 +50,9 @@
* @limitation: CPR limitation select fuse parameter value
* @aging_init_quot_diff: Initial quotient difference between CPR aging
* min and max sensors measured at time of manufacturing
+ * @force_highest_corner: Flag indicating that all corners must operate
+ * at the voltage of the highest corner. This is
+ * applicable to MSMCOBALT only.
*
* This struct holds the values for all of the fuses read from memory.
*/
@@ -60,6 +63,7 @@ struct cpr3_msm8996_mmss_fuses {
u64 cpr_fusing_rev;
u64 limitation;
u64 aging_init_quot_diff;
+ u64 force_highest_corner;
};
/* Fuse combos 0 - 7 map to CPR fusing revision 0 - 7 */
@@ -158,6 +162,12 @@ msmcobalt_mmss_offset_voltage_param[MSM8996_MMSS_FUSE_CORNERS][2] = {
{{65, 44, 47}, {} },
};
+static const struct cpr3_fuse_param
+msmcobalt_cpr_force_highest_corner_param[] = {
+ {100, 45, 45},
+ {},
+};
+
#define MSM8996PRO_SOC_ID 4
#define MSMCOBALT_SOC_ID 5
@@ -243,6 +253,12 @@ enum msmcobalt_cpr_partial_binning {
MSMCOBALT_CPR_PARTIAL_BINNING_SAFE_CORNER = 0xE,
};
+/*
+ * The partial binning open-loop voltage fuse values only apply to the lowest
+ * two fuse corners (0 and 1, i.e. MinSVS and SVS).
+ */
+#define MSMCOBALT_CPR_PARTIAL_BINNING_MAX_FUSE_CORNER 1
+
/**
* cpr3_msm8996_mmss_read_fuse_data() - load MMSS specific fuse parameter values
* @vreg: Pointer to the CPR3 regulator
@@ -338,6 +354,19 @@ static int cpr3_msm8996_mmss_read_fuse_data(struct cpr3_regulator *vreg)
}
if (vreg->thread->ctrl->soc_revision == MSMCOBALT_SOC_ID) {
+ rc = cpr3_read_fuse_param(base,
+ msmcobalt_cpr_force_highest_corner_param,
+ &fuse->force_highest_corner);
+ if (rc) {
+ cpr3_err(vreg, "Unable to read CPR force highest corner fuse, rc=%d\n",
+ rc);
+ return rc;
+ }
+ if (fuse->force_highest_corner)
+ cpr3_info(vreg, "Fusing requires all corners to operate at the highest corner\n");
+ }
+
+ if (vreg->thread->ctrl->soc_revision == MSMCOBALT_SOC_ID) {
combo_max = CPR3_MSMCOBALT_MMSS_FUSE_COMBO_COUNT;
vreg->fuse_combo = fuse->cpr_fusing_rev;
} else if (vreg->thread->ctrl->soc_revision == MSM8996PRO_SOC_ID) {
@@ -738,7 +767,8 @@ static int cpr3_msm8996_mmss_calculate_open_loop_voltages(
*/
if (is_msmcobalt &&
(volt_init == MSMCOBALT_CPR_PARTIAL_BINNING_NEXT_CORNER ||
- volt_init == MSMCOBALT_CPR_PARTIAL_BINNING_SAFE_CORNER))
+ volt_init == MSMCOBALT_CPR_PARTIAL_BINNING_SAFE_CORNER) &&
+ i <= MSMCOBALT_CPR_PARTIAL_BINNING_MAX_FUSE_CORNER)
volt_init = MSM8996_MMSS_MIN_VOLTAGE_FUSE_VAL;
fuse_volt[i] = cpr3_convert_open_loop_voltage_fuse(ref_volt[i],
@@ -849,19 +879,43 @@ static int cpr3_msmcobalt_partial_binning_override(struct cpr3_regulator *vreg)
u32 proc_freq;
struct cpr3_corner *corner;
struct cpr3_corner *safe_corner;
- int i, j, low, high, safe_fuse_corner;
+ int i, j, low, high, safe_fuse_corner, max_fuse_corner;
if (vreg->thread->ctrl->soc_revision != MSMCOBALT_SOC_ID)
return 0;
- /* Loop over all fuse corners except for the highest one. */
- for (i = 0; i < vreg->fuse_corner_count - 1; i++) {
+ /* Handle the force highest corner fuse. */
+ if (fuse->force_highest_corner) {
+ cpr3_info(vreg, "overriding CPR parameters for corners 0 to %d with quotients and voltages of corner %d\n",
+ vreg->corner_count - 2, vreg->corner_count - 1);
+ corner = &vreg->corner[vreg->corner_count - 1];
+ for (i = 0; i < vreg->corner_count - 1; i++) {
+ proc_freq = vreg->corner[i].proc_freq;
+ vreg->corner[i] = *corner;
+ vreg->corner[i].proc_freq = proc_freq;
+ }
+
+ /*
+ * Return since the potential partial binning fuse values are
+ * superseded by the force highest corner fuse value.
+ */
+ return 0;
+ }
+
+ /*
+ * Allow up to the max corner which can be fused with partial
+ * binning values.
+ */
+ max_fuse_corner = min(MSMCOBALT_CPR_PARTIAL_BINNING_MAX_FUSE_CORNER,
+ vreg->fuse_corner_count - 2);
+
+ for (i = 0; i <= max_fuse_corner; i++) {
/* Determine which higher corners to override with (if any). */
if (fuse->init_voltage[i] != next
&& fuse->init_voltage[i] != safe)
continue;
- for (j = i + 1; j < vreg->fuse_corner_count - 1; j++)
+ for (j = i + 1; j <= max_fuse_corner; j++)
if (fuse->init_voltage[j] != next
&& fuse->init_voltage[j] != safe)
break;
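/*
 * A minimal stand-alone sketch of the force-highest-corner handling in
 * the hunk above: every lower corner takes the CPR parameters of the
 * highest corner while keeping its own proc_freq. The corner type is a
 * simplified stand-in for struct cpr3_corner and the values are
 * placeholders.
 */
#include <stdio.h>

struct corner_sketch {
	unsigned int proc_freq;		/* per-corner clock frequency */
	int open_loop_volt;		/* simplified CPR voltage parameter */
};

static void force_highest_corner(struct corner_sketch *c, int count)
{
	struct corner_sketch highest = c[count - 1];
	int i;

	for (i = 0; i < count - 1; i++) {
		unsigned int freq = c[i].proc_freq;

		c[i] = highest;		/* copy top corner's parameters */
		c[i].proc_freq = freq;	/* but keep this corner's frequency */
	}
}

int main(void)
{
	struct corner_sketch corners[] = {
		{ 300000000, 650000 },
		{ 500000000, 750000 },
		{ 700000000, 900000 },
	};

	force_highest_corner(corners, 3);
	/* prints: corner0: freq=300000000 volt=900000 */
	printf("corner0: freq=%u volt=%d\n", corners[0].proc_freq,
	       corners[0].open_loop_volt);
	return 0;
}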
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 3f8aa534c220..c45cbfa8a786 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -104,6 +104,16 @@ config MSM_GLINK_SMEM_NATIVE_XPRT
transport to only connecting with entities internal to the
System-on-Chip.
+config MSM_GLINK_SPI_XPRT
+ depends on MSM_GLINK
+ tristate "Generic Link (G-Link) SPI Transport"
+ help
+ G-Link SPI Transport is a Transport plug-in developed over SPI
+ bus. This transport plug-in performs marshaling of G-Link
+ commands & data to the appropriate SPI bus wire format and
+ allows for G-Link communication with remote subsystems that are
+ external to the System-on-Chip.
+
config MSM_SPCOM
depends on MSM_GLINK
bool "Secure Processor Communication over GLINK"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index f8450a4868ad..269b72c68b68 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MSM_GLINK) += glink.o glink_debugfs.o glink_ssr.o
obj-$(CONFIG_MSM_GLINK_LOOPBACK_SERVER) += glink_loopback_server.o
obj-$(CONFIG_MSM_GLINK_SMD_XPRT) += glink_smd_xprt.o
obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT)+= glink_smem_native_xprt.o
+obj-$(CONFIG_MSM_GLINK_SPI_XPRT) += glink_spi_xprt.o
obj-$(CONFIG_MSM_SMEM_LOGGING) += smem_log.o
obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
obj-$(CONFIG_ARCH_MSM8996) += kryo-l2-accessors.o
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 464fe17158cf..57e58a57fab7 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -372,10 +372,10 @@ static struct channel_ctx *ch_name_to_ch_ctx_create(
const char *name);
static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
- uint32_t riid);
+ uint32_t riid, void *cookie);
static int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
- uint32_t *riid_ptr, size_t *intent_size);
+ uint32_t *riid_ptr, size_t *intent_size, void **cookie);
static struct glink_core_rx_intent *ch_push_local_rx_intent(
struct channel_ctx *ctx, const void *pkt_priv, size_t size);
@@ -1139,11 +1139,12 @@ bool ch_check_duplicate_riid(struct channel_ctx *ctx, int riid)
* @ctx: Local channel context
* @size: Size of Intent
* @riid_ptr: Pointer to return value of remote intent ID
+ * @cookie: Transport-specific cookie to return
*
* This functions searches for an RX intent that is >= to the requested size.
*/
int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
- uint32_t *riid_ptr, size_t *intent_size)
+ uint32_t *riid_ptr, size_t *intent_size, void **cookie)
{
struct glink_core_rx_intent *intent;
struct glink_core_rx_intent *intent_tmp;
@@ -1177,6 +1178,7 @@ int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
intent->intent_size);
*riid_ptr = intent->id;
*intent_size = intent->intent_size;
+ *cookie = intent->cookie;
kfree(intent);
spin_unlock_irqrestore(
&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
@@ -1192,11 +1194,12 @@ int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
* @ctx: Local channel context
* @size: Size of Intent
* @riid: Remote intent ID
+ * @cookie: Transport-specific cookie to cache
*
* This functions adds a remote RX intent to the remote RX intent list.
*/
void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
- uint32_t riid)
+ uint32_t riid, void *cookie)
{
struct glink_core_rx_intent *intent;
unsigned long flags;
@@ -1225,6 +1228,7 @@ void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
}
intent->id = riid;
intent->intent_size = size;
+ intent->cookie = cookie;
spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
list_add_tail(&intent->list, &ctx->rmt_rx_intent_list);
@@ -2794,6 +2798,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
bool is_atomic =
tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
unsigned long flags;
+ void *cookie = NULL;
if (!size)
return -EINVAL;
@@ -2826,7 +2831,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
}
/* find matching rx intent (first-fit algorithm for now) */
- if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size)) {
+ if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size, &cookie)) {
if (!(tx_flags & GLINK_TX_REQ_INTENT)) {
/* no rx intent available */
GLINK_ERR_CH(ctx,
@@ -2856,7 +2861,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
}
while (ch_pop_remote_rx_intent(ctx, size, &riid,
- &intent_size)) {
+ &intent_size, &cookie)) {
rwref_get(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
if (is_atomic) {
@@ -2928,7 +2933,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
is_atomic ? GFP_ATOMIC : GFP_KERNEL);
if (!tx_info) {
GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
- ch_push_remote_rx_intent(ctx, intent_size, riid);
+ ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
rwref_read_put(&ctx->ch_state_lhb2);
return -ENOMEM;
}
@@ -2946,6 +2951,7 @@ static int glink_tx_common(void *handle, void *pkt_priv,
tx_info->vprovider = vbuf_provider;
tx_info->pprovider = pbuf_provider;
tx_info->intent_size = intent_size;
+ tx_info->cookie = cookie;
/* schedule packet for transmit */
if ((tx_flags & GLINK_TX_SINGLE_THREADED) &&
@@ -3577,6 +3583,10 @@ int glink_xprt_name_to_id(const char *name, uint16_t *id)
*id = SMEM_XPRT_ID;
return 0;
}
+ if (!strcmp(name, "spi")) {
+ *id = SPIV2_XPRT_ID;
+ return 0;
+ }
if (!strcmp(name, "smd_trans")) {
*id = SMD_TRANS_XPRT_ID;
return 0;
@@ -4844,7 +4854,35 @@ static void glink_core_remote_rx_intent_put(struct glink_transport_if *if_ptr,
return;
}
- ch_push_remote_rx_intent(ctx, size, riid);
+ ch_push_remote_rx_intent(ctx, size, riid, NULL);
+ rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_remote_rx_intent_put_cookie() - Receive remote intent
+ *
+ * @if_ptr: Pointer to transport instance
+ * @rcid: Remote Channel ID
+ * @riid: Remote Intent ID
+ * @size: Size of the remote intent
+ * @cookie: Transport-specific cookie to cache
+ */
+static void glink_core_remote_rx_intent_put_cookie(
+ struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t riid, size_t size, void *cookie)
+{
+ struct channel_ctx *ctx;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown rcid received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ ch_push_remote_rx_intent(ctx, size, riid, cookie);
rwref_put(&ctx->ch_state_lhb2);
}
@@ -5050,6 +5088,7 @@ void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
struct glink_core_tx_pkt *tx_pkt;
unsigned long flags;
size_t intent_size;
+ void *cookie;
ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
if (!ctx) {
@@ -5082,11 +5121,12 @@ void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
ctx->notify_tx_done(ctx, ctx->user_priv, tx_pkt->pkt_priv,
tx_pkt->data ? tx_pkt->data : tx_pkt->iovec);
intent_size = tx_pkt->intent_size;
+ cookie = tx_pkt->cookie;
ch_remove_tx_pending_remote_done(ctx, tx_pkt);
spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
if (reuse)
- ch_push_remote_rx_intent(ctx, intent_size, riid);
+ ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
rwref_put(&ctx->ch_state_lhb2);
}
@@ -5525,6 +5565,8 @@ static struct glink_core_if core_impl = {
.rx_get_pkt_ctx = glink_core_rx_get_pkt_ctx,
.rx_put_pkt_ctx = glink_core_rx_put_pkt_ctx,
.rx_cmd_remote_rx_intent_put = glink_core_remote_rx_intent_put,
+ .rx_cmd_remote_rx_intent_put_cookie =
+ glink_core_remote_rx_intent_put_cookie,
.rx_cmd_remote_rx_intent_req = glink_core_rx_cmd_remote_rx_intent_req,
.rx_cmd_rx_intent_req_ack = glink_core_rx_cmd_rx_intent_req_ack,
.rx_cmd_tx_done = glink_core_rx_cmd_tx_done,
diff --git a/drivers/soc/qcom/glink_core_if.h b/drivers/soc/qcom/glink_core_if.h
index 93c59d9c4aa1..14113305a50e 100644
--- a/drivers/soc/qcom/glink_core_if.h
+++ b/drivers/soc/qcom/glink_core_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -64,6 +64,7 @@ struct glink_core_version {
* iovec: Pointer to vector buffer if the transport passes a vector buffer
* vprovider: Virtual address-space buffer provider for a vector buffer
* pprovider: Physical address-space buffer provider for a vector buffer
+ * cookie: Private transport specific cookie
* pkt_priv: G-Link core owned packet-private data
* list: G-Link core owned list node
* bounce_buf: Pointer to the temporary/internal bounce buffer
@@ -78,6 +79,7 @@ struct glink_core_rx_intent {
void *iovec;
void * (*vprovider)(void *iovec, size_t offset, size_t *size);
void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+ void *cookie;
/* G-Link-Core-owned elements - please ignore */
struct list_head list;
@@ -151,6 +153,9 @@ struct glink_core_if {
struct glink_core_rx_intent *intent_ptr, bool complete);
void (*rx_cmd_remote_rx_intent_put)(struct glink_transport_if *if_ptr,
uint32_t rcid, uint32_t riid, size_t size);
+ void (*rx_cmd_remote_rx_intent_put_cookie)(
+ struct glink_transport_if *if_ptr, uint32_t rcid,
+ uint32_t riid, size_t size, void *cookie);
void (*rx_cmd_tx_done)(struct glink_transport_if *if_ptr, uint32_t rcid,
uint32_t riid, bool reuse);
void (*rx_cmd_remote_rx_intent_req)(struct glink_transport_if *if_ptr,
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
new file mode 100644
index 000000000000..6c91ac54821d
--- /dev/null
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -0,0 +1,2192 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <linux/component.h>
+#include <soc/qcom/tracer_pkt.h>
+#include <sound/wcd-dsp-mgr.h>
+#include <sound/wcd-spi.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+#define XPRT_NAME "spi"
+#define FIFO_ALIGNMENT 16
+#define FIFO_FULL_RESERVE 8
+#define TX_BLOCKED_CMD_RESERVE 16
+#define TRACER_PKT_FEATURE BIT(2)
+#define DEFAULT_FIFO_SIZE 1024
+#define SHORT_PKT_SIZE 16
+#define XPRT_ALIGNMENT 4
+
+#define MAX_INACTIVE_CYCLES 50
+#define POLL_INTERVAL_US 500
+
+#define ACTIVE_TX BIT(0)
+#define ACTIVE_RX BIT(1)
+
+#define ID_MASK 0xFFFFFF
+/**
+ * enum command_types - definition of the types of commands sent/received
+ * @VERSION_CMD: Version and feature set supported
+ * @VERSION_ACK_CMD: Response for @VERSION_CMD
+ * @OPEN_CMD: Open a channel
+ * @CLOSE_CMD: Close a channel
+ * @OPEN_ACK_CMD: Response to @OPEN_CMD
+ * @CLOSE_ACK_CMD: Response for @CLOSE_CMD
+ * @RX_INTENT_CMD: RX intent for a channel is queued
+ * @RX_DONE_CMD: Use of RX intent for a channel is complete
+ * @RX_DONE_W_REUSE_CMD: Same as @RX_DONE_CMD but also reuse the used intent
+ * @RX_INTENT_REQ_CMD: Request to have RX intent queued
+ * @RX_INTENT_REQ_ACK_CMD: Response for @RX_INTENT_REQ_CMD
+ * @TX_DATA_CMD: Start of a data transfer
+ * @TX_DATA_CONT_CMD: Continuation or end of a data transfer
+ * @READ_NOTIF_CMD: Request for a notification when this cmd is read
+ * @SIGNALS_CMD: Sideband signals
+ * @TRACER_PKT_CMD: Start of a Tracer Packet Command
+ * @TRACER_PKT_CONT_CMD: Continuation or end of a Tracer Packet Command
+ * @TX_SHORT_DATA_CMD: Transmit short packets
+ */
+enum command_types {
+ VERSION_CMD,
+ VERSION_ACK_CMD,
+ OPEN_CMD,
+ CLOSE_CMD,
+ OPEN_ACK_CMD,
+ CLOSE_ACK_CMD,
+ RX_INTENT_CMD,
+ RX_DONE_CMD,
+ RX_DONE_W_REUSE_CMD,
+ RX_INTENT_REQ_CMD,
+ RX_INTENT_REQ_ACK_CMD,
+ TX_DATA_CMD,
+ TX_DATA_CONT_CMD,
+ READ_NOTIF_CMD,
+ SIGNALS_CMD,
+ TRACER_PKT_CMD,
+ TRACER_PKT_CONT_CMD,
+ TX_SHORT_DATA_CMD,
+};
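/*
 * A minimal stand-alone sketch of the wire layout assumed by this
 * transport: every command starts with a fixed 16-byte header whose
 * first 16-bit field is the command id. The mirror below follows the
 * local "struct command" used by tx_cmd_version() later in this file;
 * the feature bit and version value are placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct version_cmd_sketch {
	uint16_t id;		/* VERSION_CMD */
	uint16_t version;
	uint32_t features;
	uint32_t fifo_size;
	uint32_t reserved;
} __attribute__((packed));

int main(void)
{
	struct version_cmd_sketch cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.id = 0;			/* VERSION_CMD is the first enumerator */
	cmd.version = 1;
	cmd.features = (1u << 2);	/* TRACER_PKT_FEATURE-style bit */

	printf("command record size: %zu bytes\n", sizeof(cmd)); /* 16 */
	return 0;
}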
+
+/**
+ * struct glink_cmpnt - Component to cache WDSP component and its operations
+ * @master_dev: Device structure corresponding to WDSP device.
+ * @master_ops: Operations supported by the WDSP device.
+ */
+struct glink_cmpnt {
+ struct device *master_dev;
+ struct wdsp_mgr_ops *master_ops;
+};
+
+/**
+ * struct edge_info - local information for managing a single complete edge
+ * @xprt_if: The transport interface registered with the
+ * glink core associated with this edge.
+ * @xprt_cfg: The transport configuration for the glink core
+ * associated with this edge.
+ * @subsys_name: Name of the remote subsystem in the edge.
+ * @spi_dev: Pointer to the connecting SPI device.
+ * @fifo_size: Size of the FIFO at the remote end.
+ * @tx_fifo_start: Base Address of the TX FIFO.
+ * @tx_fifo_end: End Address of the TX FIFO.
+ * @rx_fifo_start: Base Address of the RX FIFO.
+ * @rx_fifo_end: End Address of the RX FIFO.
+ * @tx_fifo_read_reg_addr: Address of the TX FIFO Read Index Register.
+ * @tx_fifo_write_reg_addr: Address of the TX FIFO Write Index Register.
+ * @rx_fifo_read_reg_addr: Address of the RX FIFO Read Index Register.
+ * @rx_fifo_write_reg_addr: Address of the RX FIFO Write Index Register.
+ * @kwork: Work to be executed when receiving data.
+ * @kworker: Handle to the entity processing @kwork.
+ * @task: Handle to the task context that runs @kworker.
+ * @use_ref: Active users of this transport grab a
+ * reference. Used for SSR synchronization.
+ * @in_ssr: Signals if this transport is in ssr.
+ * @write_lock: Lock to serialize write/tx operation.
+ * @tx_blocked_queue: Queue of entities waiting for the remote side to
+ * signal the resumption of TX.
+ * @tx_resume_needed: A tx resume signal needs to be sent to the glink
+ * core.
+ * @tx_blocked_signal_sent: Flag to indicate the flush signal has already
+ * been sent, and a response is pending from the
+ * remote side. Protected by @write_lock.
+ * @num_pw_states: Size of @ramp_time_us.
+ * @ramp_time_us: Array of ramp times in microseconds where array
+ * index position represents a power state.
+ * @activity_flag: Flag indicating active TX and RX.
+ * @activity_lock: Lock to synchronize access to activity flag.
+ * @cmpnt: Component to interface with the remote device.
+ */
+struct edge_info {
+ struct list_head list;
+ struct glink_transport_if xprt_if;
+ struct glink_core_transport_cfg xprt_cfg;
+ char subsys_name[GLINK_NAME_SIZE];
+ struct spi_device *spi_dev;
+
+ uint32_t fifo_size;
+ uint32_t tx_fifo_start;
+ uint32_t tx_fifo_end;
+ uint32_t rx_fifo_start;
+ uint32_t rx_fifo_end;
+ unsigned int tx_fifo_read_reg_addr;
+ unsigned int tx_fifo_write_reg_addr;
+ unsigned int rx_fifo_read_reg_addr;
+ unsigned int rx_fifo_write_reg_addr;
+
+ struct kthread_work kwork;
+ struct kthread_worker kworker;
+ struct task_struct *task;
+ struct srcu_struct use_ref;
+ bool in_ssr;
+ struct mutex write_lock;
+ wait_queue_head_t tx_blocked_queue;
+ bool tx_resume_needed;
+ bool tx_blocked_signal_sent;
+
+ uint32_t num_pw_states;
+ unsigned long *ramp_time_us;
+
+ uint32_t activity_flag;
+ spinlock_t activity_lock;
+
+ struct glink_cmpnt cmpnt;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version,
+ uint32_t features);
+static DEFINE_SPINLOCK(edge_infos_lock);
+static LIST_HEAD(edge_infos);
+static struct glink_core_version versions[] = {
+ {1, TRACER_PKT_FEATURE, negotiate_features_v1},
+};
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr: The transport for which features are negotiated.
+ * @version: The version negotiated.
+ * @features: The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version,
+ uint32_t features)
+{
+ return features & version->features;
+}
+
+/**
+ * wdsp_suspend() - Vote for the WDSP device suspend
+ * @cmpnt: Component to identify the WDSP device.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int wdsp_suspend(struct glink_cmpnt *cmpnt)
+{
+ if (cmpnt && cmpnt->master_dev &&
+ cmpnt->master_ops && cmpnt->master_ops->suspend)
+ return cmpnt->master_ops->suspend(cmpnt->master_dev);
+ else
+ return -EINVAL;
+}
+
+/**
+ * wdsp_resume() - Vote for the WDSP device resume
+ * @cmpnt: Component to identify the WDSP device.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int wdsp_resume(struct glink_cmpnt *cmpnt)
+{
+ if (cmpnt && cmpnt->master_dev &&
+ cmpnt->master_ops && cmpnt->master_ops->resume)
+ return cmpnt->master_ops->resume(cmpnt->master_dev);
+ else
+ return -EINVAL;
+}
+
+/**
+ * glink_spi_xprt_set_poll_mode() - Set the transport to polling mode
+ * @einfo: Edge information corresponding to the transport.
+ *
+ * This helper function indicates the start of RX polling. This will
+ * prevent the system from suspending and keeps polling for RX for a
+ * pre-defined duration.
+ */
+static void glink_spi_xprt_set_poll_mode(struct edge_info *einfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ einfo->activity_flag |= ACTIVE_RX;
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+ if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+ wdsp_resume(&einfo->cmpnt);
+}
+
+/**
+ * glink_spi_xprt_set_irq_mode() - Set the transport to IRQ mode
+ * @einfo: Edge information corresponding to the transport.
+ *
+ * This helper indicates the end of RX polling. This will allow the
+ * system to suspend and new RX data can be handled only through an IRQ.
+ */
+static void glink_spi_xprt_set_irq_mode(struct edge_info *einfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ einfo->activity_flag &= ~ACTIVE_RX;
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+}
+
+/**
+ * glink_spi_xprt_rx_data() - Receive data over SPI bus
+ * @einfo: Edge from which the data has to be received.
+ * @src: Source Address of the RX data.
+ * @dst: Address of the destination RX buffer.
+ * @size: Size of the RX data.
+ *
+ * This function is used to receive data or command as a byte stream from
+ * the remote subsystem over the SPI bus.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_rx_data(struct edge_info *einfo, void *src,
+ void *dst, uint32_t size)
+{
+ struct wcd_spi_msg spi_msg;
+
+ memset(&spi_msg, 0, sizeof(spi_msg));
+ spi_msg.data = dst;
+ spi_msg.remote_addr = (uint32_t)(size_t)src;
+ spi_msg.len = (size_t)size;
+ return wcd_spi_data_read(einfo->spi_dev, &spi_msg);
+}
+
+/**
+ * glink_spi_xprt_tx_data() - Transmit data over SPI bus
+ * @einfo: Edge to which the data has to be transmitted.
+ * @src: Address of the TX buffer.
+ * @dst: Destination Address of the TX data.
+ * @size: Size of the TX data.
+ *
+ * This function is used to transmit data or command as a byte stream to
+ * the remote subsystem over the SPI bus.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_tx_data(struct edge_info *einfo, void *src,
+ void *dst, uint32_t size)
+{
+ struct wcd_spi_msg spi_msg;
+
+ memset(&spi_msg, 0, sizeof(spi_msg));
+ spi_msg.data = src;
+ spi_msg.remote_addr = (uint32_t)(size_t)dst;
+ spi_msg.len = (size_t)size;
+ return wcd_spi_data_write(einfo->spi_dev, &spi_msg);
+}
+
+/**
+ * glink_spi_xprt_reg_read() - Read the TX/RX FIFO Read/Write Index registers
+ * @einfo: Edge from which the registers have to be read.
+ * @reg_addr: Address of the register to be read.
+ * @data: Buffer into which the register data has to be read.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_reg_read(struct edge_info *einfo, u32 reg_addr,
+ uint32_t *data)
+{
+ int rc;
+
+ rc = glink_spi_xprt_rx_data(einfo, (void *)(unsigned long)reg_addr,
+ data, sizeof(*data));
+ if (!rc)
+ *data = *data & ID_MASK;
+ return rc;
+}
+
+/**
+ * glink_spi_xprt_reg_write() - Write the TX/RX FIFO Read/Write Index registers
+ * @einfo: Edge to which the registers have to be written.
+ * @reg_addr: Address of the registers to be written.
+ * @data: Data to be written to the registers.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_reg_write(struct edge_info *einfo, u32 reg_addr,
+ uint32_t data)
+{
+ return glink_spi_xprt_tx_data(einfo, &data,
+ (void *)(unsigned long)reg_addr, sizeof(data));
+}
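/*
 * A minimal stand-alone sketch of the register masking done by
 * glink_spi_xprt_reg_read() above: the FIFO read/write index registers
 * carry the index in their lower 24 bits, so the raw value is masked
 * with ID_MASK before use. The raw register value here is made up.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_ID_MASK 0xFFFFFF

int main(void)
{
	uint32_t raw = 0xAB001234;	/* placeholder raw register value */
	uint32_t index = raw & SK_ID_MASK;

	printf("index=0x%06x\n", index);	/* 0x001234 */
	return 0;
}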
+
+/**
+ * glink_spi_xprt_write_avail() - Available Write Space in the remote side
+ * @einfo: Edge information corresponding to the remote side.
+ *
+ * This function reads the TX FIFO Read & Write Index registers from the
+ * remote subsystem and calculates the available write space.
+ *
+ * Return: 0 on error, available write space on success.
+ */
+static int glink_spi_xprt_write_avail(struct edge_info *einfo)
+{
+ uint32_t read_id;
+ uint32_t write_id;
+ int write_avail;
+ int ret;
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_read_reg_addr,
+ &read_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s tx_fifo_read_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->tx_fifo_read_reg_addr);
+ return 0;
+ }
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_write_reg_addr,
+ &write_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s tx_fifo_write_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->tx_fifo_write_reg_addr);
+ return 0;
+ }
+
+ if (!read_id || !write_id)
+ return 0;
+
+ if (unlikely(!einfo->tx_fifo_start))
+ einfo->tx_fifo_start = write_id;
+
+ if (read_id > write_id)
+ write_avail = read_id - write_id;
+ else
+ write_avail = einfo->fifo_size - (write_id - read_id);
+
+ if (write_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
+ write_avail = 0;
+ else
+ write_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
+
+ return write_avail;
+}
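/*
 * A minimal stand-alone sketch of the circular-FIFO space calculation
 * in glink_spi_xprt_write_avail() above, including the
 * FIFO_FULL_RESERVE and TX_BLOCKED_CMD_RESERVE headroom that keeps
 * room for a flow-control command. The indices in main() are
 * placeholder values.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_FIFO_FULL_RESERVE	  8
#define SK_TX_BLOCKED_CMD_RESERVE 16

static int write_avail_sketch(uint32_t read_id, uint32_t write_id,
			      uint32_t fifo_size)
{
	int avail;

	if (read_id > write_id)
		avail = read_id - write_id;
	else
		avail = fifo_size - (write_id - read_id);

	if (avail < SK_FIFO_FULL_RESERVE + SK_TX_BLOCKED_CMD_RESERVE)
		return 0;
	return avail - (SK_FIFO_FULL_RESERVE + SK_TX_BLOCKED_CMD_RESERVE);
}

int main(void)
{
	/* write index 320 bytes ahead of read index in a 1024 byte FIFO */
	printf("%d\n", write_avail_sketch(0x1000, 0x1140, 1024)); /* 680 */
	return 0;
}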
+
+/**
+ * glink_spi_xprt_read_avail() - Available Read Data from the remote side
+ * @einfo: Edge information corresponding to the remote side.
+ *
+ * This function reads the RX FIFO Read & Write Index registers from the
+ * remote subsystem and calculates the available read data size.
+ *
+ * Return: 0 on error, available read data on success.
+ */
+static int glink_spi_xprt_read_avail(struct edge_info *einfo)
+{
+ uint32_t read_id;
+ uint32_t write_id;
+ int read_avail;
+ int ret;
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_read_reg_addr,
+ &read_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s rx_fifo_read_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->rx_fifo_read_reg_addr);
+ return 0;
+ }
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_write_reg_addr,
+ &write_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s rx_fifo_write_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->rx_fifo_write_reg_addr);
+ return 0;
+ }
+
+ if (!read_id || !write_id)
+ return 0;
+
+ if (unlikely(!einfo->rx_fifo_start))
+ einfo->rx_fifo_start = read_id;
+
+ if (read_id <= write_id)
+ read_avail = write_id - read_id;
+ else
+ read_avail = einfo->fifo_size - (read_id - write_id);
+ return read_avail;
+}
+
+/**
+ * glink_spi_xprt_rx_cmd() - Receive G-Link commands
+ * @einfo: Edge information corresponding to the remote side.
+ * @dst: Destination buffer into which the commands have to be read.
+ * @size: Size of the data to be read.
+ *
+ * This function is used to receive the commands from the RX FIFO. This
+ * function updates the RX FIFO Read Index after reading the data.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_rx_cmd(struct edge_info *einfo, void *dst,
+ uint32_t size)
+{
+ uint32_t read_id;
+ uint32_t size_to_read = size;
+ uint32_t offset = 0;
+ int ret;
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_read_reg_addr,
+ &read_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s rx_fifo_read_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->rx_fifo_read_reg_addr);
+ return ret;
+ }
+
+ do {
+ if ((read_id + size_to_read) >=
+ (einfo->rx_fifo_start + einfo->fifo_size))
+ size_to_read = einfo->rx_fifo_start + einfo->fifo_size
+ - read_id;
+ ret = glink_spi_xprt_rx_data(einfo, (void *)(size_t)read_id,
+ dst + offset, size_to_read);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading data\n", __func__, ret);
+ return ret;
+ }
+ read_id += size_to_read;
+ offset += size_to_read;
+ if (read_id >= (einfo->rx_fifo_start + einfo->fifo_size))
+ read_id = einfo->rx_fifo_start;
+ size_to_read = size - offset;
+ } while (size_to_read);
+
+ ret = glink_spi_xprt_reg_write(einfo, einfo->rx_fifo_read_reg_addr,
+ read_id);
+ if (ret < 0)
+ pr_err("%s: Error %d writing %s rx_fifo_read_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->rx_fifo_read_reg_addr);
+ return ret;
+}
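/*
 * A minimal stand-alone sketch of the wrap-around copy loop used by
 * glink_spi_xprt_rx_cmd() above (and its TX counterpart) when a
 * transfer crosses the end of the remote FIFO window. The remote SPI
 * read is replaced by a plain memcpy from a local array; the window
 * base, size and indices are placeholders.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void fifo_read_sketch(const uint8_t *fifo, uint32_t fifo_start,
			     uint32_t fifo_size, uint32_t read_id,
			     uint8_t *dst, uint32_t size)
{
	uint32_t offset = 0;
	uint32_t chunk = size;

	do {
		if (read_id + chunk >= fifo_start + fifo_size)
			chunk = fifo_start + fifo_size - read_id;
		memcpy(dst + offset, fifo + (read_id - fifo_start), chunk);
		read_id += chunk;
		offset += chunk;
		if (read_id >= fifo_start + fifo_size)
			read_id = fifo_start;	/* wrap to the FIFO base */
		chunk = size - offset;
	} while (chunk);
}

int main(void)
{
	uint8_t fifo[16], out[8];
	int i;

	for (i = 0; i < 16; i++)
		fifo[i] = i;
	/* read 8 bytes starting 4 bytes before the end of the window */
	fifo_read_sketch(fifo, 100, 16, 112, out, 8);
	printf("%u %u\n", out[0], out[7]);	/* 12 and 3 */
	return 0;
}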
+
+/**
+ * glink_spi_xprt_tx_cmd_safe() - Transmit G-Link commands
+ * @einfo: Edge information corresponding to the remote subsystem.
+ * @src: Source buffer containing the G-Link command.
+ * @size: Size of the command to transmit.
+ *
+ * This function is used to transmit the G-Link commands. This function
+ * must be called with einfo->write_lock locked.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_tx_cmd_safe(struct edge_info *einfo, void *src,
+ uint32_t size)
+{
+ uint32_t write_id;
+ uint32_t size_to_write = size;
+ uint32_t offset = 0;
+ int ret;
+
+ ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_write_reg_addr,
+ &write_id);
+ if (ret < 0) {
+ pr_err("%s: Error %d reading %s tx_fifo_write_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->tx_fifo_write_reg_addr);
+ return ret;
+ }
+
+ do {
+ if ((write_id + size_to_write) >=
+ (einfo->tx_fifo_start + einfo->fifo_size))
+ size_to_write = einfo->tx_fifo_start + einfo->fifo_size
+ - write_id;
+ ret = glink_spi_xprt_tx_data(einfo, src + offset,
+ (void *)(size_t)write_id, size_to_write);
+ if (ret < 0) {
+ pr_err("%s: Error %d writing data\n", __func__, ret);
+ return ret;
+ }
+ write_id += size_to_write;
+ offset += size_to_write;
+ if (write_id >= (einfo->tx_fifo_start + einfo->fifo_size))
+ write_id = einfo->tx_fifo_start;
+ size_to_write = size - offset;
+ } while (size_to_write);
+
+ ret = glink_spi_xprt_reg_write(einfo, einfo->tx_fifo_write_reg_addr,
+ write_id);
+ if (ret < 0)
+ pr_err("%s: Error %d writing %s tx_fifo_write_reg_addr %d\n",
+ __func__, ret, einfo->xprt_cfg.edge,
+ einfo->tx_fifo_write_reg_addr);
+ return ret;
+}
+
+/**
+ * send_tx_blocked_signal() - Send flow control request message
+ * @einfo: Edge information corresponding to the remote subsystem.
+ *
+ * This function is used to send a message to the remote subsystem indicating
+ * that the local subsystem is waiting for the write space. The remote
+ * subsystem on receiving this message will send a resume tx message.
+ */
+static void send_tx_blocked_signal(struct edge_info *einfo)
+{
+ struct read_notif_request {
+ uint16_t cmd;
+ uint16_t reserved;
+ uint32_t reserved2;
+ uint64_t reserved3;
+ };
+ struct read_notif_request read_notif_req = {0};
+
+ read_notif_req.cmd = READ_NOTIF_CMD;
+
+ if (!einfo->tx_blocked_signal_sent) {
+ einfo->tx_blocked_signal_sent = true;
+ glink_spi_xprt_tx_cmd_safe(einfo, &read_notif_req,
+ sizeof(read_notif_req));
+ }
+}
+
+/**
+ * glink_spi_xprt_tx_cmd() - Transmit G-Link commands
+ * @einfo: Edge information corresponding to the remote subsystem.
+ * @src: Source buffer containing the G-Link command.
+ * @size: Size of the command to transmit.
+ *
+ * This function is used to transmit the G-Link commands. This function
+ * might sleep if the space is not available to transmit the command.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_tx_cmd(struct edge_info *einfo, void *src,
+ uint32_t size)
+{
+ int ret;
+ DEFINE_WAIT(wait);
+
+ mutex_lock(&einfo->write_lock);
+ while (glink_spi_xprt_write_avail(einfo) < size) {
+ send_tx_blocked_signal(einfo);
+ prepare_to_wait(&einfo->tx_blocked_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (glink_spi_xprt_write_avail(einfo) < size &&
+ !einfo->in_ssr) {
+ mutex_unlock(&einfo->write_lock);
+ schedule();
+ mutex_lock(&einfo->write_lock);
+ }
+ finish_wait(&einfo->tx_blocked_queue, &wait);
+ if (einfo->in_ssr) {
+ mutex_unlock(&einfo->write_lock);
+ return -EFAULT;
+ }
+ }
+ ret = glink_spi_xprt_tx_cmd_safe(einfo, src, size);
+ mutex_unlock(&einfo->write_lock);
+ return ret;
+}
+
+/**
+ * process_rx_data() - process received data from an edge
+ * @einfo: The edge the data is received on.
+ * @cmd_id: ID to specify the type of data.
+ * @rcid: The remote channel id associated with the data.
+ * @intent_id: The intent the data should be put in.
+ * @src: Address of the source buffer from which the data
+ * is read.
+ * @frag_size: Size of the data fragment to read.
+ * @size_remaining: Size of data left to be read in this packet.
+ */
+static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
+ uint32_t rcid, uint32_t intent_id, void *src,
+ uint32_t frag_size, uint32_t size_remaining)
+{
+ struct glink_core_rx_intent *intent;
+ int rc = 0;
+
+ intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+ &einfo->xprt_if, rcid, intent_id);
+ if (intent == NULL) {
+ GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
+ intent_id);
+ return;
+ } else if (intent->data == NULL) {
+ GLINK_ERR("%s: intent for ch %d liid %d has no data buff\n",
+ __func__, rcid, intent_id);
+ return;
+ } else if (intent->intent_size - intent->write_offset < frag_size ||
+ intent->write_offset + size_remaining > intent->intent_size) {
+ GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
+ __func__, frag_size, size_remaining,
+ "will overflow ch", rcid, "intent", intent_id);
+ return;
+ }
+
+ if (cmd_id == TX_SHORT_DATA_CMD)
+ memcpy(intent->data + intent->write_offset, src, frag_size);
+ else
+ rc = glink_spi_xprt_rx_data(einfo, src,
+ intent->data + intent->write_offset, frag_size);
+ if (rc < 0) {
+ GLINK_ERR("%s: Error %d receiving data %d:%d:%d:%d\n",
+ __func__, rc, rcid, intent_id, frag_size,
+ size_remaining);
+ size_remaining += frag_size;
+ } else {
+ intent->write_offset += frag_size;
+ intent->pkt_size += frag_size;
+
+ if (unlikely((cmd_id == TRACER_PKT_CMD ||
+ cmd_id == TRACER_PKT_CONT_CMD) && !size_remaining)) {
+ tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
+ intent->tracer_pkt = true;
+ }
+ }
+ einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
+ rcid, intent, size_remaining ? false : true);
+}
+
+/**
+ * process_rx_cmd() - Process incoming G-Link commands
+ * @einfo: Edge information corresponding to the remote subsystem.
+ * @rx_data: Buffer which contains the G-Link commands to be processed.
+ * @rx_size: Size of the buffer containing the series of G-Link commands.
+ *
+ * This function is used to parse and process a series of G-Link commands
+ * received in a buffer.
+ */
+static void process_rx_cmd(struct edge_info *einfo,
+ void *rx_data, int rx_size)
+{
+ struct command {
+ uint16_t id;
+ uint16_t param1;
+ uint32_t param2;
+ uint32_t param3;
+ uint32_t param4;
+ };
+ struct intent_desc {
+ uint32_t size;
+ uint32_t id;
+ uint64_t addr;
+ };
+ struct rx_desc {
+ uint32_t size;
+ uint32_t size_left;
+ uint64_t addr;
+ };
+ struct rx_short_data_desc {
+ unsigned char data[SHORT_PKT_SIZE];
+ };
+ struct command *cmd;
+ struct intent_desc *intents;
+ struct rx_desc *rx_descp;
+ struct rx_short_data_desc *rx_sd_descp;
+ int offset = 0;
+ int rcu_id;
+ uint16_t rcid;
+ uint16_t name_len;
+ uint16_t prio;
+ char *name;
+ bool granted;
+ int i;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ while (offset < rx_size) {
+ cmd = (struct command *)(rx_data + offset);
+ offset += sizeof(*cmd);
+ switch (cmd->id) {
+ case VERSION_CMD:
+ if (cmd->param3)
+ einfo->fifo_size = cmd->param3;
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
+ &einfo->xprt_if, cmd->param1, cmd->param2);
+ break;
+
+ case VERSION_ACK_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
+ &einfo->xprt_if, cmd->param1, cmd->param2);
+ break;
+
+ case OPEN_CMD:
+ rcid = cmd->param1;
+ name_len = (uint16_t)(cmd->param2 & 0xFFFF);
+ prio = (uint16_t)((cmd->param2 & 0xFFFF0000) >> 16);
+ name = (char *)(rx_data + offset);
+ offset += ALIGN(name_len, FIFO_ALIGNMENT);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+ &einfo->xprt_if, rcid, name, prio);
+ break;
+
+ case CLOSE_CMD:
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_ch_remote_close(
+ &einfo->xprt_if, cmd->param1);
+ break;
+
+ case OPEN_ACK_CMD:
+ prio = (uint16_t)(cmd->param2 & 0xFFFF);
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+ &einfo->xprt_if, cmd->param1, prio);
+ break;
+
+ case CLOSE_ACK_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+ &einfo->xprt_if, cmd->param1);
+ break;
+
+ case RX_INTENT_CMD:
+ for (i = 0; i < cmd->param2; i++) {
+ intents = (struct intent_desc *)
+ (rx_data + offset);
+ offset += sizeof(*intents);
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_remote_rx_intent_put_cookie(
+ &einfo->xprt_if, cmd->param1,
+ intents->id, intents->size,
+ (void *)(intents->addr));
+ }
+ break;
+
+ case RX_DONE_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+ &einfo->xprt_if, cmd->param1, cmd->param2,
+ false);
+ break;
+
+ case RX_INTENT_REQ_CMD:
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_remote_rx_intent_req(
+ &einfo->xprt_if, cmd->param1,
+ cmd->param2);
+ break;
+
+ case RX_INTENT_REQ_ACK_CMD:
+ granted = cmd->param2 == 1 ? true : false;
+ einfo->xprt_if.glink_core_if_ptr->
+ rx_cmd_rx_intent_req_ack(&einfo->xprt_if,
+ cmd->param1, granted);
+ break;
+
+ case TX_DATA_CMD:
+ case TX_DATA_CONT_CMD:
+ case TRACER_PKT_CMD:
+ case TRACER_PKT_CONT_CMD:
+ rx_descp = (struct rx_desc *)(rx_data + offset);
+ offset += sizeof(*rx_descp);
+ process_rx_data(einfo, cmd->id, cmd->param1,
+ cmd->param2, (void *)rx_descp->addr,
+ rx_descp->size, rx_descp->size_left);
+ break;
+
+ case TX_SHORT_DATA_CMD:
+ rx_sd_descp = (struct rx_short_data_desc *)
+ (rx_data + offset);
+ offset += sizeof(*rx_sd_descp);
+ process_rx_data(einfo, cmd->id, cmd->param1,
+ cmd->param2, (void *)rx_sd_descp->data,
+ cmd->param3, cmd->param4);
+ break;
+
+ case READ_NOTIF_CMD:
+ break;
+
+ case SIGNALS_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
+ &einfo->xprt_if, cmd->param1, cmd->param2);
+ break;
+
+ case RX_DONE_W_REUSE_CMD:
+ einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+ &einfo->xprt_if, cmd->param1,
+ cmd->param2, true);
+ break;
+
+ default:
+ pr_err("Unrecognized command: %d\n", cmd->id);
+ break;
+ }
+ }
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * __rx_worker() - Receive commands on a specific edge
+ * @einfo: Edge to process commands on.
+ *
+ * This function checks the size of data to be received, allocates the
+ * buffer for that data and reads the data from the remote subsystem
+ * into that buffer. This function then calls the process_rx_cmd() to
+ * parse the received G-Link command sequence. This function will also
+ * poll for the data for a predefined duration for performance reasons.
+ */
+static void __rx_worker(struct edge_info *einfo)
+{
+ uint32_t inactive_cycles = 0;
+ int rx_avail, rc;
+ void *rx_data;
+ int rcu_id;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (unlikely(!einfo->rx_fifo_start)) {
+ rx_avail = glink_spi_xprt_read_avail(einfo);
+ if (!rx_avail) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+ einfo->in_ssr = false;
+ einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+ }
+
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ glink_spi_xprt_set_poll_mode(einfo);
+ while (inactive_cycles < MAX_INACTIVE_CYCLES) {
+ if (einfo->tx_resume_needed &&
+ glink_spi_xprt_write_avail(einfo)) {
+ einfo->tx_resume_needed = false;
+ einfo->xprt_if.glink_core_if_ptr->tx_resume(
+ &einfo->xprt_if);
+ }
+ mutex_lock(&einfo->write_lock);
+ if (einfo->tx_blocked_signal_sent) {
+ wake_up_all(&einfo->tx_blocked_queue);
+ einfo->tx_blocked_signal_sent = false;
+ }
+ mutex_unlock(&einfo->write_lock);
+
+ rx_avail = glink_spi_xprt_read_avail(einfo);
+ if (!rx_avail) {
+ usleep_range(POLL_INTERVAL_US, POLL_INTERVAL_US + 50);
+ inactive_cycles++;
+ continue;
+ }
+ inactive_cycles = 0;
+
+ rx_data = kzalloc(rx_avail, GFP_KERNEL);
+ if (!rx_data)
+ break;
+
+ rc = glink_spi_xprt_rx_cmd(einfo, rx_data, rx_avail);
+ if (rc < 0) {
+ GLINK_ERR("%s: Error %d receiving data\n",
+ __func__, rc);
+ kfree(rx_data);
+ break;
+ }
+ process_rx_cmd(einfo, rx_data, rx_avail);
+ kfree(rx_data);
+ }
+ glink_spi_xprt_set_irq_mode(einfo);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
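/*
 * A minimal stand-alone sketch of the bounded polling pattern in
 * __rx_worker() above: keep polling while data keeps arriving, and
 * fall back to IRQ mode after MAX_INACTIVE_CYCLES empty polls of
 * POLL_INTERVAL_US each. Userspace mirror with a stubbed read_avail();
 * the packet budget is a placeholder.
 */
#include <stdio.h>
#include <unistd.h>

#define SK_MAX_INACTIVE_CYCLES 50
#define SK_POLL_INTERVAL_US    500

static int fake_read_avail(void)
{
	static int budget = 3;		/* pretend three packets arrive */

	return budget-- > 0 ? 64 : 0;
}

int main(void)
{
	unsigned int inactive_cycles = 0;
	int handled = 0;

	while (inactive_cycles < SK_MAX_INACTIVE_CYCLES) {
		if (!fake_read_avail()) {
			usleep(SK_POLL_INTERVAL_US);
			inactive_cycles++;
			continue;
		}
		inactive_cycles = 0;	/* activity resets the idle counter */
		handled++;
	}
	printf("handled %d packets before going idle\n", handled);
	return 0;
}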
+
+/**
+ * rx_worker() - Worker function to process received commands
+ * @work: kwork associated with the edge to process commands on.
+ */
+static void rx_worker(struct kthread_work *work)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(work, struct edge_info, kwork);
+ __rx_worker(einfo);
+}
+
+/**
+ * tx_cmd_version() - Convert a version cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @version: The version number to encode.
+ * @features: The features information to encode.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+ uint32_t features)
+{
+ struct command {
+ uint16_t id;
+ uint16_t version;
+ uint32_t features;
+ uint32_t fifo_size;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = VERSION_CMD;
+ cmd.version = version;
+ cmd.features = features;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_version_ack() - Convert a version ack cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @version: The version number to encode.
+ * @features: The features information to encode.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features)
+{
+ struct command {
+ uint16_t id;
+ uint16_t version;
+ uint32_t features;
+ uint32_t fifo_size;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = VERSION_ACK_CMD;
+ cmd.version = version;
+ cmd.features = features;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * set_version() - Activate a negotiated version and feature set
+ * @if_ptr: The transport to configure.
+ * @version: The version to use.
+ * @features: The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+ uint32_t features)
+{
+ struct edge_info *einfo;
+ uint32_t ret;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+ }
+
+ ret = GCAP_SIGNALS;
+ if (features & TRACER_PKT_FEATURE)
+ ret |= GCAP_TRACER_PKT;
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return ret;
+}
+
+/**
+ * tx_cmd_ch_open() - Convert a channel open cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @name: The channel name to encode.
+ * @req_xprt: The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+ const char *name, uint16_t req_xprt)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint16_t length;
+ uint16_t req_xprt;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t buf_size;
+ void *buf;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = OPEN_CMD;
+ cmd.lcid = lcid;
+ cmd.length = (uint16_t)(strlen(name) + 1);
+ cmd.req_xprt = req_xprt;
+
+ buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENOMEM;
+ }
+
+ memcpy(buf, &cmd, sizeof(cmd));
+ memcpy(buf + sizeof(cmd), name, cmd.length);
+
+ glink_spi_xprt_tx_cmd(einfo, buf, buf_size);
+
+ kfree(buf);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_ch_close() - Convert a channel close cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = CLOSE_CMD;
+ cmd.lcid = lcid;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - Convert a channel open ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ * @xprt_resp: The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint16_t xprt_resp)
+{
+ struct command {
+ uint16_t id;
+ uint16_t rcid;
+ uint16_t reserved1;
+ uint16_t xprt_resp;
+ uint64_t reserved2;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = OPEN_ACK_CMD;
+ cmd.rcid = rcid;
+ cmd.xprt_resp = xprt_resp;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - Convert a channel close ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t rcid;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = CLOSE_ACK_CMD;
+ cmd.rcid = rcid;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * ssr() - Process a subsystem restart notification of a transport
+ * @if_ptr: The transport to restart
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int ssr(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ einfo->in_ssr = true;
+ wake_up_all(&einfo->tx_blocked_queue);
+
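+ /* Wait for in-flight SRCU readers (active transmit paths) to drain before resetting transport state */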
+ synchronize_srcu(&einfo->use_ref);
+ einfo->tx_resume_needed = false;
+ einfo->tx_blocked_signal_sent = false;
+ einfo->tx_fifo_start = 0;
+ einfo->rx_fifo_start = 0;
+ einfo->fifo_size = DEFAULT_FIFO_SIZE;
+ einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);
+
+ return 0;
+}
+
+/**
+ * allocate_rx_intent() - Allocate/reserve space for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @size: size of intent.
+ * @intent: Pointer to the intent structure.
+ *
+ * Assign "data" with the buffer created, since the transport creates
+ * a linear buffer and "iovec" with the "intent" itself, so that
+ * the data can be passed to a client that receives only vector buffer.
+ * Note that returning NULL for the pointer is valid (it means that space has
+ * been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+ struct glink_core_rx_intent *intent)
+{
+ void *t;
+
+ t = kzalloc(size, GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ intent->data = t;
+ intent->iovec = (void *)intent;
+ intent->vprovider = rx_linear_vbuf_provider;
+ intent->pprovider = NULL;
+ return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @intent: Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent)
+{
+ if (!intent || !intent->data)
+ return -EINVAL;
+
+ kfree(intent->data);
+ intent->data = NULL;
+ intent->iovec = NULL;
+ intent->vprovider = NULL;
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_intent() - Convert an rx intent cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The intent size to encode.
+ * @liid: The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size, uint32_t liid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t count;
+ uint64_t reserved;
+ uint32_t size;
+ uint32_t liid;
+ uint64_t addr;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ if (size > UINT_MAX) {
+ pr_err("%s: size %zu is too large to encode\n", __func__, size);
+ return -EMSGSIZE;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = RX_INTENT_CMD;
+ cmd.lcid = lcid;
+ cmd.count = 1;
+ cmd.size = size;
+ cmd.liid = liid;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - Convert an rx done cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @liid: The local intent id to encode.
+ * @reuse: Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t liid, bool reuse)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t liid;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return;
+ }
+
+ cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
+ cmd.lcid = lcid;
+ cmd.liid = liid;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_rx_intent_req() - Convert an rx intent request cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t size;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ if (size > UINT_MAX) {
+ pr_err("%s: size %zu is too large to encode\n", __func__, size);
+ return -EMSGSIZE;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = RX_INTENT_REQ_CMD;
+ cmd.lcid = lcid;
+ cmd.size = size;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_rx_intent_req_ack() - Convert an rx intent request ack cmd to wire
+ * format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @granted: The request response to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+ uint32_t lcid, bool granted)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t response;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = RX_INTENT_REQ_ACK_CMD;
+ cmd.lcid = lcid;
+ if (granted)
+ cmd.response = 1;
+ else
+ cmd.response = 0;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - Convert a signals cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @sigs: The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+ uint32_t sigs)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t sigs;
+ uint64_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = SIGNALS_CMD;
+ cmd.lcid = lcid;
+ cmd.sigs = sigs;
+
+ glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_data() - convert a data/tracer_pkt to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @cmd_id: The command ID to transmit.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t riid;
+ uint64_t reserved;
+ uint32_t size;
+ uint32_t size_left;
+ uint64_t addr;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t size;
+ void *data_start, *dst = NULL;
+ size_t tx_size = 0;
+ int rcu_id;
+
+ if (pctx->size < pctx->size_remaining) {
+ GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
+ __func__);
+ pctx->size_remaining = pctx->size;
+ }
+ if (!pctx->size_remaining)
+ return 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ if (cmd_id == TX_DATA_CMD) {
+ if (pctx->size_remaining == pctx->size)
+ cmd.id = TX_DATA_CMD;
+ else
+ cmd.id = TX_DATA_CONT_CMD;
+ } else {
+ if (pctx->size_remaining == pctx->size)
+ cmd.id = TRACER_PKT_CMD;
+ else
+ cmd.id = TRACER_PKT_CONT_CMD;
+ }
+ cmd.lcid = lcid;
+ cmd.riid = pctx->riid;
+ data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+ &tx_size);
+ if (unlikely(!data_start)) {
+ GLINK_ERR("%s: invalid data_start\n", __func__);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EINVAL;
+ }
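+ /* Keep the bulk transfer aligned to XPRT_ALIGNMENT; any unaligned tail stays in size_remaining for a later transmit */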
+ if (tx_size & (XPRT_ALIGNMENT - 1))
+ tx_size = ALIGN(tx_size - SHORT_PKT_SIZE, XPRT_ALIGNMENT);
+ if (likely(pctx->cookie))
+ dst = pctx->cookie + (pctx->size - pctx->size_remaining);
+
+ mutex_lock(&einfo->write_lock);
+ size = glink_spi_xprt_write_avail(einfo);
+ /* Need enough space to write the command */
+ if (size <= sizeof(cmd)) {
+ einfo->tx_resume_needed = true;
+ mutex_unlock(&einfo->write_lock);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EAGAIN;
+ }
+ cmd.addr = 0;
+ cmd.size = tx_size;
+ pctx->size_remaining -= tx_size;
+ cmd.size_left = pctx->size_remaining;
+ if (cmd.id == TRACER_PKT_CMD)
+ tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
+
+ if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+ wdsp_resume(&einfo->cmpnt);
+ glink_spi_xprt_tx_data(einfo, data_start, dst, tx_size);
+ glink_spi_xprt_tx_cmd_safe(einfo, &cmd, sizeof(cmd));
+ GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+ "<SPI>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+ cmd.size_left);
+ mutex_unlock(&einfo->write_lock);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return cmd.size;
+}
+
+/**
+ * tx_short_data() - Transmit a short packet in-band along with the command
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_short_data(struct glink_transport_if *if_ptr,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t riid;
+ uint32_t size;
+ uint32_t size_left;
+ unsigned char data[SHORT_PKT_SIZE];
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t size;
+ void *data_start;
+ size_t tx_size = 0;
+ int rcu_id;
+
+ if (pctx->size < pctx->size_remaining) {
+ GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
+ __func__);
+ pctx->size_remaining = pctx->size;
+ }
+ if (!pctx->size_remaining)
+ return 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+ if (einfo->in_ssr) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EFAULT;
+ }
+
+ cmd.id = TX_SHORT_DATA_CMD;
+ cmd.lcid = lcid;
+ cmd.riid = pctx->riid;
+ data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+ &tx_size);
+ if (unlikely(!data_start || tx_size > SHORT_PKT_SIZE)) {
+ GLINK_ERR("%s: invalid data_start %p or tx_size %zu\n",
+ __func__, data_start, tx_size);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EINVAL;
+ }
+
+ mutex_lock(&einfo->write_lock);
+ size = glink_spi_xprt_write_avail(einfo);
+ /* Need enough space to write the command */
+ if (size <= sizeof(cmd)) {
+ einfo->tx_resume_needed = true;
+ mutex_unlock(&einfo->write_lock);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EAGAIN;
+ }
+ cmd.size = tx_size;
+ pctx->size_remaining -= tx_size;
+ cmd.size_left = pctx->size_remaining;
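+ /* Short packets travel in-band, embedded in the command frame itself */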
+ memcpy(cmd.data, data_start, tx_size);
+ if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+ wdsp_resume(&einfo->cmpnt);
+ glink_spi_xprt_tx_cmd_safe(einfo, &cmd, sizeof(cmd));
+ GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+ "<SPI>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+ cmd.size_left);
+ mutex_unlock(&einfo->write_lock);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return cmd.size;
+}
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
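+ /* Packets that fit in SHORT_PKT_SIZE are sent in-band with the command; larger packets take the bulk data path */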
+ if (pctx->size_remaining <= SHORT_PKT_SIZE)
+ return tx_short_data(if_ptr, lcid, pctx);
+ return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
+}
+
+/**
+ * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
+}
+
+/**
+ * int wait_link_down() - Check status of read/write indices
+ * @if_ptr: The transport to check
+ *
+ * Return: 1 if indices are all zero, 0 otherwise
+ */
+static int wait_link_down(struct glink_transport_if *if_ptr)
+{
+ return 0;
+}
+
+/**
+ * get_power_vote_ramp_time() - Get the ramp time required for the power
+ * votes to be applied
+ * @if_ptr: The transport interface on which power voting is requested.
+ * @state: The power state for which ramp time is required.
+ *
+ * Return: The ramp time specific to the power state, standard error otherwise.
+ */
+static unsigned long get_power_vote_ramp_time(
+ struct glink_transport_if *if_ptr, uint32_t state)
+{
+ return 0;
+}
+
+/**
+ * power_vote() - Update the power votes to meet qos requirement
+ * @if_ptr: The transport interface on which power voting is requested.
+ * @state: The power state for which the voting should be done.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
+{
+ unsigned long flags;
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
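+ /* Mark the edge as actively transmitting so glink_spi_suspend() holds off suspend */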
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ einfo->activity_flag |= ACTIVE_TX;
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+ return 0;
+}
+
+/**
+ * power_unvote() - Remove all the power votes
+ * @if_ptr: The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_unvote(struct glink_transport_if *if_ptr)
+{
+ unsigned long flags;
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ einfo->activity_flag &= ~ACTIVE_TX;
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+ return 0;
+}
+
+static int glink_wdsp_cmpnt_init(struct device *dev, void *priv_data)
+{
+ return 0;
+}
+
+static int glink_wdsp_cmpnt_deinit(struct device *dev, void *priv_data)
+{
+ return 0;
+}
+
+static int glink_wdsp_cmpnt_event_handler(struct device *dev,
+ void *priv_data, enum wdsp_event_type event, void *data)
+{
+ struct edge_info *einfo = dev_get_drvdata(dev);
+ struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+ struct device *sdev;
+ struct spi_device *spi_dev;
+
+ switch (event) {
+ case WDSP_EVENT_PRE_BOOTUP:
+ if (cmpnt && cmpnt->master_dev &&
+ cmpnt->master_ops &&
+ cmpnt->master_ops->get_dev_for_cmpnt)
+ sdev = cmpnt->master_ops->get_dev_for_cmpnt(
+ cmpnt->master_dev, WDSP_CMPNT_TRANSPORT);
+ else
+ sdev = NULL;
+
+ if (!sdev) {
+ dev_err(dev, "%s: Failed to get transport device\n",
+ __func__);
+ break;
+ }
+
+ spi_dev = to_spi_device(sdev);
+ einfo->spi_dev = spi_dev;
+ break;
+ case WDSP_EVENT_IPC1_INTR:
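+ /* Remote signaled incoming data; hand it off to the RX worker */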
+ queue_kthread_work(&einfo->kworker, &einfo->kwork);
+ break;
+ default:
+ pr_debug("%s: unhandled event %d", __func__, event);
+ break;
+ }
+
+ return 0;
+}
+
+/* glink_wdsp_cmpnt_ops - Callback operations registered with the WDSP framework */
+static struct wdsp_cmpnt_ops glink_wdsp_cmpnt_ops = {
+ .init = glink_wdsp_cmpnt_init,
+ .deinit = glink_wdsp_cmpnt_deinit,
+ .event_handler = glink_wdsp_cmpnt_event_handler,
+};
+
+static int glink_component_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct edge_info *einfo = dev_get_drvdata(dev);
+ struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+ int ret = 0;
+
+ cmpnt->master_dev = master;
+ cmpnt->master_ops = data;
+
+ if (cmpnt->master_ops && cmpnt->master_ops->register_cmpnt_ops)
+ ret = cmpnt->master_ops->register_cmpnt_ops(master, dev, einfo,
+ &glink_wdsp_cmpnt_ops);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
+ __func__, ret);
+ return ret;
+}
+
+static void glink_component_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct edge_info *einfo = dev_get_drvdata(dev);
+ struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+
+ cmpnt->master_dev = NULL;
+ cmpnt->master_ops = NULL;
+}
+
+static const struct component_ops glink_component_ops = {
+ .bind = glink_component_bind,
+ .unbind = glink_component_unbind,
+};
+
+/**
+ * init_xprt_if() - Initialize the xprt_if for an edge
+ * @einfo: The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+ einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+ einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+ einfo->xprt_if.set_version = set_version;
+ einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+ einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+ einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+ einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+ einfo->xprt_if.ssr = ssr;
+ einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+ einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+ einfo->xprt_if.tx = tx;
+ einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+ einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+ tx_cmd_remote_rx_intent_req_ack;
+ einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+ einfo->xprt_if.wait_link_down = wait_link_down;
+ einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
+ einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
+ einfo->xprt_if.power_vote = power_vote;
+ einfo->xprt_if.power_unvote = power_unvote;
+}
+
+/**
+ * init_xprt_cfg() - Initialize the xprt_cfg for an edge
+ * @einfo: The edge to initialize.
+ * @name: The name of the remote side this edge communicates to.
+ */
+static void init_xprt_cfg(struct edge_info *einfo, const char *name)
+{
+ einfo->xprt_cfg.name = XPRT_NAME;
+ einfo->xprt_cfg.edge = name;
+ einfo->xprt_cfg.versions = versions;
+ einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+ einfo->xprt_cfg.max_cid = SZ_64K;
+ einfo->xprt_cfg.max_iid = SZ_2G;
+}
+
+/**
+ * parse_qos_dt_params() - Parse the power states from DT
+ * @node: Device tree node for the specific edge.
+ * @einfo: Edge information for the edge whose probe function is called.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_qos_dt_params(struct device_node *node,
+ struct edge_info *einfo)
+{
+ int rc;
+ int i;
+ char *key;
+ uint32_t *arr32;
+ uint32_t num_states;
+
+ key = "qcom,ramp-time";
+ if (!of_find_property(node, key, &num_states))
+ return -ENODEV;
+
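+ /* of_find_property() returned the property length in bytes; convert it to a count of u32 entries */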
+ num_states /= sizeof(uint32_t);
+
+ einfo->num_pw_states = num_states;
+
+ arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
+ if (!arr32)
+ return -ENOMEM;
+
+ einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!einfo->ramp_time_us) {
+ rc = -ENOMEM;
+ goto mem_alloc_fail;
+ }
+
+ rc = of_property_read_u32_array(node, key, arr32, num_states);
+ if (rc) {
+ rc = -ENODEV;
+ goto invalid_key;
+ }
+ for (i = 0; i < num_states; i++)
+ einfo->ramp_time_us[i] = arr32[i];
+
+ kfree(arr32);
+ return 0;
+
+invalid_key:
+ kfree(einfo->ramp_time_us);
+mem_alloc_fail:
+ kfree(arr32);
+ return rc;
+}
+
+/**
+ * parse_remote_fifo_cfg() - Parse any remote FIFO configuration
+ * @node: Device tree node holding the remote FIFO index registers.
+ * @einfo: Edge information for the edge whose probe function is called.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_remote_fifo_cfg(struct device_node *node,
+ struct edge_info *einfo)
+{
+ int rc;
+ char *key;
+
+ key = "qcom,out-read-idx-reg";
+ rc = of_property_read_u32(node, key, &einfo->tx_fifo_read_reg_addr);
+ if (rc)
+ goto key_error;
+
+ key = "qcom,out-write-idx-reg";
+ rc = of_property_read_u32(node, key, &einfo->tx_fifo_write_reg_addr);
+ if (rc)
+ goto key_error;
+
+ key = "qcom,in-read-idx-reg";
+ rc = of_property_read_u32(node, key, &einfo->rx_fifo_read_reg_addr);
+ if (rc)
+ goto key_error;
+
+ key = "qcom,in-write-idx-reg";
+ rc = of_property_read_u32(node, key, &einfo->rx_fifo_write_reg_addr);
+ if (rc)
+ goto key_error;
+ return 0;
+
+key_error:
+ pr_err("%s: Error %d parsing key %s\n", __func__, rc, key);
+ return rc;
+}
+
+static int glink_spi_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ struct device_node *phandle_node;
+ struct edge_info *einfo;
+ int rc;
+ char *key;
+ const char *subsys_name;
+ unsigned long flags;
+
+ node = pdev->dev.of_node;
+
+ einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+ if (!einfo) {
+ rc = -ENOMEM;
+ goto edge_info_alloc_fail;
+ }
+
+ key = "label";
+ subsys_name = of_get_property(node, key, NULL);
+ if (!subsys_name) {
+ pr_err("%s: missing key %s\n", __func__, key);
+ rc = -ENODEV;
+ goto missing_key;
+ }
+ strlcpy(einfo->subsys_name, subsys_name, sizeof(einfo->subsys_name));
+
+ init_xprt_cfg(einfo, subsys_name);
+ init_xprt_if(einfo);
+
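+ /* Start with the edge marked down; it is brought up once the remote side is ready */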
+ einfo->in_ssr = true;
+ einfo->fifo_size = DEFAULT_FIFO_SIZE;
+ init_kthread_work(&einfo->kwork, rx_worker);
+ init_kthread_worker(&einfo->kworker);
+ init_srcu_struct(&einfo->use_ref);
+ mutex_init(&einfo->write_lock);
+ init_waitqueue_head(&einfo->tx_blocked_queue);
+ spin_lock_init(&einfo->activity_lock);
+
+ spin_lock_irqsave(&edge_infos_lock, flags);
+ list_add_tail(&einfo->list, &edge_infos);
+ spin_unlock_irqrestore(&edge_infos_lock, flags);
+
+ einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+ "spi_%s", subsys_name);
+ if (IS_ERR(einfo->task)) {
+ rc = PTR_ERR(einfo->task);
+ pr_err("%s: kthread run failed %d\n", __func__, rc);
+ goto kthread_fail;
+ }
+
+ key = "qcom,remote-fifo-config";
+ phandle_node = of_parse_phandle(node, key, 0);
+ if (phandle_node)
+ parse_remote_fifo_cfg(phandle_node, einfo);
+
+ key = "qcom,qos-config";
+ phandle_node = of_parse_phandle(node, key, 0);
+ if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
+ &einfo->xprt_cfg)))
+ parse_qos_dt_params(node, einfo);
+
+ rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+ if (rc == -EPROBE_DEFER)
+ goto reg_xprt_fail;
+ if (rc) {
+ pr_err("%s: glink core register transport failed: %d\n",
+ __func__, rc);
+ goto reg_xprt_fail;
+ }
+
+ dev_set_drvdata(&pdev->dev, einfo);
+ if (!strcmp(einfo->xprt_cfg.edge, "wdsp")) {
+ rc = component_add(&pdev->dev, &glink_component_ops);
+ if (rc) {
+ pr_err("%s: component_add failed, err = %d\n",
+ __func__, rc);
+ rc = -ENODEV;
+ goto reg_cmpnt_fail;
+ }
+ }
+ return 0;
+
+reg_cmpnt_fail:
+ dev_set_drvdata(&pdev->dev, NULL);
+ glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+ flush_kthread_worker(&einfo->kworker);
+ kthread_stop(einfo->task);
+ einfo->task = NULL;
+kthread_fail:
+ spin_lock_irqsave(&edge_infos_lock, flags);
+ list_del(&einfo->list);
+ spin_unlock_irqrestore(&edge_infos_lock, flags);
+missing_key:
+ kfree(einfo);
+edge_info_alloc_fail:
+ return rc;
+}
+
+static int glink_spi_remove(struct platform_device *pdev)
+{
+ struct edge_info *einfo;
+ unsigned long flags;
+
+ einfo = (struct edge_info *)dev_get_drvdata(&pdev->dev);
+ glink_core_unregister_transport(&einfo->xprt_if);
+ flush_kthread_worker(&einfo->kworker);
+ kthread_stop(einfo->task);
+ einfo->task = NULL;
+ spin_lock_irqsave(&edge_infos_lock, flags);
+ list_del(&einfo->list);
+ spin_unlock_irqrestore(&edge_infos_lock, flags);
+ kfree(einfo);
+ return 0;
+}
+
+static int glink_spi_resume(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static int glink_spi_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ unsigned long flags;
+ struct edge_info *einfo;
+ bool suspend;
+ int rc = -EBUSY;
+
+ einfo = (struct edge_info *)dev_get_drvdata(&pdev->dev);
+ if (strcmp(einfo->xprt_cfg.edge, "wdsp"))
+ return 0;
+
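+ /* Only suspend the WDSP component when no TX/RX activity is flagged on this edge */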
+ spin_lock_irqsave(&einfo->activity_lock, flags);
+ suspend = !(einfo->activity_flag);
+ spin_unlock_irqrestore(&einfo->activity_lock, flags);
+ if (suspend)
+ rc = wdsp_suspend(&einfo->cmpnt);
+ if (rc < 0)
+ pr_err("%s: Could not suspend activity_flag %d, rc %d\n",
+ __func__, einfo->activity_flag, rc);
+ return rc;
+}
+
+static const struct of_device_id spi_match_table[] = {
+ { .compatible = "qcom,glink-spi-xprt" },
+ {},
+};
+
+static struct platform_driver glink_spi_driver = {
+ .probe = glink_spi_probe,
+ .remove = glink_spi_remove,
+ .resume = glink_spi_resume,
+ .suspend = glink_spi_suspend,
+ .driver = {
+ .name = "msm_glink_spi_xprt",
+ .owner = THIS_MODULE,
+ .of_match_table = spi_match_table,
+ },
+};
+
+static int __init glink_spi_xprt_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&glink_spi_driver);
+ if (rc)
+ pr_err("%s: glink_spi register failed %d\n", __func__, rc);
+
+ return rc;
+}
+module_init(glink_spi_xprt_init);
+
+static void __exit glink_spi_xprt_exit(void)
+{
+ platform_driver_unregister(&glink_spi_driver);
+}
+module_exit(glink_spi_xprt_exit);
+
+MODULE_DESCRIPTION("MSM G-Link SPI Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h
index 6242e867fe72..f4d5a3b303db 100644
--- a/drivers/soc/qcom/glink_xprt_if.h
+++ b/drivers/soc/qcom/glink_xprt_if.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@ enum buf_type {
enum xprt_ids {
SMEM_XPRT_ID = 100,
+ SPIV2_XPRT_ID = SMEM_XPRT_ID,
SMD_TRANS_XPRT_ID = 200,
LLOOP_XPRT_ID = 300,
MOCK_XPRT_HIGH_ID = 390,
@@ -56,6 +57,7 @@ enum xprt_ids {
* @iovec: Pointer to the vector buffer packet.
* @vprovider: Packet-specific virtual buffer provider function.
* @pprovider: Packet-specific physical buffer provider function.
+ * @cookie: Transport-specific cookie
* @pkt_ref: Active references to the packet.
*/
struct glink_core_tx_pkt {
@@ -73,6 +75,7 @@ struct glink_core_tx_pkt {
void *iovec;
void * (*vprovider)(void *iovec, size_t offset, size_t *size);
void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+ void *cookie;
struct rwref_lock pkt_ref;
};
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index ea25ed5d0611..e8a9751fa266 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1580,6 +1580,14 @@ int icnss_get_soc_info(struct icnss_soc_info *info)
info->v_addr = penv->mem_base_va;
info->p_addr = penv->mem_base_pa;
+ info->chip_id = penv->chip_info.chip_id;
+ info->chip_family = penv->chip_info.chip_family;
+ info->board_id = penv->board_info.board_id;
+ info->soc_id = penv->soc_info.soc_id;
+ info->fw_version = penv->fw_version_info.fw_version;
+ strlcpy(info->fw_build_timestamp,
+ penv->fw_version_info.fw_build_timestamp,
+ QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1);
return 0;
}
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index e163dd79b8b9..b055234326b6 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -189,8 +189,8 @@ int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
if (ret)
- pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d\n",
- __func__, &addr, size, desc->subsys_vmid);
+ pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+ __func__, &addr, size, desc->subsys_vmid, ret);
return ret;
}
EXPORT_SYMBOL(pil_assign_mem_to_subsys);
@@ -205,8 +205,8 @@ int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
if (ret)
- panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
- __func__, &addr, size, desc->subsys_vmid);
+ panic("%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+ __func__, &addr, size, desc->subsys_vmid, ret);
return ret;
}
@@ -222,8 +222,8 @@ int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
if (ret)
- pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d\n",
- __func__, &addr, size, desc->subsys_vmid);
+ pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+ __func__, &addr, size, desc->subsys_vmid, ret);
return ret;
}
@@ -642,8 +642,8 @@ static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
seg->filesz, desc->map_fw_mem,
desc->unmap_fw_mem, map_data);
if (ret < 0) {
- pil_err(desc, "Failed to locate blob %s or blob is too big.\n",
- fw_name);
+ pil_err(desc, "Failed to locate blob %s or blob is too big(rc:%d)\n",
+ fw_name, ret);
return ret;
}
@@ -679,7 +679,8 @@ static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
if (desc->ops->verify_blob) {
ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
if (ret)
- pil_err(desc, "Blob%u failed verification\n", num);
+ pil_err(desc, "Blob%u failed verification(rc:%d)\n",
+ num, ret);
}
return ret;
@@ -754,7 +755,7 @@ int pil_boot(struct pil_desc *desc)
snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->fw_name);
ret = request_firmware(&fw, fw_name, desc->dev);
if (ret) {
- pil_err(desc, "Failed to locate %s\n", fw_name);
+ pil_err(desc, "Failed to locate %s(rc:%d)\n", fw_name, ret);
goto out;
}
@@ -792,14 +793,14 @@ int pil_boot(struct pil_desc *desc)
desc->priv->unvoted_flag = 0;
ret = pil_proxy_vote(desc);
if (ret) {
- pil_err(desc, "Failed to proxy vote\n");
+ pil_err(desc, "Failed to proxy vote(rc:%d)\n", ret);
goto release_fw;
}
if (desc->ops->init_image)
ret = desc->ops->init_image(desc, fw->data, fw->size);
if (ret) {
- pil_err(desc, "Invalid firmware metadata\n");
+ pil_err(desc, "Initializing image failed(rc:%d)\n", ret);
goto err_boot;
}
@@ -807,7 +808,7 @@ int pil_boot(struct pil_desc *desc)
ret = desc->ops->mem_setup(desc, priv->region_start,
priv->region_end - priv->region_start);
if (ret) {
- pil_err(desc, "Memory setup error\n");
+ pil_err(desc, "Memory setup error(rc:%d)\n", ret);
goto err_deinit_image;
}
@@ -852,7 +853,7 @@ int pil_boot(struct pil_desc *desc)
ret = desc->ops->auth_and_reset(desc);
if (ret) {
- pil_err(desc, "Failed to bring out of reset\n");
+ pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
goto err_auth_and_reset;
}
pil_info(desc, "Brought out of reset\n");
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 16c62240ec0a..3873a34c60fb 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -123,7 +123,8 @@ static int pil_mss_power_up(struct q6v5_data *drv)
if (drv->vreg) {
ret = regulator_enable(drv->vreg);
if (ret)
- dev_err(drv->desc.dev, "Failed to enable modem regulator.\n");
+ dev_err(drv->desc.dev, "Failed to enable modem regulator(rc:%d)\n",
+ ret);
}
if (drv->cxrail_bhs) {
@@ -245,7 +246,7 @@ static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
status != 0, POLL_INTERVAL_US, val);
if (ret) {
- dev_err(dev, "PBL boot timed out\n");
+ dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
return ret;
}
if (status != STATUS_PBL_SUCCESS) {
@@ -257,7 +258,7 @@ static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
status != 0, POLL_INTERVAL_US, val);
if (ret) {
- dev_err(dev, "MBA boot timed out\n");
+ dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
return ret;
}
if (status != STATUS_XPU_UNLOCKED &&
@@ -299,7 +300,8 @@ int pil_mss_shutdown(struct pil_desc *pil)
if (!ret)
assert_clamps(pil);
else
- dev_err(pil->dev, "error turning ON AHB clock\n");
+ dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
+ ret);
}
ret = pil_mss_restart_reg(drv, 1);
@@ -328,7 +330,8 @@ int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
status == STATUS_MBA_UNLOCKED || status < 0,
POLL_INTERVAL_US, val);
if (ret)
- dev_err(pil->dev, "MBA region unlock timed out\n");
+ dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
+ ret);
else if (status < 0)
dev_err(pil->dev, "MBA unlock returned err status: %d\n",
status);
@@ -367,19 +370,20 @@ int pil_mss_make_proxy_votes(struct pil_desc *pil)
ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
if (ret) {
- dev_err(pil->dev, "missing vdd_mx-uV property\n");
+ dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
return ret;
}
ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
if (ret) {
- dev_err(pil->dev, "Failed to request vreg_mx voltage\n");
+ dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
+ ret);
return ret;
}
ret = regulator_enable(drv->vreg_mx);
if (ret) {
- dev_err(pil->dev, "Failed to enable vreg_mx\n");
+ dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
return ret;
}
@@ -540,8 +544,8 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
ret = request_firmware(&fw, fw_name_p, pil->dev);
if (ret) {
- dev_err(pil->dev, "Failed to locate %s\n",
- fw_name_p);
+ dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
+ fw_name_p, ret);
return ret;
}
@@ -611,14 +615,15 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
drv->mba_dp_size);
if (ret) {
- pr_err("scm_call to unprotect MBA and DP mem failed\n");
+ pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
+ ret);
goto err_mba_data;
}
}
ret = pil_mss_reset(pil);
if (ret) {
- dev_err(pil->dev, "MBA boot failed.\n");
+ dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
goto err_mss_reset;
}
@@ -673,7 +678,8 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
ret = pil_assign_mem_to_subsys(pil, mdata_phys,
ALIGN(size, SZ_4K));
if (ret) {
- pr_err("scm_call to unprotect modem metadata mem failed\n");
+ pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
+ ret);
dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt,
mdata_phys, &attrs);
goto fail;
@@ -690,7 +696,8 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
POLL_INTERVAL_US, val);
if (ret) {
- dev_err(pil->dev, "MBA authentication of headers timed out\n");
+ dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
+ ret);
} else if (status < 0) {
dev_err(pil->dev, "MBA returned error %d for headers\n",
status);
@@ -771,7 +778,8 @@ static int pil_msa_mba_auth(struct pil_desc *pil)
ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
if (ret) {
- dev_err(pil->dev, "MBA authentication of image timed out\n");
+ dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
+ ret);
} else if (status < 0) {
dev_err(pil->dev, "MBA returned error %d for image\n", status);
ret = -EINVAL;
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 5c0c1ffa8951..af9cd189cf6d 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -291,11 +291,13 @@ static int pil_mss_loadable_init(struct modem_data *drv,
ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV,
MAX_VDD_MSS_UV);
if (ret)
- dev_err(&pdev->dev, "Failed to set vreg voltage.\n");
+ dev_err(&pdev->dev, "Failed to set vreg voltage(rc:%d)\n",
+ ret);
ret = regulator_set_load(q6->vreg, 100000);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to set vreg mode.\n");
+ dev_err(&pdev->dev, "Failed to set vreg mode(rc:%d)\n",
+ ret);
return ret;
}
}
@@ -330,7 +332,7 @@ static int pil_mss_loadable_init(struct modem_data *drv,
ret = of_property_read_u32(pdev->dev.of_node,
"qcom,pas-id", &drv->pas_id);
if (ret)
- dev_warn(&pdev->dev, "Failed to find the pas_id.\n");
+ dev_info(&pdev->dev, "No pas_id found.\n");
drv->subsys_desc.pil_mss_memsetup =
of_property_read_bool(pdev->dev.of_node, "qcom,pil-mss-memsetup");
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index 3dcfb5abdb23..f8895e8a7b3d 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -91,50 +91,53 @@ int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
if (ret) {
- dev_err(pil->dev, "missing vdd_cx-voltage property\n");
+ dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(drv->xo);
if (ret) {
- dev_err(pil->dev, "Failed to vote for XO\n");
+ dev_err(pil->dev, "Failed to vote for XO(rc:%d)\n", ret);
goto out;
}
ret = clk_prepare_enable(drv->pnoc_clk);
if (ret) {
- dev_err(pil->dev, "Failed to vote for pnoc\n");
+ dev_err(pil->dev, "Failed to vote for pnoc(rc:%d)\n", ret);
goto err_pnoc_vote;
}
ret = clk_prepare_enable(drv->qdss_clk);
if (ret) {
- dev_err(pil->dev, "Failed to vote for qdss\n");
+ dev_err(pil->dev, "Failed to vote for qdss(rc:%d)\n", ret);
goto err_qdss_vote;
}
ret = regulator_set_voltage(drv->vreg_cx, uv, INT_MAX);
if (ret) {
- dev_err(pil->dev, "Failed to request vdd_cx voltage.\n");
+ dev_err(pil->dev, "Failed to request vdd_cx voltage(rc:%d)\n",
+ ret);
goto err_cx_voltage;
}
ret = regulator_set_load(drv->vreg_cx, 100000);
if (ret < 0) {
- dev_err(pil->dev, "Failed to set vdd_cx mode.\n");
+ dev_err(pil->dev, "Failed to set vdd_cx mode(rc:%d)\n", ret);
goto err_cx_mode;
}
ret = regulator_enable(drv->vreg_cx);
if (ret) {
- dev_err(pil->dev, "Failed to vote for vdd_cx\n");
+ dev_err(pil->dev, "Failed to vote for vdd_cx(rc:%d)\n", ret);
goto err_cx_enable;
}
if (drv->vreg_pll) {
ret = regulator_enable(drv->vreg_pll);
if (ret) {
- dev_err(pil->dev, "Failed to vote for vdd_pll\n");
+ dev_err(pil->dev, "Failed to vote for vdd_pll(rc:%d)\n",
+ ret);
goto err_vreg_pll;
}
}
@@ -165,7 +168,8 @@ void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
if (ret) {
- dev_err(pil->dev, "missing vdd_cx-voltage property\n");
+ dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+ ret);
return;
}
@@ -708,13 +712,15 @@ struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
ret = regulator_set_voltage(drv->vreg_pll, vdd_pll,
vdd_pll);
if (ret) {
- dev_err(&pdev->dev, "Failed to set vdd_pll voltage.\n");
+ dev_err(&pdev->dev, "Failed to set vdd_pll voltage(rc:%d)\n",
+ ret);
return ERR_PTR(ret);
}
ret = regulator_set_load(drv->vreg_pll, 10000);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to set vdd_pll mode.\n");
+ dev_err(&pdev->dev, "Failed to set vdd_pll mode(rc:%d)\n",
+ ret);
return ERR_PTR(ret);
}
} else
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index b8cef11f4067..23e32214756a 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -656,13 +656,14 @@ static uint32_t socinfo_get_foundry_id(void)
: 0;
}
-static uint32_t socinfo_get_serial_number(void)
+uint32_t socinfo_get_serial_number(void)
{
return socinfo ?
(socinfo_format >= SOCINFO_VERSION(0, 10) ?
socinfo->v0_10.serial_number : 0)
: 0;
}
+EXPORT_SYMBOL(socinfo_get_serial_number);
static uint32_t socinfo_get_chip_family(void)
{
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index e3e43eee3608..56ca6835fc12 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -339,7 +339,8 @@ static int of_read_regs(struct device *dev, struct reg_info **regs_ref,
rc = of_property_read_u32_array(dev->of_node, reg_uV_uA_name,
vdd_uV_uA, len);
if (rc) {
- dev_err(dev, "Failed to read uV/uA values\n");
+ dev_err(dev, "Failed to read uV/uA values(rc:%d)\n",
+ rc);
return rc;
}
@@ -423,7 +424,8 @@ static int enable_regulators(struct pil_tz_data *d, struct device *dev,
rc = regulator_set_voltage(regs[i].reg,
regs[i].uV, INT_MAX);
if (rc) {
- dev_err(dev, "Failed to request voltage.\n");
+ dev_err(dev, "Failed to request voltage(rc:%d)\n",
+ rc);
goto err_voltage;
}
}
@@ -432,7 +434,8 @@ static int enable_regulators(struct pil_tz_data *d, struct device *dev,
rc = regulator_set_load(regs[i].reg,
regs[i].uA);
if (rc < 0) {
- dev_err(dev, "Failed to set regulator mode\n");
+ dev_err(dev, "Failed to set regulator mode(rc:%d)\n",
+ rc);
goto err_mode;
}
}
@@ -442,7 +445,7 @@ static int enable_regulators(struct pil_tz_data *d, struct device *dev,
rc = regulator_enable(regs[i].reg);
if (rc) {
- dev_err(dev, "Regulator enable failed\n");
+ dev_err(dev, "Regulator enable failed(rc:%d)\n", rc);
goto err_enable;
}
}
@@ -499,7 +502,7 @@ static int prepare_enable_clocks(struct device *dev, struct clk **clks,
for (i = 0; i < clk_count; i++) {
rc = clk_prepare_enable(clks[i]);
if (rc) {
- dev_err(dev, "Clock enable failed\n");
+ dev_err(dev, "Clock enable failed(rc:%d)\n", rc);
goto err;
}
}
@@ -541,7 +544,8 @@ static int pil_make_proxy_vote(struct pil_desc *pil)
if (d->bus_client) {
rc = msm_bus_scale_client_update_request(d->bus_client, 1);
if (rc) {
- dev_err(pil->dev, "bandwidth request failed\n");
+ dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
+ rc);
goto err_bw;
}
} else
@@ -995,7 +999,8 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
rc = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
&d->smem_id);
if (rc) {
- dev_err(&pdev->dev, "Failed to get the smem_id.\n");
+ dev_err(&pdev->dev, "Failed to get the smem_id(rc:%d)\n",
+ rc);
return rc;
}
}
@@ -1019,7 +1024,8 @@ static int pil_tz_driver_probe(struct platform_device *pdev)
rc = of_property_read_u32(pdev->dev.of_node, "qcom,pas-id",
&d->pas_id);
if (rc) {
- dev_err(&pdev->dev, "Failed to find the pas_id.\n");
+ dev_err(&pdev->dev, "Failed to find the pas_id(rc:%d)\n",
+ rc);
return rc;
}
scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE0);
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 32041c17d88f..0ed8a6533e00 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -268,7 +268,8 @@ static ssize_t firmware_name_store(struct device *dev,
pr_info("Changing subsys fw_name to %s\n", buf);
mutex_lock(&track->lock);
- strlcpy(subsys->desc->fw_name, buf, count + 1);
+ strlcpy(subsys->desc->fw_name, buf,
+ min(count + 1, sizeof(subsys->desc->fw_name)));
mutex_unlock(&track->lock);
return orig_count;
}
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index dd8149ef097d..f8121eb4f63a 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -799,7 +799,7 @@ static int qusb_phy_probe(struct platform_device *pdev)
}
of_property_read_u32_array(dev->of_node,
- "qcom,qemu-init-seq",
+ "qcom,emu-init-seq",
qphy->emu_init_seq,
qphy->emu_init_seq_len);
} else {
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 325f5fcf161b..5ec08098d197 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -878,7 +878,7 @@ static int qusb_phy_probe(struct platform_device *pdev)
}
of_property_read_u32_array(dev->of_node,
- "qcom,qemu-init-seq",
+ "qcom,emu-init-seq",
qphy->emu_init_seq,
qphy->emu_init_seq_len);
} else {
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
index e391a5aaa45d..e883f045967d 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.c
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -299,7 +299,7 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
struct mdp_layer_commit commit;
struct mdp_layer_commit32 commit32;
u32 layer_count;
- struct mdp_input_layer *layer_list = NULL, *layer;
+ struct mdp_input_layer *layer_list = NULL;
struct mdp_input_layer32 *layer_list32 = NULL;
struct mdp_output_layer *output_layer = NULL;
@@ -370,8 +370,8 @@ static int __compat_atomic_commit(struct fb_info *info, unsigned int cmd,
argp, layer_count);
for (i = 0; i < layer_count; i++) {
- kfree(layer[i].scale);
- mdss_mdp_free_layer_pp_info(&layer[i]);
+ kfree(layer_list[i].scale);
+ mdss_mdp_free_layer_pp_info(&layer_list[i]);
}
kfree(layer_list);
layer_list_err:
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index a99ae97cdb80..eefea9f117c0 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -41,6 +41,41 @@
#define VDDA_UA_ON_LOAD 100000 /* uA units */
#define VDDA_UA_OFF_LOAD 100 /* uA units */
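+/* Static EDID blob used to populate the DP panel EDID; consumed by
+ * mdss_dp_edid_init() below.
+ */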
+static char edid_buf1[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x22, 0xf0, 0x52, 0x29, 0x01, 0x01, 0x01, 0x01,
+ 0x16, 0x16, 0x01, 0x03, 0x80, 0x30, 0x1b, 0x78,
+ 0x2e, 0xee, 0x95, 0xa3, 0x54, 0x4c, 0x99, 0x26,
+ 0x0f, 0x50, 0x54, 0xa1, 0x08, 0x00, 0xd1, 0xc0,
+ 0x81, 0xc0, 0xa9, 0xc0, 0xb3, 0x00, 0x95, 0x00,
+ 0x81, 0x40, 0x81, 0x80, 0x01, 0x01, 0x02, 0x3a,
+ 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
+ 0x45, 0x00, 0xdb, 0x0b, 0x11, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 0x4c, 0x18,
+ 0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x32, 0x34, 0x30,
+ 0x77, 0x0a, 0x20, 0x20, 0x00, 0x00, 0x00, 0xff,
+ 0x00, 0x43, 0x4e, 0x34, 0x32, 0x32, 0x32, 0x30,
+ 0x30, 0x33, 0x46, 0x0a, 0x20, 0x20, 0x01, 0xb1,
+
+ 0x02, 0x03, 0x17, 0xb1, 0x4c, 0x90, 0x1f, 0x05,
+ 0x14, 0x04, 0x13, 0x03, 0x02, 0x07, 0x06, 0x12,
+ 0x01, 0x65, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x02,
+ 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58,
+ 0x2c, 0x45, 0x00, 0xdb, 0x0b, 0x11, 0x00, 0x00,
+ 0x1e, 0x02, 0x3a, 0x80, 0xd0, 0x72, 0x38, 0x2d,
+ 0x40, 0x10, 0x2c, 0x45, 0x80, 0xdb, 0x0b, 0x11,
+ 0x00, 0x00, 0x1e, 0x01, 0x1d, 0x00, 0x72, 0x51,
+ 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00, 0xdb,
+ 0x0b, 0x11, 0x00, 0x00, 0x1e, 0x01, 0x1d, 0x00,
+ 0xbc, 0x52, 0xd0, 0x1e, 0x20, 0xb8, 0x28, 0x55,
+ 0x40, 0xdb, 0x0b, 0x11, 0x00, 0x00, 0x1e, 0x8c,
+ 0x0a, 0xd0, 0x8a, 0x20, 0xe0, 0x2d, 0x10, 0x10,
+ 0x3e, 0x96, 0x00, 0xdb, 0x0b, 0x11, 0x00, 0x00,
+ 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b
+};
static void mdss_dp_put_dt_clk_data(struct device *dev,
@@ -1037,6 +1072,41 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
return 0;
}
+static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
+{
+ struct mdss_dp_drv_pdata *dp_drv = NULL;
+ struct hdmi_edid_init_data edid_init_data = {0};
+ void *edid_data;
+
+ if (!pdata) {
+ pr_err("Invalid input data\n");
+ return -EINVAL;
+ }
+
+ dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
+ panel_data);
+
+ dp_drv->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
+ edid_init_data.kobj = dp_drv->kobj;
+ edid_init_data.ds_data = dp_drv->ds_data;
+ edid_init_data.max_pclk_khz = dp_drv->max_pclk_khz;
+
+ edid_data = hdmi_edid_init(&edid_init_data);
+ if (!edid_data) {
+ DEV_ERR("%s: edid init failed\n", __func__);
+ return -ENODEV;
+ }
+
+ /* Use the existing EDID buffer for 1080p */
+ memcpy(edid_init_data.buf, edid_buf1, sizeof(edid_buf1));
+ dp_drv->panel_data.panel_info.edid_data = edid_data;
+
+ return 0;
+}
+
static int mdss_dp_host_init(struct mdss_panel_data *pdata)
{
struct mdss_dp_drv_pdata *dp_drv = NULL;
@@ -1076,8 +1146,16 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
mdss_dp_get_ctrl_hw_version(&dp_drv->ctrl_io),
mdss_dp_get_phy_hw_version(&dp_drv->phy_io));
+ ret = hdmi_edid_parser(dp_drv->panel_data.panel_info.edid_data);
+ if (ret) {
+ DEV_ERR("%s: edid parse failed\n", __func__);
+ goto edid_parser_error;
+ }
+
return ret;
+edid_parser_error:
+ mdss_dp_clk_ctrl(dp_drv, DP_CORE_PM, false);
clk_error:
mdss_dp_regulator_ctrl(dp_drv, false);
vreg_error:
@@ -1088,8 +1166,19 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
int event, void *arg)
{
int rc = 0;
+ struct fb_info *fbi;
+ struct mdss_dp_drv_pdata *dp = NULL;
+
+ if (!pdata) {
+ pr_err("%s: Invalid input data\n", __func__);
+ return -EINVAL;
+ }
pr_debug("event=%d\n", event);
+
+ dp = container_of(pdata, struct mdss_dp_drv_pdata,
+ panel_data);
+
switch (event) {
case MDSS_EVENT_UNBLANK:
rc = mdss_dp_on(pdata);
@@ -1097,6 +1186,18 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
case MDSS_EVENT_PANEL_OFF:
rc = mdss_dp_off(pdata);
break;
+ case MDSS_EVENT_FB_REGISTERED:
+ fbi = (struct fb_info *)arg;
+ if (!fbi || !fbi->dev)
+ break;
+
+ dp->kobj = &fbi->dev->kobj;
+ dp->fb_node = fbi->node;
+ mdss_dp_edid_init(pdata);
+ break;
+ default:
+ pr_debug("%s: unhandled event=%d\n", __func__, event);
+ break;
}
return rc;
}
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index 008e7d687dbd..10fcdec49515 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -23,6 +23,7 @@
#include <linux/usb/usbpd.h>
#include "mdss_hdmi_util.h"
+#include "mdss_hdmi_edid.h"
#include "video/msm_hdmi_modes.h"
#include "mdss.h"
#include "mdss_panel.h"
@@ -235,7 +236,7 @@ struct dp_alt_mode {
#define DP_LINK_RATE_MAX DP_LINK_RATE_540
#define DP_LINK_RATE_MULTIPLIER 27000000
-
+#define DP_MAX_PIXEL_CLK_KHZ 675000
struct dpcd_cap {
char major;
char minor;
@@ -429,6 +430,9 @@ struct mdss_dp_drv_pdata {
spinlock_t event_lock;
spinlock_t lock;
struct hdmi_util_ds_data ds_data;
+ struct kobject *kobj;
+ u32 max_pclk_khz;
+ int fb_node;
};
static inline const char *__mdss_dp_pm_name(enum dp_pm_type module)
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 06e5502910b2..4285a14e7f35 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -1747,6 +1747,9 @@ static void __mdss_dsi_update_video_mode_total(struct mdss_panel_data *pdata,
return;
}
+ if (ctrl_pdata->timing_db_mode)
+ MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e8, 0x1);
+
vsync_period =
mdss_panel_get_vtotal(&pdata->panel_info);
hsync_period =
@@ -1756,23 +1759,13 @@ static void __mdss_dsi_update_video_mode_total(struct mdss_panel_data *pdata,
new_dsi_v_total =
((vsync_period - 1) << 16) | (hsync_period - 1);
- MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
- (current_dsi_v_total | 0x8000000));
- if (new_dsi_v_total & 0x8000000) {
- MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
- new_dsi_v_total);
- } else {
- MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
- (new_dsi_v_total | 0x8000000));
- MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C,
- (new_dsi_v_total & 0x7ffffff));
- }
+ MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2C, new_dsi_v_total);
if (ctrl_pdata->timing_db_mode)
MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x1e4, 0x1);
- pr_debug("%s new_fps:%d vsync:%d hsync:%d frame_rate:%d\n",
- __func__, new_fps, vsync_period, hsync_period,
+ pr_debug("%s new_fps:%d new_vtotal:0x%X cur_vtotal:0x%X frame_rate:%d\n",
+ __func__, new_fps, new_dsi_v_total, current_dsi_v_total,
ctrl_pdata->panel_data.panel_info.mipi.frame_rate);
ctrl_pdata->panel_data.panel_info.current_fps = new_fps;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index ee14fd0d0660..72fc20d97f44 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -1255,16 +1255,7 @@ static int mdss_mdp_video_vfp_fps_update(struct mdss_mdp_video_ctx *ctx,
new_vsync_period_f0 = (vsync_period * hsync_period);
mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
- current_vsync_period_f0 | 0x800000);
- if (new_vsync_period_f0 & 0x800000) {
- mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
new_vsync_period_f0);
- } else {
- mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
- new_vsync_period_f0 | 0x800000);
- mdp_video_write(ctx, MDSS_MDP_REG_INTF_VSYNC_PERIOD_F0,
- new_vsync_period_f0 & 0x7fffff);
- }
pr_debug("if:%d vtotal:%d htotal:%d f0:0x%x nw_f0:0x%x\n",
ctx->intf_num, vsync_period, hsync_period,
@@ -1474,6 +1465,11 @@ static int mdss_mdp_video_config_fps(struct mdss_mdp_ctl *ctl, int new_fps)
}
/*
+ * Make sure controller setting committed
+ */
+ wmb();
+
+ /*
* MDP INTF registers support DB on targets
* starting from MDP v1.5.
*/
@@ -1749,7 +1745,9 @@ static void mdss_mdp_fetch_start_config(struct mdss_mdp_video_ctx *ctx,
h_total = mdss_panel_get_htotal(pinfo, true);
fetch_start = (v_total - pinfo->prg_fet) * h_total + 1;
- fetch_enable = BIT(31);
+
+ fetch_enable = mdp_video_read(ctx, MDSS_MDP_REG_INTF_CONFIG);
+ fetch_enable |= BIT(31);
if (pinfo->dynamic_fps && (pinfo->dfps_update ==
DFPS_IMMEDIATE_CLK_UPDATE_MODE))
diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h
new file mode 100644
index 000000000000..0277e87a2570
--- /dev/null
+++ b/include/linux/ipa_uc_offload.h
@@ -0,0 +1,259 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_H_
+#define _IPA_UC_OFFLOAD_H_
+
+#include <linux/ipa.h>
+
+/**
+ * enum ipa_uc_offload_proto
+ * Protocol type: either WDI or Neutrino
+ *
+ * @IPA_UC_INVALID: invalid protocol
+ * @IPA_UC_WDI: WDI protocol
+ * @IPA_UC_NTN: Neutrino protocol
+ * @IPA_UC_MAX_PROT_SIZE: upper bound on protocol values
+ */
+enum ipa_uc_offload_proto {
+ IPA_UC_INVALID = 0,
+ IPA_UC_WDI = 1,
+ IPA_UC_NTN = 2,
+ IPA_UC_MAX_PROT_SIZE
+};
+
+/**
+ * struct ipa_hdr_info - Header to install on IPA HW
+ *
+ * @hdr: header to install on IPA HW
+ * @hdr_len: length of header
+ * @dst_mac_addr_offset: destination mac address offset
+ * @hdr_type: layer two header type
+ */
+struct ipa_hdr_info {
+ u8 *hdr;
+ u8 hdr_len;
+ u8 dst_mac_addr_offset;
+ enum ipa_hdr_l2_type hdr_type;
+};
+
+/**
+ * struct ipa_uc_offload_intf_params - parameters for uC offload
+ * interface registration
+ *
+ * @netdev_name: network interface name
+ * @notify: callback for exception/embedded packets
+ * @priv: callback cookie
+ * @hdr_info: header information
+ * @is_meta_data_valid: indicates whether the meta data is valid
+ * @meta_data: meta data if any
+ * @meta_data_mask: meta data mask
+ * @proto: uC offload protocol type
+ * @alt_dst_pipe: alternate routing output pipe
+ */
+struct ipa_uc_offload_intf_params {
+ const char *netdev_name;
+ ipa_notify_cb notify;
+ void *priv;
+ struct ipa_hdr_info hdr_info[IPA_IP_MAX];
+ u8 is_meta_data_valid;
+ u32 meta_data;
+ u32 meta_data_mask;
+ enum ipa_uc_offload_proto proto;
+ enum ipa_client_type alt_dst_pipe;
+};
+
+/**
+ * struct ipa_ntn_setup_info - NTN TX/Rx configuration
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @ring_base_pa: physical address of the base of the Tx/Rx ring
+ * @ntn_ring_size: size of the Tx/Rx ring (in terms of elements)
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ * buffer pool
+ * @num_buffers: Rx/Tx buffer pool size (in terms of elements)
+ * @data_buff_size: size of each data buffer allocated in DDR
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's
+ * tail pointer
+ */
+struct ipa_ntn_setup_info {
+ enum ipa_client_type client;
+ phys_addr_t ring_base_pa;
+ u32 ntn_ring_size;
+
+ phys_addr_t buff_pool_base_pa;
+ u32 num_buffers;
+ u32 data_buff_size;
+
+ phys_addr_t ntn_reg_base_ptr_pa;
+};
+
+/**
+ * struct ipa_uc_offload_out_params - out parameters for uC offload
+ *
+ * @clnt_hndl: handle that the client needs to pass during
+ * further operations
+ */
+struct ipa_uc_offload_out_params {
+ u32 clnt_hndl;
+};
+
+/**
+ * struct ipa_ntn_conn_in_params - NTN TX/Rx connect parameters
+ * @ul: parameters to connect UL pipe(from Neutrino to IPA)
+ * @dl: parameters to connect DL pipe(from IPA to Neutrino)
+ */
+struct ipa_ntn_conn_in_params {
+ struct ipa_ntn_setup_info ul;
+ struct ipa_ntn_setup_info dl;
+};
+
+/**
+ * struct ipa_ntn_conn_out_params - information provided
+ * to uC offload client
+ * @ul_uc_db_pa: physical address of IPA uc doorbell for UL
+ * @dl_uc_db_pa: physical address of IPA uc doorbell for DL
+ */
+struct ipa_ntn_conn_out_params {
+ phys_addr_t ul_uc_db_pa;
+ phys_addr_t dl_uc_db_pa;
+};
+
+/**
+ * struct ipa_uc_offload_conn_in_params - information provided by
+ * uC offload client
+ * @clnt_hndl: handle returned as part of interface registration
+ * @proto: Protocol to use for offload data path
+ * @ntn: uC RX/Tx configuration info
+ */
+struct ipa_uc_offload_conn_in_params {
+ u32 clnt_hndl;
+ union {
+ struct ipa_ntn_conn_in_params ntn;
+ } u;
+};
+
+/**
+ * struct ipa_uc_offload_conn_out_params - information provided
+ * to uC offload client
+ * @u: protocol-specific output parameters
+ * @u.ntn: NTN connect output parameters (uC doorbell addresses)
+ */
+struct ipa_uc_offload_conn_out_params {
+ union {
+ struct ipa_ntn_conn_out_params ntn;
+ } u;
+};
+
+/**
+ * struct ipa_perf_profile - to set the bandwidth profile
+ *
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
+ */
+struct ipa_perf_profile {
+ enum ipa_client_type client;
+ u32 max_supported_bw_mbps;
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+/**
+ * ipa_uc_offload_reg_intf - Client should call this function to
+ * init uC offload data path
+ *
+ * @in: [in] interface registration parameters
+ * @out: [out] handle returned to the client
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *in,
+ struct ipa_uc_offload_out_params *out);
+
+/**
+ * ipa_uc_offload_cleanup - Client Driver should call this
+ * function before unload and after disconnect
+ *
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_cleanup(u32 clnt_hdl);
+
+/**
+ * ipa_uc_offload_conn_pipes - Client should call this
+ * function to connect uC pipe for offload data path
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *in,
+ struct ipa_uc_offload_conn_out_params *out);
+
+/**
+ * ipa_uc_offload_disconn_pipes() - Client should call this
+ * function to disconnect uC pipe to disable offload data path
+ * @clnt_hdl: [in] opaque client handle assigned by IPA to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl);
+
+/**
+ * ipa_set_perf_profile() - Client should call this function to
+ * set the IPA clock bandwidth based on data rates
+ * @profile: [in] BandWidth profile to use
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_perf_profile(struct ipa_perf_profile *profile);
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+static inline int ipa_uc_offload_reg_intf(
+ struct ipa_uc_offload_intf_params *in,
+ struct ipa_uc_offload_out_params *out)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_offload_conn_pipes(
+ struct ipa_uc_offload_conn_in_params *in,
+ struct ipa_uc_offload_conn_out_params *out)
+{
+ return -EPERM;
+}
+
+static inline int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+ return -EPERM;
+}
+
+static inline int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+ return -EPERM;
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* _IPA_UC_OFFLOAD_H_ */
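
For orientation, here is a hypothetical Neutrino (NTN) client sketch of the call flow this header implies: register the interface once IPA has signalled readiness (ipa_register_ipa_ready_cb(), per the notes above), connect the uC pipes, vote a bandwidth profile, and tear down with ipa_uc_offload_disconn_pipes()/ipa_uc_offload_cleanup() in the reverse order before unload. Everything named example_*, the "eth0" netdev, the ring/buffer sizes and the 1000 Mbps vote are placeholders, not values required by the API, and the hdr_type/notify-callback details are assumptions taken from ipa.h rather than from this patch.

/* Hypothetical NTN offload client; placeholder values, error paths trimmed */
#include <linux/if_ether.h>
#include <linux/ipa_uc_offload.h>

static u32 example_hdl;
static u8 example_eth_hdr[ETH_HLEN];

static void example_ipa_notify(void *priv, enum ipa_dp_evt_type evt,
			       unsigned long data)
{
	/* exception/embedded packets from IPA are delivered here */
}

static int example_ntn_bringup(void)
{
	struct ipa_uc_offload_intf_params in = { };
	struct ipa_uc_offload_out_params out = { };
	struct ipa_uc_offload_conn_in_params conn_in = { };
	struct ipa_uc_offload_conn_out_params conn_out = { };
	struct ipa_perf_profile profile = { };
	int ret;

	/* 1) register the interface (process context, after IPA is ready) */
	in.netdev_name = "eth0";			/* placeholder */
	in.notify = example_ipa_notify;
	in.hdr_info[IPA_IP_v4].hdr = example_eth_hdr;
	in.hdr_info[IPA_IP_v4].hdr_len = ETH_HLEN;
	in.hdr_info[IPA_IP_v4].hdr_type = IPA_HDR_L2_ETHERNET_II;
	in.proto = IPA_UC_NTN;
	ret = ipa_uc_offload_reg_intf(&in, &out);
	if (ret)
		return ret;
	example_hdl = out.clnt_hndl;

	/* 2) connect UL/DL pipes; .client would be the matching
	 *    IPA_CLIENT_ODU*_PROD/CONS value, and the ring/pool fields the
	 *    physical addresses shared with the Neutrino HW
	 */
	conn_in.clnt_hndl = example_hdl;
	conn_in.u.ntn.ul.ntn_ring_size = 256;		/* placeholder */
	conn_in.u.ntn.ul.num_buffers = 256;		/* placeholder */
	conn_in.u.ntn.ul.data_buff_size = 2048;		/* placeholder */
	/* ...ring_base_pa, buff_pool_base_pa, ntn_reg_base_ptr_pa, dl... */
	ret = ipa_uc_offload_conn_pipes(&conn_in, &conn_out);
	if (ret) {
		ipa_uc_offload_cleanup(example_hdl);
		return ret;
	}
	/* conn_out.u.ntn.ul_uc_db_pa / dl_uc_db_pa are the uC doorbells */

	/* 3) vote IPA clocks for the expected throughput */
	profile.max_supported_bw_mbps = 1000;		/* placeholder */
	return ipa_set_perf_profile(&profile);
}
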
diff --git a/include/linux/mfd/wcd9xxx/pdata.h b/include/linux/mfd/wcd9xxx/pdata.h
index 52277f26b5a4..7bf2bff2f173 100755
--- a/include/linux/mfd/wcd9xxx/pdata.h
+++ b/include/linux/mfd/wcd9xxx/pdata.h
@@ -189,6 +189,7 @@ struct wcd9xxx_pdata {
u32 mclk_rate;
u32 dmic_sample_rate;
u32 mad_dmic_sample_rate;
+ u32 ecpp_dmic_sample_rate;
u32 dmic_clk_drv;
u16 use_pinctrl;
};
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 6c3ec33cc613..6275e4536bc0 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -15,6 +15,7 @@
#include <linux/interrupt.h>
#define ICNSS_MAX_IRQ_REGISTRATIONS 12
+#define ICNSS_MAX_TIMESTAMP_LEN 32
struct icnss_driver_ops {
char *name;
@@ -79,7 +80,12 @@ enum icnss_driver_mode {
struct icnss_soc_info {
void __iomem *v_addr;
phys_addr_t p_addr;
- u32 version;
+ uint32_t chip_id;
+ uint32_t chip_family;
+ uint32_t board_id;
+ uint32_t soc_id;
+ uint32_t fw_version;
+ char fw_build_timestamp[ICNSS_MAX_TIMESTAMP_LEN + 1];
};
extern int icnss_register_driver(struct icnss_driver_ops *driver);
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
index 478243712d07..82672bba7c17 100644
--- a/include/soc/qcom/socinfo.h
+++ b/include/soc/qcom/socinfo.h
@@ -224,6 +224,7 @@ char *socinfo_get_build_id(void);
uint32_t socinfo_get_platform_type(void);
uint32_t socinfo_get_platform_subtype(void);
uint32_t socinfo_get_platform_version(void);
+uint32_t socinfo_get_serial_number(void);
enum pmic_model socinfo_get_pmic_model(void);
uint32_t socinfo_get_pmic_die_revision(void);
int __init socinfo_init(void) __must_check;
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index fe19c7596f8c..2cf0469712b6 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -812,6 +812,9 @@ enum v4l2_mpeg_vidc_extradata {
#define V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI \
V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI
V4L2_MPEG_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI = 27,
+#define V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO \
+ V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO
+ V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO = 28,
};
#define V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL (V4L2_CID_MPEG_MSM_VIDC_BASE + 26)
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index 40cd867a9b7b..45cc81aaaf17 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -199,6 +199,9 @@ enum msm_vidc_extradata_type {
#define MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI \
MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI
MSM_VIDC_EXTRADATA_CONTENT_LIGHT_LEVEL_SEI = 0x00000016,
+#define MSM_VIDC_EXTRADATA_PQ_INFO \
+ MSM_VIDC_EXTRADATA_PQ_INFO
+ MSM_VIDC_EXTRADATA_PQ_INFO = 0x00000017,
MSM_VIDC_EXTRADATA_INPUT_CROP = 0x0700000E,
#define MSM_VIDC_EXTRADATA_OUTPUT_CROP \
MSM_VIDC_EXTRADATA_OUTPUT_CROP
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 0af7314f1b7b..11993bb9a639 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -5820,6 +5820,18 @@ static u32 tasha_get_dmic_sample_rate(struct snd_soc_codec *codec,
tx_stream_fs = snd_soc_read(codec, tx_fs_reg) & 0x0F;
dmic_fs = tx_stream_fs <= 4 ? WCD9XXX_DMIC_SAMPLE_RATE_2P4MHZ :
WCD9XXX_DMIC_SAMPLE_RATE_4P8MHZ;
+
+ /*
+	 * Apply the ECPP DMIC sample rate only when the ECPP path is
+	 * selected and DEC1 is not connected to any other audio path
+ */
+ if ((adc_mux_index == 1) &&
+ ((snd_soc_read(codec, WCD9335_CPE_SS_US_EC_MUX_CFG)
+ & 0x0F) == 0x0A) &&
+ ((snd_soc_read(codec, WCD9335_CDC_IF_ROUTER_TX_MUX_CFG0)
+ & 0x0C) == 0x00)) {
+ dmic_fs = pdata->ecpp_dmic_sample_rate;
+ }
} else {
dmic_fs = pdata->dmic_sample_rate;
}
@@ -12476,6 +12488,17 @@ static int tasha_handle_pdata(struct tasha_priv *tasha,
*/
pdata->mad_dmic_sample_rate = pdata->dmic_sample_rate;
}
+ if (pdata->ecpp_dmic_sample_rate ==
+ WCD9XXX_DMIC_SAMPLE_RATE_UNDEFINED) {
+ dev_info(codec->dev,
+ "%s: ecpp_dmic_rate invalid default = %d\n",
+ __func__, def_dmic_rate);
+ /*
+		 * use dmic_sample_rate as the default for ECPP DMIC
+		 * if the ECPP DMIC sample rate is undefined
+ */
+ pdata->ecpp_dmic_sample_rate = pdata->dmic_sample_rate;
+ }
if (pdata->dmic_clk_drv ==
WCD9XXX_DMIC_CLK_DRIVE_UNDEFINED) {
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 75387b7c2069..d7103f1ff00f 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -4924,14 +4924,18 @@ static const struct tavil_reg_mask_val tavil_codec_reg_defaults[] = {
{WCD934X_CDC_RX0_RX_PATH_SEC0, 0x08, 0x0},
{WCD934X_CDC_CLSH_DECAY_CTRL, 0x03, 0x0},
{WCD934X_MICB1_TEST_CTL_2, 0x07, 0x01},
+ {WCD934X_CDC_BOOST0_BOOST_CFG1, 0x3F, 0x12},
+ {WCD934X_CDC_BOOST0_BOOST_CFG2, 0x1C, 0x08},
+ {WCD934X_CDC_BOOST1_BOOST_CFG1, 0x3F, 0x12},
+ {WCD934X_CDC_BOOST1_BOOST_CFG2, 0x1C, 0x08},
};
static const struct tavil_reg_mask_val tavil_codec_reg_init_common_val[] = {
{WCD934X_CDC_CLSH_K2_MSB, 0x0F, 0x00},
{WCD934X_CDC_CLSH_K2_LSB, 0xFF, 0x60},
{WCD934X_CPE_SS_DMIC_CFG, 0x80, 0x00},
- {WCD934X_CDC_BOOST0_BOOST_CTL, 0x70, 0x40},
- {WCD934X_CDC_BOOST1_BOOST_CTL, 0x70, 0x40},
+ {WCD934X_CDC_BOOST0_BOOST_CTL, 0x70, 0x50},
+ {WCD934X_CDC_BOOST1_BOOST_CTL, 0x70, 0x50},
{WCD934X_CDC_RX7_RX_PATH_CFG1, 0x08, 0x08},
{WCD934X_CDC_RX8_RX_PATH_CFG1, 0x08, 0x08},
{WCD934X_CDC_TOP_TOP_CFG1, 0x02, 0x02},
@@ -4949,6 +4953,10 @@ static const struct tavil_reg_mask_val tavil_codec_reg_init_common_val[] = {
{WCD934X_CDC_RX8_RX_PATH_MIX_CFG, 0x01, 0x01},
{WCD934X_DATA_HUB_SB_TX11_INP_CFG, 0x01, 0x01},
{WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL, 0x01, 0x01},
+ {WCD934X_CDC_COMPANDER7_CTL3, 0x80, 0x80},
+ {WCD934X_CDC_COMPANDER8_CTL3, 0x80, 0x80},
+ {WCD934X_CDC_COMPANDER7_CTL7, 0x01, 0x01},
+ {WCD934X_CDC_COMPANDER8_CTL7, 0x01, 0x01},
};
static void tavil_codec_init_reg(struct snd_soc_codec *codec)
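
The boost and compander entries added above are plain mask/value pairs applied one register at a time during codec initialization. A simplified sketch of how such a table is typically walked, assuming the reg/mask/val member names used throughout these drivers and the standard ASoC snd_soc_update_bits() helper (an illustration, not the driver's exact loop):

/* Illustration: apply a tavil_reg_mask_val table one entry at a time */
static void example_apply_reg_table(struct snd_soc_codec *codec,
				    const struct tavil_reg_mask_val *regs,
				    size_t num)
{
	size_t i;

	for (i = 0; i < num; i++)
		snd_soc_update_bits(codec, regs[i].reg,
				    regs[i].mask, regs[i].val);
}

/* e.g. example_apply_reg_table(codec, tavil_codec_reg_init_common_val,
 *			       ARRAY_SIZE(tavil_codec_reg_init_common_val));
 */
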
diff --git a/sound/soc/msm/msm8996.c b/sound/soc/msm/msm8996.c
index 631ac1a52864..225726cad812 100644
--- a/sound/soc/msm/msm8996.c
+++ b/sound/soc/msm/msm8996.c
@@ -488,6 +488,37 @@ static int msm8996_mclk_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int msm_snd_enable_codec_ext_tx_clk(struct snd_soc_codec *codec,
+ int enable, bool dapm)
+{
+ int ret = 0;
+
+ if (!strcmp(dev_name(codec->dev), "tasha_codec"))
+ ret = tasha_cdc_mclk_tx_enable(codec, enable, dapm);
+ else {
+ dev_err(codec->dev, "%s: unknown codec to enable ext clk\n",
+ __func__);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int msm8996_mclk_tx_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ pr_debug("%s: event = %d\n", __func__, event);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ return msm_snd_enable_codec_ext_tx_clk(codec, 1, true);
+ case SND_SOC_DAPM_POST_PMD:
+ return msm_snd_enable_codec_ext_tx_clk(codec, 0, true);
+ }
+ return 0;
+}
+
static int msm_hifi_ctrl_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
@@ -532,6 +563,9 @@ static const struct snd_soc_dapm_widget msm8996_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0,
msm8996_mclk_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("MCLK TX", SND_SOC_NOPM, 0, 0,
+ msm8996_mclk_tx_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
SND_SOC_DAPM_SPK("Lineout_1 amp", NULL),
SND_SOC_DAPM_SPK("Lineout_3 amp", NULL),
SND_SOC_DAPM_SPK("Lineout_2 amp", NULL),
@@ -557,10 +591,10 @@ static const struct snd_soc_dapm_widget msm8996_dapm_widgets[] = {
};
static struct snd_soc_dapm_route wcd9335_audio_paths[] = {
- {"MIC BIAS1", NULL, "MCLK"},
- {"MIC BIAS2", NULL, "MCLK"},
- {"MIC BIAS3", NULL, "MCLK"},
- {"MIC BIAS4", NULL, "MCLK"},
+ {"MIC BIAS1", NULL, "MCLK TX"},
+ {"MIC BIAS2", NULL, "MCLK TX"},
+ {"MIC BIAS3", NULL, "MCLK TX"},
+ {"MIC BIAS4", NULL, "MCLK TX"},
};
static int slim5_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index 8efa04c3807e..b76cb7f4b210 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -3312,7 +3312,7 @@ int afe_loopback(u16 enable, u16 rx_port, u16 tx_port)
sizeof(struct afe_port_param_data_v2);
lb_cmd.dst_port_id = rx_port;
- lb_cmd.routing_mode = LB_MODE_EC_REF_VOICE_AUDIO;
+ lb_cmd.routing_mode = LB_MODE_DEFAULT;
lb_cmd.enable = (enable ? 1 : 0);
lb_cmd.loopback_cfg_minor_version = AFE_API_VERSION_LOOPBACK_CONFIG;