-rw-r--r-- Documentation/devicetree/bindings/fb/mdss-dp.txt | 52
-rw-r--r-- Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt | 7
-rw-r--r-- arch/arm/boot/dts/qcom/Makefile | 9
-rw-r--r-- arch/arm/boot/dts/qcom/msm-pm660l.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/qcom/msm8996-gpu.dtsi | 9
-rw-r--r-- arch/arm/boot/dts/qcom/msm8996-sde.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/qcom/msm8998-mdss.dtsi | 11
-rw-r--r-- arch/arm/boot/dts/qcom/msm8998-qrd-overlay.dts | 27
-rw-r--r-- arch/arm/boot/dts/qcom/msm8998-qrd-skuk-overlay.dts | 27
-rw-r--r-- arch/arm/boot/dts/qcom/msm8998-qrd-vr1-overlay.dts | 27
-rw-r--r-- arch/arm/boot/dts/qcom/msm8998-v2.1-mtp-4k-display.dts | 51
-rw-r--r-- arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi | 16
-rw-r--r-- arch/arm/boot/dts/qcom/sdm660-mdss.dtsi | 11
-rw-r--r-- arch/arm/mm/dma-mapping.c | 5
-rw-r--r-- arch/arm64/configs/msmcortex-perf_defconfig | 1
-rw-r--r-- arch/arm64/configs/msmcortex_defconfig | 1
-rw-r--r-- drivers/char/Kconfig | 10
-rw-r--r-- drivers/char/Makefile | 1
-rw-r--r-- drivers/char/diag/diag_masks.c | 4
-rw-r--r-- drivers/char/diag/diag_memorydevice.c | 94
-rw-r--r-- drivers/char/diag/diag_mux.c | 10
-rw-r--r-- drivers/char/diag/diagchar.h | 19
-rw-r--r-- drivers/char/diag/diagchar_core.c | 136
-rw-r--r-- drivers/char/diag/diagfwd.c | 1
-rw-r--r-- drivers/char/diag/diagfwd_cntl.c | 44
-rw-r--r-- drivers/char/diag/diagfwd_peripheral.c | 374
-rw-r--r-- drivers/char/diag/diagfwd_peripheral.h | 2
-rw-r--r-- drivers/char/msm_smd_pkt.c | 1397
-rw-r--r-- drivers/clk/msm/clock-local2.c | 5
-rw-r--r-- drivers/clk/msm/clock-mmss-8998.c | 1
-rw-r--r-- drivers/cpuidle/lpm-levels.c | 12
-rw-r--r-- drivers/hwtracing/coresight/coresight-etm4x.c | 4
-rw-r--r-- drivers/hwtracing/coresight/coresight-tmc.c | 7
-rw-r--r-- drivers/iommu/arm-smmu.c | 37
-rw-r--r-- drivers/iommu/dma-mapping-fast.c | 38
-rw-r--r-- drivers/iommu/io-pgtable-fast.c | 19
-rw-r--r-- drivers/iommu/iommu-debug.c | 6
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 23
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c | 59
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h | 3
-rw-r--r-- drivers/net/wireless/wcnss/wcnss_wlan.c | 25
-rw-r--r-- drivers/perf/arm_pmu.c | 84
-rw-r--r-- drivers/platform/msm/ipa/ipa_v2/ipa.c | 26
-rw-r--r-- drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 32
-rw-r--r-- drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c | 21
-rw-r--r-- drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 7
-rw-r--r-- drivers/platform/msm/ipa/ipa_v3/ipa.c | 30
-rw-r--r-- drivers/platform/msm/msm_11ad/msm_11ad.c | 16
-rw-r--r-- drivers/power/reset/msm-poweroff.c | 50
-rw-r--r-- drivers/regulator/qpnp-labibb-regulator.c | 51
-rw-r--r-- drivers/regulator/qpnp-oledb-regulator.c | 62
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 7
-rw-r--r-- drivers/soc/qcom/Kconfig | 17
-rw-r--r-- drivers/soc/qcom/Makefile | 1
-rw-r--r-- drivers/soc/qcom/common_log.c | 41
-rw-r--r-- drivers/soc/qcom/cpuss_dump.c | 4
-rw-r--r-- drivers/soc/qcom/dcc.c | 6
-rw-r--r-- drivers/soc/qcom/memory_dump_v2.c | 30
-rw-r--r-- drivers/soc/qcom/msm_minidump.c | 371
-rw-r--r-- drivers/soc/qcom/watchdog_v2.c | 18
-rw-r--r-- drivers/staging/android/ion/ion.c | 85
-rw-r--r-- drivers/staging/android/sync.c | 5
-rw-r--r-- drivers/video/fbdev/msm/mdss_debug.c | 4
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp.c | 457
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp.h | 117
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp_aux.c | 443
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp_util.c | 72
-rw-r--r-- drivers/video/fbdev/msm/mdss_dp_util.h | 21
-rw-r--r-- drivers/video/fbdev/msm/mdss_dsi.c | 5
-rw-r--r-- drivers/video/fbdev/msm/mdss_dsi.h | 1
-rw-r--r-- drivers/video/fbdev/msm/mdss_dsi_host.c | 11
-rw-r--r-- drivers/video/fbdev/msm/mdss_dsi_status.c | 31
-rw-r--r-- drivers/video/fbdev/msm/mdss_fb.c | 24
-rw-r--r-- drivers/video/fbdev/msm/mdss_fb.h | 1
-rw-r--r-- drivers/video/fbdev/msm/mdss_hdcp_1x.c | 4
-rw-r--r-- drivers/video/fbdev/msm/mdss_hdmi_edid.c | 5
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp.c | 3
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp.h | 1
-rw-r--r-- drivers/video/fbdev/msm/mdss_mdp_overlay.c | 15
-rw-r--r-- include/linux/io-pgtable-fast.h | 3
-rw-r--r-- include/linux/msm_smd_pkt.h | 23
-rw-r--r-- include/linux/perf/arm_pmu.h | 8
-rw-r--r-- include/linux/qpnp/qpnp-revid.h | 5
-rw-r--r-- include/linux/regulator/qpnp-labibb-regulator.h | 1
-rw-r--r-- include/soc/qcom/minidump.h | 48
-rw-r--r-- kernel/trace/msm_rtb.c | 10
-rw-r--r-- net/ipv6/netfilter/Kconfig | 12
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 30
91 files changed, 4375 insertions, 545 deletions
diff --git a/Documentation/devicetree/bindings/fb/mdss-dp.txt b/Documentation/devicetree/bindings/fb/mdss-dp.txt
index aa227c2628da..707e6edb26ea 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dp.txt
@@ -27,7 +27,46 @@ Required properties
- qcom,aux-en-gpio: Specifies the aux-channel enable gpio.
- qcom,aux-sel-gpio: Specifies the aux-channel select gpio.
- qcom,usbplug-cc-gpio: Specifies the usbplug orientation gpio.
-- qcom,aux-cfg-settings: An array that specifies the DP AUX configuration settings.
+- qcom,aux-cfg0-settings: Specifies the DP AUX configuration 0 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg1-settings: Specifies the DP AUX configuration 1 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg2-settings: Specifies the DP AUX configuration 2 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg3-settings: Specifies the DP AUX configuration 3 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg4-settings: Specifies the DP AUX configuration 4 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg5-settings: Specifies the DP AUX configuration 5 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg6-settings: Specifies the DP AUX configuration 6 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg7-settings: Specifies the DP AUX configuration 7 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg8-settings: Specifies the DP AUX configuration 8 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
+- qcom,aux-cfg9-settings: Specifies the DP AUX configuration 9 settings. The first
+ entry in this array corresponds to the register offset
+ within DP AUX, while the remaining entries indicate the
+ programmable values.
Optional properties:
- qcom,<type>-supply-entries: A node that lists the elements of the supply used by the
@@ -87,7 +126,16 @@ Example:
"core_aux_clk", "core_cfg_ahb_clk", "ctrl_link_clk",
"ctrl_link_iface_clk", "ctrl_crypto_clk", "ctrl_pixel_clk";
- qcom,aux-cfg-settings = [00 13 00 10 0a 26 0a 03 8b 03];
+ qcom,aux-cfg0-settings = [1c 00];
+ qcom,aux-cfg1-settings = [20 13 23 1d];
+ qcom,aux-cfg2-settings = [24 00];
+ qcom,aux-cfg3-settings = [28 00];
+ qcom,aux-cfg4-settings = [2c 0a];
+ qcom,aux-cfg5-settings = [30 26];
+ qcom,aux-cfg6-settings = [34 0a];
+ qcom,aux-cfg7-settings = [38 03];
+ qcom,aux-cfg8-settings = [3c bb];
+ qcom,aux-cfg9-settings = [40 03];
qcom,logical2physical-lane-map = [02 03 01 00];
qcom,phy-register-offset = <0x4>;
qcom,max-pclk-frequency-khz = <593470>;
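Note on the new per-register properties above: the first byte of each qcom,aux-cfgN-settings array is the register offset within DP AUX and the remaining bytes are the programmable values. A minimal sketch of how a driver could read one such array with the generic of_* helpers follows; the function name and buffer size are illustrative assumptions, not the actual mdss_dp parsing code.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/string.h>

/* Illustrative only: byte 0 = AUX register offset, bytes 1..N = values. */
static int example_read_aux_cfg(struct device_node *np, const char *prop,
				u8 *offset, u8 *vals, int max_vals)
{
	u8 tmp[16];
	int len, rc;

	len = of_property_count_u8_elems(np, prop);
	if (len < 2 || len > (int)sizeof(tmp))
		return -EINVAL;

	rc = of_property_read_u8_array(np, prop, tmp, len);
	if (rc)
		return rc;

	*offset = tmp[0];		/* register offset within DP AUX */
	len = min(len - 1, max_vals);
	memcpy(vals, &tmp[1], len);	/* programmable values */
	return len;			/* number of values copied */
}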
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
index c9cfc889faba..0d53b9fa4378 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
@@ -151,6 +151,10 @@ LAB subnode optional properties:
any value in the allowed limit.
- qcom,notify-lab-vreg-ok-sts: A boolean property which upon set will
poll and notify the lab_vreg_ok status.
+- qcom,qpnp-lab-sc-wait-time-ms: This property specifies the time (in ms)
+ to poll for short-circuit detection.
+ If not specified, the default time
+ is 5 sec.
Following properties are available only for PM660A:
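Since qcom,qpnp-lab-sc-wait-time-ms is optional and defaults to 5 seconds, a driver-side read could follow the sketch below. The helper name and the default macro are assumptions for illustration, not the actual qpnp-labibb-regulator code.

#include <linux/of.h>
#include <linux/types.h>

#define EXAMPLE_DEFAULT_SC_WAIT_MS	5000	/* binding default: 5 sec */

static u32 example_get_sc_wait_ms(struct device_node *np)
{
	u32 val = EXAMPLE_DEFAULT_SC_WAIT_MS;

	/* of_property_read_u32() leaves val untouched if the property is absent. */
	of_property_read_u32(np, "qcom,qpnp-lab-sc-wait-time-ms", &val);
	return val;
}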
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt
index efff6c79a9c0..55fde0d4feb6 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-oledb-regulator.txt
@@ -62,13 +62,6 @@ Required Node Structure
rail. This property is applicable only if qcom,ext-pin-ctl
property is specified and it is specific to PM660A.
-- qcom,force-pd-control
- Usage: optional
- Value type: <bool>
- Definition: Used to enable the pull down control forcibly via SPMI by
- disabling the pull down configuration done by hardware
- automatically through SWIRE pulses.
-
- qcom,pbs-client
Usage: optional
Value type: <phandle>
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index 0b60560281c7..f5c0da0cd8e7 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -109,7 +109,10 @@ dtbo-$(CONFIG_ARCH_MSM8998) += \
msm8998-v2-cdp-overlay.dtbo \
msm8998-v2-mtp-overlay.dtbo \
msm8998-v2.1-cdp-overlay.dtbo \
- msm8998-v2.1-mtp-overlay.dtbo
+ msm8998-v2.1-mtp-overlay.dtbo \
+ msm8998-qrd-overlay.dtbo \
+ msm8998-qrd-vr1-overlay.dtbo \
+ msm8998-qrd-skuk-overlay.dtbo
msm8998-cdp-overlay.dtbo-base := msm8998.dtb
msm8998-mtp-overlay.dtbo-base := msm8998.dtb
@@ -117,6 +120,9 @@ msm8998-v2-cdp-overlay.dtbo-base := msm8998-v2.dtb
msm8998-v2-mtp-overlay.dtbo-base := msm8998-v2.dtb
msm8998-v2.1-cdp-overlay.dtbo-base := msm8998-v2.1.dtb
msm8998-v2.1-mtp-overlay.dtbo-base := msm8998-v2.1.dtb
+msm8998-qrd-overlay.dtbo-base := msm8998-qrd.dtb
+msm8998-qrd-vr1-overlay.dtbo-base := msm8998-qrd-vr1.dtb
+msm8998-qrd-skuk-overlay.dtbo-base := msm8998-qrd-skuk.dtb
else
dtb-$(CONFIG_ARCH_MSM8998) += msm8998-sim.dtb \
msm8998-rumi.dtb \
@@ -141,6 +147,7 @@ dtb-$(CONFIG_ARCH_MSM8998) += msm8998-sim.dtb \
apq8098-v2-qrd.dtb \
apq8098-v2-qrd-skuk-hdk.dtb \
msm8998-v2.1-mtp.dtb \
+ msm8998-v2.1-mtp-4k-display.dtb \
msm8998-v2.1-cdp.dtb \
msm8998-v2.1-qrd.dtb \
apq8098-v2.1-mtp.dtb \
diff --git a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
index 236565af6af2..679149a78833 100644
--- a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
@@ -417,6 +417,7 @@
#size-cells = <1>;
qcom,pmic-revid = <&pm660l_revid>;
reg = <0xe000 0x100>;
+ qcom,pbs-client = <&pm660l_pbs>;
label = "oledb";
regulator-name = "regulator-oledb";
@@ -464,6 +465,8 @@
qcom,qpnp-lab-slew-rate = <5000>;
qcom,qpnp-lab-init-voltage = <4600000>;
qcom,qpnp-lab-init-amoled-voltage = <4600000>;
+
+ qcom,notify-lab-vreg-ok-sts;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-gpu.dtsi b/arch/arm/boot/dts/qcom/msm8996-gpu.dtsi
index 215608959dc5..27f692bb14af 100644
--- a/arch/arm/boot/dts/qcom/msm8996-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-gpu.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -89,6 +89,13 @@
coresight-child-list = <&funnel_in0>;
coresight-child-ports = <4>;
+ /* DRM settings */
+ qcom,gpmu-tsens = <0x00060007>;
+ qcom,lm-max-power = <5448>;
+ qcom,gpmu-firmware = "a530v3_gpmu.fw2";
+ qcom,gpmu-version = <1 0>;
+ qcom,zap-shader = "a530_zap";
+
clocks = <&clock_gpu clk_gpu_gx_gfx3d_clk>,
<&clock_gpu clk_gpu_ahb_clk>,
<&clock_gpu clk_gpu_gx_rbbmtimer_clk>,
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
index 8aebac3b0e22..cb33df82da0d 100644
--- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
@@ -51,6 +51,8 @@
#interrupt-cells = <1>;
iommus = <&mdp_smmu 0>;
+ gpus = <&msm_gpu>;
+
/* hw blocks */
qcom,sde-off = <0x1000>;
qcom,sde-ctl-off = <0x2000 0x2200 0x2400
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
index d0d13332595a..64f377f1a576 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
@@ -10,8 +10,6 @@
* GNU General Public License for more details.
*/
-#include "dsi-panel-sim-video.dtsi"
-#include "dsi-panel-sim-dualmipi-video.dtsi"
#include "dsi-panel-nt35597-dualmipi-wqxga-video.dtsi"
#include "dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi"
#include "dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
index 24186aca22be..2b9e13ea24f2 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
@@ -502,7 +502,16 @@
qcom,msm_ext_disp = <&msm_ext_disp>;
- qcom,aux-cfg-settings = [00 13 00 10 0a 26 0a 03 8b 03];
+ qcom,aux-cfg0-settings = [1c 00];
+ qcom,aux-cfg1-settings = [20 13 23 1d];
+ qcom,aux-cfg2-settings = [24 00];
+ qcom,aux-cfg3-settings = [28 00];
+ qcom,aux-cfg4-settings = [2c 0a];
+ qcom,aux-cfg5-settings = [30 26];
+ qcom,aux-cfg6-settings = [34 0a];
+ qcom,aux-cfg7-settings = [38 03];
+ qcom,aux-cfg8-settings = [3c bb];
+ qcom,aux-cfg9-settings = [40 03];
qcom,logical2physical-lane-map = [02 03 01 00];
qcom,core-supply-entries {
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-overlay.dts b/arch/arm/boot/dts/qcom/msm8998-qrd-overlay.dts
new file mode 100644
index 000000000000..55255261a827
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-overlay.dts
@@ -0,0 +1,27 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "msm8998-qrd.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8998 QRD";
+ compatible = "qcom,msm8998-qrd", "qcom,msm8998", "qcom,qrd";
+ qcom,msm-id = <292 0x0>;
+ qcom,board-id = <11 0>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk-overlay.dts b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk-overlay.dts
new file mode 100644
index 000000000000..408a067dbeee
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk-overlay.dts
@@ -0,0 +1,27 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "msm8998-qrd-skuk.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8998 QRD SKUK";
+ compatible = "qcom,msm8998-qrd", "qcom,msm8998", "qcom,qrd";
+ qcom,msm-id = <292 0x0>;
+ qcom,board-id = <0x01000b 0x80>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-vr1-overlay.dts b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1-overlay.dts
new file mode 100644
index 000000000000..ff0e24dd0371
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1-overlay.dts
@@ -0,0 +1,27 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/clock/msm-clocks-8998.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "msm8998-qrd-vr1.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8998 QRD VR1 Board";
+ compatible = "qcom,msm8998-qrd", "qcom,msm8998", "qcom,qrd";
+ qcom,msm-id = <292 0x0>;
+ qcom,board-id = <0x02000b 0x80>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.1-mtp-4k-display.dts b/arch/arm/boot/dts/qcom/msm8998-v2.1-mtp-4k-display.dts
new file mode 100644
index 000000000000..7d537aa35533
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-v2.1-mtp-4k-display.dts
@@ -0,0 +1,51 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "msm8998-v2.1.dtsi"
+#include "msm8998-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. MSM 8998 v2.1 MTP, 4k display";
+ compatible = "qcom,msm8998-mtp", "qcom,msm8998", "qcom,mtp";
+ qcom,board-id = <8 4>;
+};
+
+&mdss_mdp {
+ qcom,mdss-pref-prim-intf = "dsi";
+};
+
+&mdss_dsi {
+ hw-config = "split_dsi";
+};
+
+&mdss_dsi0 {
+ qcom,dsi-pref-prim-pan = <&dsi_sharp_4k_dsc_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-reset-gpio = <&tlmm 94 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,panel-mode-gpio = <&tlmm 91 0>;
+};
+
+&mdss_dsi1 {
+ qcom,dsi-pref-prim-pan = <&dsi_sharp_4k_dsc_cmd>;
+ pinctrl-names = "mdss_default", "mdss_sleep";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-reset-gpio = <&tlmm 94 0>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,panel-mode-gpio = <&tlmm 91 0>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
index 4cd8bf4407ac..19862f02aa84 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
@@ -147,7 +147,13 @@
23 1e 07 08 05 03 04 a0
23 18 07 08 04 03 04 a0];
qcom,esd-check-enabled;
- qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
};
&dsi_dual_nt36850_truly_cmd {
@@ -195,7 +201,13 @@
20 12 05 06 03 13 04 a0];
qcom,config-select = <&dsi_nt35597_truly_dsc_cmd_config2>;
qcom,esd-check-enabled;
- qcom,mdss-dsi-panel-status-check-mode = "bta_check";
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,mdss-dsi-panel-max-error-count = <3>;
};
&dsi_dual_nt35597_video {
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
index b263d2a68792..787c4f1e2fb6 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
@@ -505,7 +505,16 @@
qcom,msm_ext_disp = <&msm_ext_disp>;
- qcom,aux-cfg-settings = [00 13 00 00 0a 28 0a 03 b7 03];
+ qcom,aux-cfg0-settings = [20 00];
+ qcom,aux-cfg1-settings = [24 13 23 1d];
+ qcom,aux-cfg2-settings = [28 00];
+ qcom,aux-cfg3-settings = [2c 00];
+ qcom,aux-cfg4-settings = [30 0a];
+ qcom,aux-cfg5-settings = [34 28];
+ qcom,aux-cfg6-settings = [38 0a];
+ qcom,aux-cfg7-settings = [3c 03];
+ qcom,aux-cfg8-settings = [40 b7];
+ qcom,aux-cfg9-settings = [44 03];
qcom,logical2physical-lane-map = [00 01 02 03];
qcom,phy-register-offset = <0x4>;
qcom,max-pclk-frequency-khz = <300000>;
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 8bda55f00b7b..a25d6b0e22a4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2169,6 +2169,9 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
if (!bitmap_size)
return ERR_PTR(-EINVAL);
+ WARN(!IS_ALIGNED(size, SZ_128M),
+ "size is not aligned to 128M, alignment enforced");
+
if (bitmap_size > PAGE_SIZE) {
extensions = bitmap_size / PAGE_SIZE;
bitmap_size = PAGE_SIZE;
@@ -2191,7 +2194,7 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
mapping->nr_bitmaps = 1;
mapping->extensions = extensions;
mapping->base = base;
- mapping->bits = bits;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
spin_lock_init(&mapping->lock);
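A quick worked check on the new WARN, assuming 4 KB pages: a 128 MB IOVA range is 128 MB / 4 KB = 32768 pages, so its allocation bitmap needs 32768 bits = 4096 bytes, i.e. exactly one PAGE_SIZE bitmap extension. Sizes that are not 128 MB aligned would leave the last bitmap only partially used, and the follow-on change makes mapping->bits describe a single clamped bitmap (BITS_PER_BYTE * bitmap_size) rather than the unclamped total bit count.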
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 4b0918e2136d..2f468dfe1c5a 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -186,6 +186,7 @@ CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES_128=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index caa140651102..fdf8deacee17 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -186,6 +186,7 @@ CONFIG_IP_NF_ARPFILTER=y
CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_IPTABLES_128=y
CONFIG_IP6_NF_MATCH_RPFILTER=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 8e3bff9c7fe9..4bbe4e5f9a6d 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -592,6 +592,16 @@ config DEVPORT
source "drivers/s390/char/Kconfig"
+config MSM_SMD_PKT
+ bool "Enable device interface for some SMD packet ports"
+ default n
+ depends on MSM_SMD
+ help
+ smd_pkt driver provides the interface for userspace clients
+ to communicate over smd via device nodes. This enables
+ userspace clients to read and write to some smd packet
+ channels for MSM chipsets.
+
config TILE_SROM
bool "Character-device access via hypervisor to the Tilera SPI ROM"
depends on TILE
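The help text above describes device-node access to SMD packet ports; a hypothetical userspace client could look like the following. The node name /dev/smdpkt_loopback is an assumption for illustration; real names come from the platform's smdpkt device-tree entries.

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd = open("/dev/smdpkt_loopback", O_RDWR);	/* hypothetical node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Each write() sends one SMD packet; each read() returns one packet. */
	if (write(fd, "ping", 4) != 4)
		perror("write");
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("received %zd bytes\n", n);
	close(fd);
	return 0;
}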
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7b0bd5408324..77697b8c42c0 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
+obj-$(CONFIG_MSM_SMD_PKT) += msm_smd_pkt.o
obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 437077c4d44d..3c10462c2274 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -457,7 +457,9 @@ static void diag_send_feature_mask_update(uint8_t peripheral)
if (driver->supports_apps_hdlc_encoding)
DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
if (driver->supports_apps_header_untagging) {
- if (peripheral == PERIPHERAL_MODEM) {
+ if (peripheral == PERIPHERAL_MODEM ||
+ peripheral == PERIPHERAL_LPASS ||
+ peripheral == PERIPHERAL_CDSP) {
DIAG_SET_FEATURE_MASK(F_DIAG_PKT_HEADER_UNTAG);
driver->peripheral_untag[peripheral] =
ENABLE_PKT_HEADER_UNTAGGING;
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index dc3029cc459d..bd34e6cceec0 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -129,6 +129,37 @@ void diag_md_close_all()
diag_ws_reset(DIAG_WS_MUX);
}
+static int diag_md_get_peripheral(int ctxt)
+{
+ int peripheral;
+
+ if (driver->num_pd_session) {
+ peripheral = GET_PD_CTXT(ctxt);
+ switch (peripheral) {
+ case UPD_WLAN:
+ case UPD_AUDIO:
+ case UPD_SENSORS:
+ break;
+ case DIAG_ID_MPSS:
+ case DIAG_ID_LPASS:
+ case DIAG_ID_CDSP:
+ default:
+ peripheral =
+ GET_BUF_PERIPHERAL(ctxt);
+ if (peripheral > NUM_PERIPHERALS)
+ peripheral = -EINVAL;
+ break;
+ }
+ } else {
+ /* Account for Apps data as well */
+ peripheral = GET_BUF_PERIPHERAL(ctxt);
+ if (peripheral > NUM_PERIPHERALS)
+ peripheral = -EINVAL;
+ }
+
+ return peripheral;
+}
+
int diag_md_write(int id, unsigned char *buf, int len, int ctx)
{
int i;
@@ -144,26 +175,13 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
if (!buf || len < 0)
return -EINVAL;
- if (driver->pd_logging_mode) {
- peripheral = GET_PD_CTXT(ctx);
- switch (peripheral) {
- case UPD_WLAN:
- break;
- case DIAG_ID_MPSS:
- default:
- peripheral = GET_BUF_PERIPHERAL(ctx);
- if (peripheral > NUM_PERIPHERALS)
- return -EINVAL;
- break;
- }
- } else {
- /* Account for Apps data as well */
- peripheral = GET_BUF_PERIPHERAL(ctx);
- if (peripheral > NUM_PERIPHERALS)
- return -EINVAL;
- }
+ peripheral =
+ diag_md_get_peripheral(ctx);
+ if (peripheral < 0)
+ return -EINVAL;
- session_info = diag_md_session_get_peripheral(peripheral);
+ session_info =
+ diag_md_session_get_peripheral(peripheral);
if (!session_info)
return -EIO;
@@ -243,31 +261,15 @@ int diag_md_copy_to_user(char __user *buf, int *pret, size_t buf_size,
entry = &ch->tbl[j];
if (entry->len <= 0)
continue;
- if (driver->pd_logging_mode) {
- peripheral = GET_PD_CTXT(entry->ctx);
- switch (peripheral) {
- case UPD_WLAN:
- break;
- case DIAG_ID_MPSS:
- default:
- peripheral =
- GET_BUF_PERIPHERAL(entry->ctx);
- if (peripheral > NUM_PERIPHERALS)
- goto drop_data;
- break;
- }
- } else {
- /* Account for Apps data as well */
- peripheral = GET_BUF_PERIPHERAL(entry->ctx);
- if (peripheral > NUM_PERIPHERALS)
- goto drop_data;
- }
+
+ peripheral = diag_md_get_peripheral(entry->ctx);
+ if (peripheral < 0)
+ goto drop_data;
session_info =
diag_md_session_get_peripheral(peripheral);
if (!session_info) {
- mutex_unlock(&driver->diagfwd_untag_mutex);
- return -EIO;
+ goto drop_data;
}
if (session_info && info &&
@@ -363,9 +365,15 @@ int diag_md_close_peripheral(int id, uint8_t peripheral)
spin_lock_irqsave(&ch->lock, flags);
for (i = 0; i < ch->num_tbl_entries && !found; i++) {
entry = &ch->tbl[i];
- if ((GET_BUF_PERIPHERAL(entry->ctx) != peripheral) ||
- (GET_PD_CTXT(entry->ctx) != peripheral))
- continue;
+
+ if (peripheral > NUM_PERIPHERALS) {
+ if (GET_PD_CTXT(entry->ctx) != peripheral)
+ continue;
+ } else {
+ if (GET_BUF_PERIPHERAL(entry->ctx) !=
+ peripheral)
+ continue;
+ }
found = 1;
if (ch->ops && ch->ops->write_done) {
ch->ops->write_done(entry->buf, entry->len,
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index 55c5de1ea9fc..39f4b08d9b0a 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -27,7 +27,7 @@
#include "diag_mux.h"
#include "diag_usb.h"
#include "diag_memorydevice.h"
-
+#include "diag_ipc_logging.h"
struct diag_mux_state_t *diag_mux;
static struct diag_logger_t usb_logger;
@@ -146,7 +146,15 @@ int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
case DIAG_ID_MPSS:
upd = PERIPHERAL_MODEM;
break;
+ case DIAG_ID_LPASS:
+ upd = PERIPHERAL_LPASS;
+ break;
+ case DIAG_ID_CDSP:
+ upd = PERIPHERAL_CDSP;
+ break;
case UPD_WLAN:
+ case UPD_AUDIO:
+ case UPD_SENSORS:
break;
default:
pr_err("diag: invalid pd ctxt= %d\n", upd);
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index b68a47219132..b17538a10ea9 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -76,7 +76,9 @@
| DIAG_CON_LPASS | DIAG_CON_WCNSS \
| DIAG_CON_SENSORS | DIAG_CON_WDSP \
| DIAG_CON_CDSP)
-#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN)
+#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN \
+ | DIAG_CON_UPD_AUDIO \
+ | DIAG_CON_UPD_SENSORS)
#define DIAG_STM_MODEM 0x01
#define DIAG_STM_LPASS 0x02
@@ -222,6 +224,10 @@
#define DIAG_ID_APPS 1
#define DIAG_ID_MPSS 2
#define DIAG_ID_WLAN 3
+#define DIAG_ID_LPASS 4
+#define DIAG_ID_CDSP 5
+#define DIAG_ID_AUDIO 6
+#define DIAG_ID_SENSORS 7
/* Number of sessions possible in Memory Device Mode. +1 for Apps data */
#define NUM_MD_SESSIONS (NUM_PERIPHERALS \
@@ -598,10 +604,15 @@ struct diagchar_dev {
int in_busy_dcipktdata;
int logging_mode;
int logging_mask;
- int pd_logging_mode;
+ int pd_logging_mode[NUM_UPD];
+ int pd_session_clear[NUM_UPD];
int num_pd_session;
- int cpd_len_1;
- int cpd_len_2;
+ int cpd_len_1[NUM_PERIPHERALS];
+ int cpd_len_2[NUM_PERIPHERALS];
+ int upd_len_1_a[NUM_PERIPHERALS];
+ int upd_len_1_b[NUM_PERIPHERALS];
+ int upd_len_2_a;
+ int upd_len_2_b;
int mask_check;
uint32_t md_session_mask;
uint8_t md_session_mode;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 4f56696f52e9..574a13de6a0d 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -397,6 +397,10 @@ static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
ret |= DIAG_CON_CDSP;
if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
ret |= DIAG_CON_UPD_WLAN;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_AUDIO))
+ ret |= DIAG_CON_UPD_AUDIO;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_SENSORS))
+ ret |= DIAG_CON_UPD_SENSORS;
return ret;
}
int diag_mask_param(void)
@@ -426,8 +430,8 @@ void diag_clear_masks(struct diag_md_session_t *info)
static void diag_close_logging_process(const int pid)
{
- int i;
- int session_peripheral_mask;
+ int i, j;
+ int session_mask;
struct diag_md_session_t *session_info = NULL;
struct diag_logging_mode_param_t params;
@@ -443,27 +447,34 @@ static void diag_close_logging_process(const int pid)
mutex_unlock(&driver->diag_maskclear_mutex);
mutex_lock(&driver->diagchar_mutex);
- session_peripheral_mask = session_info->peripheral_mask;
+
+ session_mask = session_info->peripheral_mask;
diag_md_session_close(session_info);
- mutex_unlock(&driver->diagchar_mutex);
+
for (i = 0; i < NUM_MD_SESSIONS; i++)
- if (MD_PERIPHERAL_MASK(i) & session_peripheral_mask)
+ if (MD_PERIPHERAL_MASK(i) & session_mask)
diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
params.req_mode = USB_MODE;
params.mode_param = 0;
params.peripheral_mask =
- diag_translate_kernel_to_user_mask(session_peripheral_mask);
- if (driver->pd_logging_mode)
- params.pd_mask =
- diag_translate_kernel_to_user_mask(session_peripheral_mask);
-
- if (session_peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN)) {
- driver->pd_logging_mode--;
- driver->num_pd_session--;
+ diag_translate_kernel_to_user_mask(session_mask);
+
+ for (i = UPD_WLAN; i < NUM_MD_SESSIONS; i++) {
+ if (session_mask &
+ MD_PERIPHERAL_MASK(i)) {
+ j = i - UPD_WLAN;
+ driver->pd_session_clear[j] = 1;
+ driver->pd_logging_mode[j] = 0;
+ driver->num_pd_session -= 1;
+ params.pd_mask =
+ diag_translate_kernel_to_user_mask(session_mask);
+ } else
+ params.pd_mask = 0;
}
- mutex_lock(&driver->diagchar_mutex);
+
diag_switch_logging(&params);
+
mutex_unlock(&driver->diagchar_mutex);
}
@@ -1562,17 +1573,22 @@ static uint32_t diag_translate_mask(uint32_t peripheral_mask)
ret |= (1 << PERIPHERAL_CDSP);
if (peripheral_mask & DIAG_CON_UPD_WLAN)
ret |= (1 << UPD_WLAN);
+ if (peripheral_mask & DIAG_CON_UPD_AUDIO)
+ ret |= (1 << UPD_AUDIO);
+ if (peripheral_mask & DIAG_CON_UPD_SENSORS)
+ ret |= (1 << UPD_SENSORS);
return ret;
}
static int diag_switch_logging(struct diag_logging_mode_param_t *param)
{
- int new_mode;
+ int new_mode, i;
int curr_mode;
int err = 0;
uint8_t do_switch = 1;
uint32_t peripheral_mask = 0;
+ uint8_t peripheral, upd;
if (!param)
return -EINVAL;
@@ -1583,10 +1599,28 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param)
return -EINVAL;
}
- switch (param->pd_mask) {
- case DIAG_CON_UPD_WLAN:
- if (driver->md_session_map[PERIPHERAL_MODEM] &&
- (MD_PERIPHERAL_MASK(PERIPHERAL_MODEM) &
+ if (param->pd_mask) {
+ switch (param->pd_mask) {
+ case DIAG_CON_UPD_WLAN:
+ peripheral = PERIPHERAL_MODEM;
+ upd = UPD_WLAN;
+ break;
+ case DIAG_CON_UPD_AUDIO:
+ peripheral = PERIPHERAL_LPASS;
+ upd = UPD_AUDIO;
+ break;
+ case DIAG_CON_UPD_SENSORS:
+ peripheral = PERIPHERAL_LPASS;
+ upd = UPD_SENSORS;
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "asking for mode switch with no pd mask set\n");
+ return -EINVAL;
+ }
+
+ if (driver->md_session_map[peripheral] &&
+ (MD_PERIPHERAL_MASK(peripheral) &
diag_mux->mux_mask)) {
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"diag_fr: User PD is already logging onto active peripheral logging\n");
@@ -1595,15 +1629,16 @@ static int diag_switch_logging(struct diag_logging_mode_param_t *param)
peripheral_mask =
diag_translate_mask(param->pd_mask);
param->peripheral_mask = peripheral_mask;
- driver->pd_logging_mode++;
- driver->num_pd_session++;
- break;
-
- default:
+ i = upd - UPD_WLAN;
+ if (!driver->pd_session_clear[i]) {
+ driver->pd_logging_mode[i] = 1;
+ driver->num_pd_session += 1;
+ driver->pd_session_clear[i] = 0;
+ }
+ } else {
peripheral_mask =
diag_translate_mask(param->peripheral_mask);
param->peripheral_mask = peripheral_mask;
- break;
}
switch (param->req_mode) {
@@ -1945,9 +1980,36 @@ static int diag_ioctl_hdlc_toggle(unsigned long ioarg)
return 0;
}
-static int diag_ioctl_query_pd_logging(unsigned long ioarg)
+static int diag_ioctl_query_pd_logging(struct diag_logging_mode_param_t *param)
{
int ret = -EINVAL;
+ int peripheral;
+ char *p_str = NULL;
+
+ if (!param)
+ return -EINVAL;
+
+ if (!param->pd_mask) {
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "query with no pd mask set, returning error\n");
+ return -EINVAL;
+ }
+
+ switch (param->pd_mask) {
+ case DIAG_CON_UPD_WLAN:
+ peripheral = PERIPHERAL_MODEM;
+ p_str = "MODEM";
+ break;
+ case DIAG_CON_UPD_AUDIO:
+ case DIAG_CON_UPD_SENSORS:
+ peripheral = PERIPHERAL_LPASS;
+ p_str = "LPASS";
+ break;
+ default:
+ DIAG_LOG(DIAG_DEBUG_USERSPACE,
+ "Invalid pd mask, returning EINVAL\n");
+ return -EINVAL;
+ }
DIAG_LOG(DIAG_DEBUG_USERSPACE,
"diag: %s: Untagging support on APPS is %s\n", __func__,
@@ -1955,12 +2017,13 @@ static int diag_ioctl_query_pd_logging(unsigned long ioarg)
"present" : "absent"));
DIAG_LOG(DIAG_DEBUG_USERSPACE,
- "diag: %s: Tagging support on MODEM is %s\n", __func__,
- (driver->feature[PERIPHERAL_MODEM].untag_header ?
+ "diag: %s: Tagging support on %s is %s\n",
+ __func__, p_str,
+ (driver->feature[peripheral].untag_header ?
"present" : "absent"));
if (driver->supports_apps_header_untagging &&
- driver->feature[PERIPHERAL_MODEM].untag_header)
+ driver->feature[peripheral].untag_header)
ret = 0;
return ret;
@@ -2206,7 +2269,10 @@ long diagchar_compat_ioctl(struct file *filp,
result = diag_ioctl_hdlc_toggle(ioarg);
break;
case DIAG_IOCTL_QUERY_PD_LOGGING:
- result = diag_ioctl_query_pd_logging(ioarg);
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ result = diag_ioctl_query_pd_logging(&mode_param);
break;
}
return result;
@@ -2332,7 +2398,10 @@ long diagchar_ioctl(struct file *filp,
result = diag_ioctl_hdlc_toggle(ioarg);
break;
case DIAG_IOCTL_QUERY_PD_LOGGING:
- result = diag_ioctl_query_pd_logging(ioarg);
+ if (copy_from_user((void *)&mode_param, (void __user *)ioarg,
+ sizeof(mode_param)))
+ return -EFAULT;
+ result = diag_ioctl_query_pd_logging(&mode_param);
break;
}
return result;
@@ -3474,7 +3543,10 @@ static int __init diagchar_init(void)
poolsize_usb_apps + 1 + (NUM_PERIPHERALS * 6));
driver->num_clients = max_clients;
driver->logging_mode = DIAG_USB_MODE;
- driver->pd_logging_mode = 0;
+ for (i = 0; i < NUM_UPD; i++) {
+ driver->pd_logging_mode[i] = 0;
+ driver->pd_session_clear[i] = 0;
+ }
driver->num_pd_session = 0;
driver->mask_check = 0;
driver->in_busy_pktdata = 0;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 07c90b741fa0..8fb724305c03 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -38,6 +38,7 @@
#include "diag_masks.h"
#include "diag_usb.h"
#include "diag_mux.h"
+#include "diag_ipc_logging.h"
#define STM_CMD_VERSION_OFFSET 4
#define STM_CMD_MASK_OFFSET 5
diff --git a/drivers/char/diag/diagfwd_cntl.c b/drivers/char/diag/diagfwd_cntl.c
index ae749725f6db..82a67f1f6f47 100644
--- a/drivers/char/diag/diagfwd_cntl.c
+++ b/drivers/char/diag/diagfwd_cntl.c
@@ -110,6 +110,8 @@ void diag_notify_md_client(uint8_t peripheral, int data)
{
int stat = 0;
struct siginfo info;
+ struct pid *pid_struct;
+ struct task_struct *result;
if (peripheral > NUM_PERIPHERALS)
return;
@@ -122,20 +124,38 @@ void diag_notify_md_client(uint8_t peripheral, int data)
info.si_code = SI_QUEUE;
info.si_int = (PERIPHERAL_MASK(peripheral) | data);
info.si_signo = SIGCONT;
- if (driver->md_session_map[peripheral] &&
- driver->md_session_map[peripheral]->task) {
- if (driver->md_session_map[peripheral]->
- md_client_thread_info->task != NULL
- && driver->md_session_map[peripheral]->pid ==
- driver->md_session_map[peripheral]->task->tgid) {
+
+ if (!driver->md_session_map[peripheral] ||
+ driver->md_session_map[peripheral]->pid <= 0) {
+ pr_err("diag: md_session_map[%d] is invalid\n", peripheral);
+ mutex_unlock(&driver->md_session_lock);
+ return;
+ }
+
+ pid_struct = find_get_pid(
+ driver->md_session_map[peripheral]->pid);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "md_session_map[%d] pid = %d task = %pK\n",
+ peripheral,
+ driver->md_session_map[peripheral]->pid,
+ driver->md_session_map[peripheral]->task);
+
+ if (pid_struct) {
+ result = get_pid_task(pid_struct, PIDTYPE_PID);
+
+ if (!result) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
- "md_session %d pid = %d, md_session %d task tgid = %d\n",
- peripheral,
- driver->md_session_map[peripheral]->pid,
+ "diag: md_session_map[%d] with pid = %d Exited..\n",
peripheral,
- driver->md_session_map[peripheral]->task->tgid);
- stat = send_sig_info(info.si_signo, &info,
- driver->md_session_map[peripheral]->task);
+ driver->md_session_map[peripheral]->pid);
+ mutex_unlock(&driver->md_session_lock);
+ return;
+ }
+
+ if (driver->md_session_map[peripheral] &&
+ driver->md_session_map[peripheral]->task == result) {
+ stat = send_sig_info(info.si_signo,
+ &info, result);
if (stat)
pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
info.si_int, stat);
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index aaa587975469..e86dc8292bf0 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -244,9 +244,14 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
mutex_lock(&driver->hdlc_disable_mutex);
mutex_lock(&fwd_info->data_mutex);
+
peripheral = GET_PD_CTXT(buf->ctxt);
if (peripheral == DIAG_ID_MPSS)
peripheral = PERIPHERAL_MODEM;
+ if (peripheral == DIAG_ID_LPASS)
+ peripheral = PERIPHERAL_LPASS;
+ if (peripheral == DIAG_ID_CDSP)
+ peripheral = PERIPHERAL_CDSP;
session_info =
diag_md_session_get_peripheral(peripheral);
@@ -323,15 +328,19 @@ end:
static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
unsigned char *buf, int len)
{
- int len_cpd = 0, len_upd_1 = 0;
- int ctxt_cpd = 0, ctxt_upd_1 = 0;
+ int len_cpd = 0;
+ int len_upd_1 = 0, len_upd_2 = 0;
+ int ctxt_cpd = 0;
+ int ctxt_upd_1 = 0, ctxt_upd_2 = 0;
int buf_len = 0, processed = 0;
unsigned char *temp_buf_main = NULL;
unsigned char *temp_buf_cpd = NULL;
unsigned char *temp_buf_upd_1 = NULL;
+ unsigned char *temp_buf_upd_2 = NULL;
struct diagfwd_buf_t *temp_ptr_upd = NULL;
struct diagfwd_buf_t *temp_ptr_cpd = NULL;
int flag_buf_1 = 0, flag_buf_2 = 0;
+ uint8_t peripheral;
if (!fwd_info || !buf || len <= 0) {
diag_ws_release();
@@ -349,24 +358,42 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
diag_ws_release();
return;
}
+ peripheral = fwd_info->peripheral;
- if (driver->feature[fwd_info->peripheral].encode_hdlc &&
- driver->feature[fwd_info->peripheral].untag_header &&
- driver->peripheral_untag[fwd_info->peripheral]) {
+ if (driver->feature[peripheral].encode_hdlc &&
+ driver->feature[peripheral].untag_header &&
+ driver->peripheral_untag[peripheral]) {
mutex_lock(&driver->diagfwd_untag_mutex);
temp_buf_cpd = buf;
temp_buf_main = buf;
if (fwd_info->buf_1 &&
fwd_info->buf_1->data_raw == buf) {
flag_buf_1 = 1;
- if (fwd_info->type == TYPE_DATA)
+ temp_ptr_cpd = fwd_info->buf_1;
+ if (fwd_info->type == TYPE_DATA) {
temp_buf_upd_1 =
fwd_info->buf_upd_1_a->data_raw;
- } else {
+ if (peripheral ==
+ PERIPHERAL_LPASS)
+ temp_buf_upd_2 =
+ fwd_info->buf_upd_2_a->data_raw;
+ }
+ } else if (fwd_info->buf_2 &&
+ fwd_info->buf_2->data_raw == buf) {
flag_buf_2 = 1;
+ temp_ptr_cpd = fwd_info->buf_2;
if (fwd_info->type == TYPE_DATA)
temp_buf_upd_1 =
fwd_info->buf_upd_1_b->data_raw;
+ if (peripheral ==
+ PERIPHERAL_LPASS)
+ temp_buf_upd_2 =
+ fwd_info->buf_upd_2_b->data_raw;
+ } else {
+ pr_err("diag: In %s, no match for buffer %pK, peripheral %d, type: %d\n",
+ __func__, buf, peripheral,
+ fwd_info->type);
+ goto end;
}
while (processed < len) {
buf_len =
@@ -390,31 +417,97 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
temp_buf_upd_1 += buf_len;
}
break;
+ case DIAG_ID_LPASS:
+ ctxt_cpd = DIAG_ID_LPASS;
+ len_cpd += buf_len;
+ if (temp_buf_cpd) {
+ memcpy(temp_buf_cpd,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_cpd += buf_len;
+ }
+ break;
+ case DIAG_ID_AUDIO:
+ ctxt_upd_1 = UPD_AUDIO;
+ len_upd_1 += buf_len;
+ if (temp_buf_upd_1) {
+ memcpy(temp_buf_upd_1,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_upd_1 += buf_len;
+ }
+ break;
+ case DIAG_ID_SENSORS:
+ ctxt_upd_2 = UPD_SENSORS;
+ len_upd_2 += buf_len;
+ if (temp_buf_upd_2) {
+ memcpy(temp_buf_upd_2,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_upd_2 += buf_len;
+ }
+ break;
+ case DIAG_ID_CDSP:
+ ctxt_cpd = DIAG_ID_CDSP;
+ len_cpd += buf_len;
+ if (temp_buf_cpd) {
+ memcpy(temp_buf_cpd,
+ (temp_buf_main + 4), buf_len);
+ temp_buf_cpd += buf_len;
+ }
+ break;
+ default:
+ goto end;
}
len = len - 4;
temp_buf_main += (buf_len + 4);
processed += buf_len;
}
- if (fwd_info->type == TYPE_DATA && len_upd_1) {
+ if (peripheral == PERIPHERAL_LPASS &&
+ fwd_info->type == TYPE_DATA && len_upd_2) {
+ if (flag_buf_1) {
+ driver->upd_len_2_a = len_upd_2;
+ temp_ptr_upd = fwd_info->buf_upd_2_a;
+ } else {
+ driver->upd_len_2_b = len_upd_2;
+ temp_ptr_upd = fwd_info->buf_upd_2_b;
+ }
+ temp_ptr_upd->ctxt &= 0x00FFFFFF;
+ temp_ptr_upd->ctxt |=
+ (SET_PD_CTXT(ctxt_upd_2));
+ atomic_set(&temp_ptr_upd->in_busy, 1);
+ diagfwd_data_process_done(fwd_info,
+ temp_ptr_upd, len_upd_2);
+ } else {
if (flag_buf_1)
+ driver->upd_len_2_a = 0;
+ if (flag_buf_2)
+ driver->upd_len_2_b = 0;
+ }
+ if (fwd_info->type == TYPE_DATA && len_upd_1) {
+ if (flag_buf_1) {
+ driver->upd_len_1_a[peripheral] =
+ len_upd_1;
temp_ptr_upd = fwd_info->buf_upd_1_a;
- else
+ } else {
+ driver->upd_len_1_b[peripheral] =
+ len_upd_1;
temp_ptr_upd = fwd_info->buf_upd_1_b;
+ }
temp_ptr_upd->ctxt &= 0x00FFFFFF;
temp_ptr_upd->ctxt |=
(SET_PD_CTXT(ctxt_upd_1));
atomic_set(&temp_ptr_upd->in_busy, 1);
diagfwd_data_process_done(fwd_info,
temp_ptr_upd, len_upd_1);
+ } else {
+ if (flag_buf_1)
+ driver->upd_len_1_a[peripheral] = 0;
+ if (flag_buf_2)
+ driver->upd_len_1_b[peripheral] = 0;
}
if (len_cpd) {
- if (flag_buf_1) {
- driver->cpd_len_1 = len_cpd;
- temp_ptr_cpd = fwd_info->buf_1;
- } else {
- driver->cpd_len_2 = len_cpd;
- temp_ptr_cpd = fwd_info->buf_2;
- }
+ if (flag_buf_1)
+ driver->cpd_len_1[peripheral] = len_cpd;
+ else
+ driver->cpd_len_2[peripheral] = len_cpd;
temp_ptr_cpd->ctxt &= 0x00FFFFFF;
temp_ptr_cpd->ctxt |=
(SET_PD_CTXT(ctxt_cpd));
@@ -422,14 +515,24 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
temp_ptr_cpd, len_cpd);
} else {
if (flag_buf_1)
- driver->cpd_len_1 = 0;
+ driver->cpd_len_1[peripheral] = 0;
if (flag_buf_2)
- driver->cpd_len_2 = 0;
+ driver->cpd_len_2[peripheral] = 0;
}
mutex_unlock(&driver->diagfwd_untag_mutex);
+ return;
} else {
diagfwd_data_read_done(fwd_info, buf, len);
+ return;
}
+end:
+ diag_ws_release();
+ mutex_unlock(&driver->diagfwd_untag_mutex);
+ if (temp_ptr_cpd) {
+ diagfwd_write_done(fwd_info->peripheral, fwd_info->type,
+ GET_BUF_NUM(temp_ptr_cpd->ctxt));
+ }
+ diagfwd_queue_read(fwd_info);
}
static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
@@ -1166,20 +1269,78 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
return;
fwd_info = &peripheral_info[type][peripheral];
+
if (ctxt == 1 && fwd_info->buf_1) {
+ /* Buffer 1 for core PD is freed */
atomic_set(&fwd_info->buf_1->in_busy, 0);
- driver->cpd_len_1 = 0;
+ driver->cpd_len_1[peripheral] = 0;
} else if (ctxt == 2 && fwd_info->buf_2) {
+ /* Buffer 2 for core PD is freed */
atomic_set(&fwd_info->buf_2->in_busy, 0);
- driver->cpd_len_2 = 0;
+ driver->cpd_len_2[peripheral] = 0;
} else if (ctxt == 3 && fwd_info->buf_upd_1_a) {
+ /* Buffer 1 for user pd 1 is freed */
atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0);
- if (driver->cpd_len_1 == 0)
- atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+ if (peripheral == PERIPHERAL_LPASS) {
+ /* if not data in cpd and other user pd
+ * free the core pd buffer for LPASS
+ */
+ if (!driver->cpd_len_1[PERIPHERAL_LPASS] &&
+ !driver->upd_len_2_a)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ } else {
+ /* if not data in cpd
+ * free the core pd buffer for MPSS
+ */
+ if (!driver->cpd_len_1[PERIPHERAL_MODEM])
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ }
+ driver->upd_len_1_a[peripheral] = 0;
+
} else if (ctxt == 4 && fwd_info->buf_upd_1_b) {
+ /* Buffer 2 for user pd 1 is freed */
atomic_set(&fwd_info->buf_upd_1_b->in_busy, 0);
- if (driver->cpd_len_2 == 0)
+ if (peripheral == PERIPHERAL_LPASS) {
+ /* if not data in cpd and other user pd
+ * free the core pd buffer for LPASS
+ */
+ if (!driver->cpd_len_2[peripheral] &&
+ !driver->upd_len_2_b)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ } else {
+ /* if not data in cpd
+ * free the core pd buffer for MPSS
+ */
+ if (!driver->cpd_len_2[PERIPHERAL_MODEM])
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ }
+ driver->upd_len_1_b[peripheral] = 0;
+
+ } else if (ctxt == 5 && fwd_info->buf_upd_2_a) {
+ /* Buffer 1 for user pd 2 is freed */
+ atomic_set(&fwd_info->buf_upd_2_a->in_busy, 0);
+ /* if not data in cpd and other user pd
+ * free the core pd buffer for LPASS
+ */
+ if (!driver->cpd_len_1[PERIPHERAL_LPASS] &&
+ !driver->upd_len_1_a[PERIPHERAL_LPASS])
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+
+ driver->upd_len_2_a = 0;
+
+ } else if (ctxt == 6 && fwd_info->buf_upd_2_b) {
+ /* Buffer 2 for user pd 2 is freed */
+ atomic_set(&fwd_info->buf_upd_2_b->in_busy, 0);
+ /* if not data in cpd and other user pd
+ * free the core pd buffer for LPASS
+ */
+ if (!driver->cpd_len_2[PERIPHERAL_LPASS] &&
+ !driver->upd_len_1_b[PERIPHERAL_LPASS])
atomic_set(&fwd_info->buf_2->in_busy, 0);
+
+ driver->upd_len_2_b = 0;
+
} else
pr_err("diag: In %s, invalid ctxt %d\n", __func__, ctxt);
@@ -1312,7 +1473,8 @@ static void diagfwd_queue_read(struct diagfwd_info *fwd_info)
void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
{
- unsigned char *temp_buf = NULL;
+ struct diagfwd_buf_t *temp_fwd_buf;
+ unsigned char *temp_char_buf;
if (!fwd_info)
return;
@@ -1383,11 +1545,11 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- temp_buf = fwd_info->buf_upd_1_a->data;
- if (ZERO_OR_NULL_PTR(temp_buf))
+ temp_char_buf = fwd_info->buf_upd_1_a->data;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
fwd_info->buf_upd_1_a->len = PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(temp_buf);
+ kmemleak_not_leak(temp_char_buf);
fwd_info->buf_upd_1_a->ctxt = SET_BUF_CTXT(
fwd_info->peripheral,
fwd_info->type, 3);
@@ -1408,16 +1570,76 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- temp_buf = fwd_info->buf_upd_1_b->data;
- if (ZERO_OR_NULL_PTR(temp_buf))
+ temp_char_buf =
+ fwd_info->buf_upd_1_b->data;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
fwd_info->buf_upd_1_b->len =
PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(temp_buf);
+ kmemleak_not_leak(temp_char_buf);
fwd_info->buf_upd_1_b->ctxt = SET_BUF_CTXT(
fwd_info->peripheral,
fwd_info->type, 4);
}
+ if (fwd_info->peripheral ==
+ PERIPHERAL_LPASS) {
+ if (!fwd_info->buf_upd_2_a) {
+ fwd_info->buf_upd_2_a =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ temp_fwd_buf =
+ fwd_info->buf_upd_2_a;
+ if (ZERO_OR_NULL_PTR(temp_fwd_buf))
+ goto err;
+ kmemleak_not_leak(temp_fwd_buf);
+ }
+
+ if (!fwd_info->buf_upd_2_a->data) {
+ fwd_info->buf_upd_2_a->data =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_2_a->data;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_2_a->len =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ fwd_info->buf_upd_2_a->ctxt =
+ SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 5);
+ }
+ if (!fwd_info->buf_upd_2_b) {
+ fwd_info->buf_upd_2_b =
+ kzalloc(sizeof(struct diagfwd_buf_t),
+ GFP_KERNEL);
+ temp_fwd_buf =
+ fwd_info->buf_upd_2_b;
+ if (ZERO_OR_NULL_PTR(temp_fwd_buf))
+ goto err;
+ kmemleak_not_leak(temp_fwd_buf);
+ }
+
+ if (!fwd_info->buf_upd_2_b->data) {
+ fwd_info->buf_upd_2_b->data =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_2_b->data;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_2_b->len =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ fwd_info->buf_upd_2_b->ctxt =
+ SET_BUF_CTXT(
+ fwd_info->peripheral,
+ fwd_info->type, 6);
+ }
+ }
}
if (driver->supports_apps_hdlc_encoding) {
@@ -1427,12 +1649,13 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- temp_buf = fwd_info->buf_1->data_raw;
- if (ZERO_OR_NULL_PTR(temp_buf))
+ temp_char_buf =
+ fwd_info->buf_1->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
fwd_info->buf_1->len_raw =
PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(temp_buf);
+ kmemleak_not_leak(temp_char_buf);
}
if (!fwd_info->buf_2->data_raw) {
@@ -1440,12 +1663,13 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- temp_buf = fwd_info->buf_2->data_raw;
- if (ZERO_OR_NULL_PTR(temp_buf))
+ temp_char_buf =
+ fwd_info->buf_2->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
fwd_info->buf_2->len_raw =
PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(temp_buf);
+ kmemleak_not_leak(temp_char_buf);
}
if (driver->feature[fwd_info->peripheral].
@@ -1456,13 +1680,13 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- temp_buf =
+ temp_char_buf =
fwd_info->buf_upd_1_a->data_raw;
- if (ZERO_OR_NULL_PTR(temp_buf))
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
fwd_info->buf_upd_1_a->len_raw =
PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(temp_buf);
+ kmemleak_not_leak(temp_char_buf);
}
if (fwd_info->buf_upd_1_b &&
@@ -1471,13 +1695,41 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- temp_buf =
+ temp_char_buf =
fwd_info->buf_upd_1_b->data_raw;
- if (ZERO_OR_NULL_PTR(temp_buf))
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
fwd_info->buf_upd_1_b->len_raw =
PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(temp_buf);
+ kmemleak_not_leak(temp_char_buf);
+ }
+ if (fwd_info->peripheral == PERIPHERAL_LPASS
+ && !fwd_info->buf_upd_2_a->data_raw) {
+ fwd_info->buf_upd_2_a->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_2_a->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_2_a->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
+ }
+ if (fwd_info->peripheral == PERIPHERAL_LPASS
+ && !fwd_info->buf_upd_2_b->data_raw) {
+ fwd_info->buf_upd_2_b->data_raw =
+ kzalloc(PERIPHERAL_BUF_SZ +
+ APF_DIAG_PADDING,
+ GFP_KERNEL);
+ temp_char_buf =
+ fwd_info->buf_upd_2_b->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
+ goto err;
+ fwd_info->buf_upd_2_b->len_raw =
+ PERIPHERAL_BUF_SZ;
+ kmemleak_not_leak(temp_char_buf);
}
}
}
@@ -1490,10 +1742,12 @@ void diagfwd_buffers_init(struct diagfwd_info *fwd_info)
fwd_info->buf_1->data_raw = kzalloc(PERIPHERAL_BUF_SZ +
APF_DIAG_PADDING,
GFP_KERNEL);
- if (!fwd_info->buf_1->data_raw)
+ temp_char_buf =
+ fwd_info->buf_1->data_raw;
+ if (ZERO_OR_NULL_PTR(temp_char_buf))
goto err;
fwd_info->buf_1->len_raw = PERIPHERAL_BUF_SZ;
- kmemleak_not_leak(fwd_info->buf_1->data_raw);
+ kmemleak_not_leak(temp_char_buf);
}
}
@@ -1530,6 +1784,38 @@ static void diagfwd_buffers_exit(struct diagfwd_info *fwd_info)
kfree(fwd_info->buf_2);
fwd_info->buf_2 = NULL;
}
+ if (fwd_info->buf_upd_1_a) {
+ kfree(fwd_info->buf_upd_1_a->data);
+ fwd_info->buf_upd_1_a->data = NULL;
+ kfree(fwd_info->buf_upd_1_a->data_raw);
+ fwd_info->buf_upd_1_a->data_raw = NULL;
+ kfree(fwd_info->buf_upd_1_a);
+ fwd_info->buf_upd_1_a = NULL;
+ }
+ if (fwd_info->buf_upd_1_b) {
+ kfree(fwd_info->buf_upd_1_b->data);
+ fwd_info->buf_upd_1_b->data = NULL;
+ kfree(fwd_info->buf_upd_1_b->data_raw);
+ fwd_info->buf_upd_1_b->data_raw = NULL;
+ kfree(fwd_info->buf_upd_1_b);
+ fwd_info->buf_upd_1_b = NULL;
+ }
+ if (fwd_info->buf_upd_2_a) {
+ kfree(fwd_info->buf_upd_2_a->data);
+ fwd_info->buf_upd_2_a->data = NULL;
+ kfree(fwd_info->buf_upd_2_a->data_raw);
+ fwd_info->buf_upd_2_a->data_raw = NULL;
+ kfree(fwd_info->buf_upd_2_a);
+ fwd_info->buf_upd_2_a = NULL;
+ }
+ if (fwd_info->buf_upd_2_b) {
+ kfree(fwd_info->buf_upd_2_b->data);
+ fwd_info->buf_upd_2_b->data = NULL;
+ kfree(fwd_info->buf_upd_2_b->data_raw);
+ fwd_info->buf_upd_2_b->data_raw = NULL;
+ kfree(fwd_info->buf_upd_2_b);
+ fwd_info->buf_upd_2_b = NULL;
+ }
mutex_unlock(&fwd_info->buf_mutex);
}
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index f483da81cc96..760f139ff428 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -80,6 +80,8 @@ struct diagfwd_info {
struct diagfwd_buf_t *buf_2;
struct diagfwd_buf_t *buf_upd_1_a;
struct diagfwd_buf_t *buf_upd_1_b;
+ struct diagfwd_buf_t *buf_upd_2_a;
+ struct diagfwd_buf_t *buf_upd_2_b;
struct diagfwd_buf_t *buf_ptr[NUM_WRITE_BUFFERS];
struct diag_peripheral_ops *p_ops;
struct diag_channel_ops *c_ops;
diff --git a/drivers/char/msm_smd_pkt.c b/drivers/char/msm_smd_pkt.c
new file mode 100644
index 000000000000..a61d273bfb65
--- /dev/null
+++ b/drivers/char/msm_smd_pkt.c
@@ -0,0 +1,1397 @@
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SMD Packet Driver -- Provides a binary SMD non-muxed packet port
+ * interface.
+ */
+
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/msm_smd_pkt.h>
+#include <linux/poll.h>
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <asm/ioctls.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/ipc_logging.h>
+
+#define MODULE_NAME "msm_smdpkt"
+#define DEVICE_NAME "smdpkt"
+#define WAKEUPSOURCE_TIMEOUT (2000) /* two seconds */
+
+struct smd_pkt_dev {
+ struct list_head dev_list;
+ char dev_name[SMD_MAX_CH_NAME_LEN];
+ char ch_name[SMD_MAX_CH_NAME_LEN];
+ uint32_t edge;
+
+ struct cdev cdev;
+ struct device *devicep;
+ void *pil;
+
+ struct smd_channel *ch;
+ struct mutex ch_lock;
+ struct mutex rx_lock;
+ struct mutex tx_lock;
+ wait_queue_head_t ch_read_wait_queue;
+ wait_queue_head_t ch_write_wait_queue;
+ wait_queue_head_t ch_opened_wait_queue;
+
+ int i;
+ int ref_cnt;
+
+ int blocking_write;
+ int is_open;
+ int poll_mode;
+ unsigned ch_size;
+ uint open_modem_wait;
+
+ int has_reset;
+ int do_reset_notification;
+ struct completion ch_allocated;
+ struct wakeup_source pa_ws; /* Packet Arrival Wakeup Source */
+ struct work_struct packet_arrival_work;
+ spinlock_t pa_spinlock;
+ int ws_locked;
+};
+
+
+struct smd_pkt_driver {
+ struct list_head list;
+ int ref_cnt;
+ char pdriver_name[SMD_MAX_CH_NAME_LEN];
+ struct platform_driver driver;
+};
+
+static DEFINE_MUTEX(smd_pkt_driver_lock_lha1);
+static LIST_HEAD(smd_pkt_driver_list);
+
+struct class *smd_pkt_classp;
+static dev_t smd_pkt_number;
+static struct delayed_work loopback_work;
+static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp);
+static void check_and_wakeup_writer(struct smd_pkt_dev *smd_pkt_devp);
+static uint32_t is_modem_smsm_inited(void);
+
+static DEFINE_MUTEX(smd_pkt_dev_lock_lha1);
+static LIST_HEAD(smd_pkt_dev_list);
+static int num_smd_pkt_ports;
+
+#define SMD_PKT_IPC_LOG_PAGE_CNT 2
+static void *smd_pkt_ilctxt;
+
+static int msm_smd_pkt_debug_mask;
+module_param_named(debug_mask, msm_smd_pkt_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+enum {
+ SMD_PKT_STATUS = 1U << 0,
+ SMD_PKT_READ = 1U << 1,
+ SMD_PKT_WRITE = 1U << 2,
+ SMD_PKT_POLL = 1U << 5,
+};
+
+#define DEBUG
+
+#ifdef DEBUG
+
+#define SMD_PKT_LOG_STRING(x...) \
+do { \
+ if (smd_pkt_ilctxt) \
+ ipc_log_string(smd_pkt_ilctxt, "<SMD_PKT>: "x); \
+} while (0)
+
+#define D_STATUS(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_STATUS) \
+ pr_info("Status: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define D_READ(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_READ) \
+ pr_info("Read: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define D_WRITE(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_WRITE) \
+ pr_info("Write: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define D_POLL(x...) \
+do { \
+ if (msm_smd_pkt_debug_mask & SMD_PKT_POLL) \
+ pr_info("Poll: "x); \
+ SMD_PKT_LOG_STRING(x); \
+} while (0)
+
+#define E_SMD_PKT_SSR(x) \
+do { \
+ if (x->do_reset_notification) \
+ pr_err("%s notifying reset for smd_pkt_dev id:%d\n", \
+ __func__, x->i); \
+} while (0)
+#else
+#define D_STATUS(x...) do {} while (0)
+#define D_READ(x...) do {} while (0)
+#define D_WRITE(x...) do {} while (0)
+#define D_POLL(x...) do {} while (0)
+#define E_SMD_PKT_SSR(x) do {} while (0)
+#endif
+
+static ssize_t open_timeout_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t n)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long tmp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ if (!kstrtoul(buf, 10, &tmp)) {
+ smd_pkt_devp->open_modem_wait = tmp;
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return n;
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to convert: %s to an int\n",
+ __func__, buf);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+}
+
+static ssize_t open_timeout_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ smd_pkt_devp->open_modem_wait);
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+
+}
+
+static DEVICE_ATTR(open_timeout, 0664, open_timeout_show, open_timeout_store);
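+
+/*
+ * Illustrative usage of the open_timeout attribute (the device name
+ * "smdcntl8" and the sysfs path are assumptions based on the class and
+ * dev_name created later in this file, not taken from this patch):
+ *
+ *	echo 10 > /sys/class/smdpkt/smdcntl8/open_timeout
+ *
+ * sets a 10 second wait for channel allocation in smd_pkt_open().
+ */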
+
+/**
+ * loopback_edge_store() - Set the edge type for loopback device
+ * @d: Linux device structure
+ * @attr: Device attribute structure
+ * @buf: Input string
+ * @n: Length of the input string
+ *
+ * This function is used to set the loopback device edge at runtime
+ * by writing to the loopback_edge node.
+ */
+static ssize_t loopback_edge_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t n)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long tmp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ if (!kstrtoul(buf, 10, &tmp)) {
+ smd_pkt_devp->edge = tmp;
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return n;
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to convert: %s to an int\n",
+ __func__, buf);
+ return -EINVAL;
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+}
+
+/**
+ * loopback_edge_show() - Get the edge type for loopback device
+ * @d: Linux device structure
+ * @attr: Device attribute structure
+ * @buf: Output buffer
+ *
+ * This function is used to get the loopback device edge at runtime
+ * by reading the loopback_edge node.
+ */
+static ssize_t loopback_edge_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->devicep == d) {
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ smd_pkt_devp->edge);
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ pr_err("%s: unable to match device to valid smd_pkt port\n", __func__);
+ return -EINVAL;
+
+}
+
+static DEVICE_ATTR(loopback_edge, 0664, loopback_edge_show,
+ loopback_edge_store);
+
+static int notify_reset(struct smd_pkt_dev *smd_pkt_devp)
+{
+ smd_pkt_devp->do_reset_notification = 0;
+
+ return -ENETRESET;
+}
+
+static void clean_and_signal(struct smd_pkt_dev *smd_pkt_devp)
+{
+ smd_pkt_devp->do_reset_notification = 1;
+ smd_pkt_devp->has_reset = 1;
+
+ smd_pkt_devp->is_open = 0;
+
+ wake_up(&smd_pkt_devp->ch_read_wait_queue);
+ wake_up(&smd_pkt_devp->ch_write_wait_queue);
+ wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
+ D_STATUS("%s smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
+}
+
+static void loopback_probe_worker(struct work_struct *work)
+{
+
+ /*
+ * Wait for the modem SMSM to be initialized so that the SMD
+ * loopback channel can be allocated at the modem. Since the wait
+ * needs to be done at most once, using msleep doesn't degrade
+ * performance.
+ */
+ if (!is_modem_smsm_inited())
+ schedule_delayed_work(&loopback_work, msecs_to_jiffies(1000));
+ else
+ smsm_change_state(SMSM_APPS_STATE,
+ 0, SMSM_SMD_LOOPBACK);
+
+}
+
+static void packet_arrival_worker(struct work_struct *work)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long flags;
+
+ smd_pkt_devp = container_of(work, struct smd_pkt_dev,
+ packet_arrival_work);
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ if (smd_pkt_devp->ch && smd_pkt_devp->ws_locked) {
+ D_READ("%s locking smd_pkt_dev id:%d wakeup source\n",
+ __func__, smd_pkt_devp->i);
+ /*
+ * Keep system awake long enough to allow userspace client
+ * to process the packet.
+ */
+ __pm_wakeup_event(&smd_pkt_devp->pa_ws, WAKEUPSOURCE_TIMEOUT);
+ }
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+}
+
+static long smd_pkt_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct smd_pkt_dev *smd_pkt_devp;
+ uint32_t val;
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp)
+ return -EINVAL;
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ switch (cmd) {
+ case TIOCMGET:
+ D_STATUS("%s TIOCMGET command on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ ret = smd_tiocmget(smd_pkt_devp->ch);
+ break;
+ case TIOCMSET:
+ ret = get_user(val, (uint32_t *)arg);
+ if (ret) {
+ pr_err("Error getting TIOCMSET value\n");
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+ return ret;
+ }
+ D_STATUS("%s TIOCSET command on smd_pkt_dev id:%d arg[0x%x]\n",
+ __func__, smd_pkt_devp->i, val);
+ ret = smd_tiocmset(smd_pkt_devp->ch, val, ~val);
+ break;
+ case SMD_PKT_IOCTL_BLOCKING_WRITE:
+ ret = get_user(smd_pkt_devp->blocking_write, (int *)arg);
+ break;
+ default:
+ pr_err_ratelimited("%s: Unrecognized ioctl command %d\n",
+ __func__, cmd);
+ ret = -ENOIOCTLCMD;
+ }
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ return ret;
+}
+
+ssize_t smd_pkt_read(struct file *file,
+ char __user *_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r;
+ int bytes_read;
+ int pkt_size;
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned long flags;
+ void *buf;
+
+ smd_pkt_devp = file->private_data;
+
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on NULL smd_pkt_dev\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err_ratelimited("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return -EINVAL;
+ }
+
+ if (smd_pkt_devp->do_reset_notification) {
+ /* notify client that a reset occurred */
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ return notify_reset(smd_pkt_devp);
+ }
+ D_READ("Begin %s on smd_pkt_dev id:%d buffer_size %zu\n",
+ __func__, smd_pkt_devp->i, count);
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+wait_for_packet:
+ r = wait_event_interruptible(smd_pkt_devp->ch_read_wait_queue,
+ !smd_pkt_devp->ch ||
+ (smd_cur_packet_size(smd_pkt_devp->ch) > 0
+ && smd_read_avail(smd_pkt_devp->ch)) ||
+ smd_pkt_devp->has_reset);
+
+ mutex_lock(&smd_pkt_devp->rx_lock);
+ if (smd_pkt_devp->has_reset) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ kfree(buf);
+ return notify_reset(smd_pkt_devp);
+ }
+
+ if (!smd_pkt_devp->ch) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ pr_err_ratelimited("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ /* qualify error message */
+ if (r != -ERESTARTSYS) {
+ /* we get this anytime a signal comes in */
+ pr_err_ratelimited("%s: wait_event_interruptible on smd_pkt_dev id:%d ret %i\n",
+ __func__, smd_pkt_devp->i, r);
+ }
+ kfree(buf);
+ return r;
+ }
+
+ /* Here we have a whole packet waiting for us */
+ pkt_size = smd_cur_packet_size(smd_pkt_devp->ch);
+
+ if (!pkt_size) {
+ pr_err_ratelimited("%s: No data on smd_pkt_dev id:%d, False wakeup\n",
+ __func__, smd_pkt_devp->i);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ goto wait_for_packet;
+ }
+
+ if (pkt_size < 0) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ pr_err_ratelimited("%s: Error %d obtaining packet size for Channel %s\n",
+ __func__, pkt_size, smd_pkt_devp->ch_name);
+ kfree(buf);
+ return pkt_size;
+ }
+
+ if ((uint32_t)pkt_size > count) {
+ pr_err_ratelimited("%s: failure on smd_pkt_dev id: %d - packet size %d > buffer size %zu,",
+ __func__, smd_pkt_devp->i,
+ pkt_size, count);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ kfree(buf);
+ return -ETOOSMALL;
+ }
+
+ bytes_read = 0;
+ do {
+ r = smd_read(smd_pkt_devp->ch,
+ (buf + bytes_read),
+ (pkt_size - bytes_read));
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ if (smd_pkt_devp->has_reset) {
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ kfree(buf);
+ return notify_reset(smd_pkt_devp);
+ }
+ pr_err_ratelimited("%s Error while reading %d\n",
+ __func__, r);
+ kfree(buf);
+ return r;
+ }
+ bytes_read += r;
+ if (pkt_size != bytes_read)
+ wait_event(smd_pkt_devp->ch_read_wait_queue,
+ smd_read_avail(smd_pkt_devp->ch) ||
+ smd_pkt_devp->has_reset);
+ if (smd_pkt_devp->has_reset) {
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ kfree(buf);
+ return notify_reset(smd_pkt_devp);
+ }
+ } while (pkt_size != bytes_read);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ if (smd_pkt_devp->poll_mode &&
+ !smd_cur_packet_size(smd_pkt_devp->ch)) {
+ __pm_relax(&smd_pkt_devp->pa_ws);
+ smd_pkt_devp->ws_locked = 0;
+ smd_pkt_devp->poll_mode = 0;
+ D_READ("%s unlocked smd_pkt_dev id:%d wakeup_source\n",
+ __func__, smd_pkt_devp->i);
+ }
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ r = copy_to_user(_buf, buf, bytes_read);
+ if (r) {
+ kfree(buf);
+ return -EFAULT;
+ }
+ D_READ("Finished %s on smd_pkt_dev id:%d %d bytes\n",
+ __func__, smd_pkt_devp->i, bytes_read);
+ kfree(buf);
+
+ /* check and wake up reader threads waiting on this device */
+ check_and_wakeup_reader(smd_pkt_devp);
+
+ return bytes_read;
+}
+
+ssize_t smd_pkt_write(struct file *file,
+ const char __user *_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r = 0, bytes_written;
+ struct smd_pkt_dev *smd_pkt_devp;
+ DEFINE_WAIT(write_wait);
+ void *buf;
+
+ smd_pkt_devp = file->private_data;
+
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on NULL smd_pkt_dev\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err_ratelimited("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return -EINVAL;
+ }
+
+ if (smd_pkt_devp->do_reset_notification || smd_pkt_devp->has_reset) {
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ /* notify client that a reset occurred */
+ return notify_reset(smd_pkt_devp);
+ }
+ D_WRITE("Begin %s on smd_pkt_dev id:%d data_size %zu\n",
+ __func__, smd_pkt_devp->i, count);
+
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ r = copy_from_user(buf, _buf, count);
+ if (r) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ mutex_lock(&smd_pkt_devp->tx_lock);
+ if (!smd_pkt_devp->blocking_write) {
+ if (smd_write_avail(smd_pkt_devp->ch) < count) {
+ pr_err_ratelimited("%s: Not enough space in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ kfree(buf);
+ return -ENOMEM;
+ }
+ }
+
+ r = smd_write_start(smd_pkt_devp->ch, count);
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ pr_err_ratelimited("%s: Error:%d in smd_pkt_dev id:%d @ smd_write_start\n",
+ __func__, r, smd_pkt_devp->i);
+ kfree(buf);
+ return r;
+ }
+
+ bytes_written = 0;
+ do {
+ prepare_to_wait(&smd_pkt_devp->ch_write_wait_queue,
+ &write_wait, TASK_UNINTERRUPTIBLE);
+ if (!smd_write_segment_avail(smd_pkt_devp->ch) &&
+ !smd_pkt_devp->has_reset) {
+ smd_enable_read_intr(smd_pkt_devp->ch);
+ schedule();
+ }
+ finish_wait(&smd_pkt_devp->ch_write_wait_queue, &write_wait);
+ smd_disable_read_intr(smd_pkt_devp->ch);
+
+ if (smd_pkt_devp->has_reset) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ kfree(buf);
+ return notify_reset(smd_pkt_devp);
+ }
+ r = smd_write_segment(smd_pkt_devp->ch,
+ (void *)(buf + bytes_written),
+ (count - bytes_written));
+ if (r < 0) {
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ if (smd_pkt_devp->has_reset) {
+ E_SMD_PKT_SSR(smd_pkt_devp);
+ kfree(buf);
+ return notify_reset(smd_pkt_devp);
+ }
+ pr_err_ratelimited("%s on smd_pkt_dev id:%d failed r:%d\n",
+ __func__, smd_pkt_devp->i, r);
+ kfree(buf);
+ return r;
+ }
+ bytes_written += r;
+ } while (bytes_written != count);
+ smd_write_end(smd_pkt_devp->ch);
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ D_WRITE("Finished %s on smd_pkt_dev id:%d %zu bytes\n",
+ __func__, smd_pkt_devp->i, count);
+
+ kfree(buf);
+ return count;
+}
+
+static unsigned int smd_pkt_poll(struct file *file, poll_table *wait)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ unsigned int mask = 0;
+
+ smd_pkt_devp = file->private_data;
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on a NULL device\n", __func__);
+ return POLLERR;
+ }
+
+ smd_pkt_devp->poll_mode = 1;
+ poll_wait(file, &smd_pkt_devp->ch_read_wait_queue, wait);
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ if (smd_pkt_devp->has_reset || !smd_pkt_devp->ch) {
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+ return POLLERR;
+ }
+
+ if (smd_read_avail(smd_pkt_devp->ch)) {
+ mask |= POLLIN | POLLRDNORM;
+ D_POLL("%s sets POLLIN for smd_pkt_dev id: %d\n",
+ __func__, smd_pkt_devp->i);
+ }
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ return mask;
+}
+
+static void check_and_wakeup_reader(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int sz;
+ unsigned long flags;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+
+ sz = smd_cur_packet_size(smd_pkt_devp->ch);
+ if (sz == 0) {
+ D_READ("%s: No packet in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+ if (!smd_read_avail(smd_pkt_devp->ch)) {
+ D_READ(
+ "%s: packet size is %d in smd_pkt_dev id:%d - but the data isn't here\n",
+ __func__, sz, smd_pkt_devp->i);
+ return;
+ }
+
+ /* here we have a packet of size sz ready */
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ __pm_stay_awake(&smd_pkt_devp->pa_ws);
+ smd_pkt_devp->ws_locked = 1;
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ wake_up(&smd_pkt_devp->ch_read_wait_queue);
+ schedule_work(&smd_pkt_devp->packet_arrival_work);
+ D_READ("%s: wake_up smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
+}
+
+static void check_and_wakeup_writer(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int sz;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return;
+ }
+
+ if (!smd_pkt_devp->ch) {
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+
+ sz = smd_write_segment_avail(smd_pkt_devp->ch);
+ if (sz) {
+ D_WRITE("%s: %d bytes write space in smd_pkt_dev id:%d\n",
+ __func__, sz, smd_pkt_devp->i);
+ smd_disable_read_intr(smd_pkt_devp->ch);
+ wake_up(&smd_pkt_devp->ch_write_wait_queue);
+ }
+}
+
+static void ch_notify(void *priv, unsigned event)
+{
+ struct smd_pkt_dev *smd_pkt_devp = priv;
+
+ if (smd_pkt_devp->ch == 0) {
+ if (event != SMD_EVENT_CLOSE)
+ pr_err("%s on a closed smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ return;
+ }
+
+ switch (event) {
+ case SMD_EVENT_DATA: {
+ D_STATUS("%s: DATA event in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ check_and_wakeup_reader(smd_pkt_devp);
+ if (smd_pkt_devp->blocking_write)
+ check_and_wakeup_writer(smd_pkt_devp);
+ break;
+ }
+ case SMD_EVENT_OPEN:
+ D_STATUS("%s: OPEN event in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ smd_pkt_devp->has_reset = 0;
+ smd_pkt_devp->is_open = 1;
+ wake_up_interruptible(&smd_pkt_devp->ch_opened_wait_queue);
+ break;
+ case SMD_EVENT_CLOSE:
+ D_STATUS("%s: CLOSE event in smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ smd_pkt_devp->is_open = 0;
+ /* put port into reset state */
+ clean_and_signal(smd_pkt_devp);
+ if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK"))
+ schedule_delayed_work(&loopback_work,
+ msecs_to_jiffies(1000));
+ break;
+ }
+}
+
+static int smd_pkt_dummy_probe(struct platform_device *pdev)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry(smd_pkt_devp, &smd_pkt_dev_list, dev_list) {
+ if (smd_pkt_devp->edge == pdev->id
+ && !strcmp(pdev->name, smd_pkt_devp->ch_name)) {
+ complete_all(&smd_pkt_devp->ch_allocated);
+ D_STATUS("%s allocated SMD ch for smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ break;
+ }
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+ return 0;
+}
+
+static uint32_t is_modem_smsm_inited(void)
+{
+ uint32_t modem_state;
+ uint32_t ready_state = (SMSM_INIT | SMSM_SMDINIT);
+
+ modem_state = smsm_get_state(SMSM_MODEM_STATE);
+ return (modem_state & ready_state) == ready_state;
+}
+
+/**
+ * smd_pkt_add_driver() - Add platform drivers for smd pkt device
+ *
+ * @smd_pkt_devp: pointer to the smd pkt device structure
+ *
+ * @returns: 0 for success, standard Linux error code otherwise
+ *
+ * This function registers a platform driver once for all smd pkt
+ * devices that share the same channel name, and increments the
+ * reference count for the 2nd through nth devices.
+ */
+static int smd_pkt_add_driver(struct smd_pkt_dev *smd_pkt_devp)
+{
+ int r = 0;
+ struct smd_pkt_driver *smd_pkt_driverp;
+ struct smd_pkt_driver *item;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return -EINVAL;
+ }
+ D_STATUS("Begin %s on smd_pkt_ch[%s]\n", __func__,
+ smd_pkt_devp->ch_name);
+
+ mutex_lock(&smd_pkt_driver_lock_lha1);
+ list_for_each_entry(item, &smd_pkt_driver_list, list) {
+ if (!strcmp(item->pdriver_name, smd_pkt_devp->ch_name)) {
+ D_STATUS("%s:%s Already Platform driver reg. cnt:%d\n",
+ __func__, smd_pkt_devp->ch_name, item->ref_cnt);
+ ++item->ref_cnt;
+ goto exit;
+ }
+ }
+
+ smd_pkt_driverp = kzalloc(sizeof(*smd_pkt_driverp), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(smd_pkt_driverp)) {
+ pr_err("%s: kzalloc() failed for smd_pkt_driver[%s]\n",
+ __func__, smd_pkt_devp->ch_name);
+ r = -ENOMEM;
+ goto exit;
+ }
+
+ smd_pkt_driverp->driver.probe = smd_pkt_dummy_probe;
+ scnprintf(smd_pkt_driverp->pdriver_name, SMD_MAX_CH_NAME_LEN,
+ "%s", smd_pkt_devp->ch_name);
+ smd_pkt_driverp->driver.driver.name = smd_pkt_driverp->pdriver_name;
+ smd_pkt_driverp->driver.driver.owner = THIS_MODULE;
+ r = platform_driver_register(&smd_pkt_driverp->driver);
+ if (r) {
+ pr_err("%s: %s Platform driver reg. failed\n",
+ __func__, smd_pkt_devp->ch_name);
+ kfree(smd_pkt_driverp);
+ goto exit;
+ }
+ ++smd_pkt_driverp->ref_cnt;
+ list_add(&smd_pkt_driverp->list, &smd_pkt_driver_list);
+
+exit:
+ D_STATUS("End %s on smd_pkt_ch[%s]\n", __func__, smd_pkt_devp->ch_name);
+ mutex_unlock(&smd_pkt_driver_lock_lha1);
+ return r;
+}
+
+/**
+ * smd_pkt_remove_driver() - Remove the platform drivers for smd pkt device
+ *
+ * @smd_pkt_devp: pointer to the smd pkt device structure
+ *
+ * This function decrements the reference count on the platform
+ * driver for an smd pkt device and unregisters the driver when the
+ * reference count reaches zero.
+ */
+static void smd_pkt_remove_driver(struct smd_pkt_dev *smd_pkt_devp)
+{
+ struct smd_pkt_driver *smd_pkt_driverp;
+ bool found_item = false;
+
+ if (!smd_pkt_devp) {
+ pr_err("%s on a NULL device\n", __func__);
+ return;
+ }
+
+ D_STATUS("Begin %s on smd_pkt_ch[%s]\n", __func__,
+ smd_pkt_devp->ch_name);
+ mutex_lock(&smd_pkt_driver_lock_lha1);
+ list_for_each_entry(smd_pkt_driverp, &smd_pkt_driver_list, list) {
+ if (!strcmp(smd_pkt_driverp->pdriver_name,
+ smd_pkt_devp->ch_name)) {
+ found_item = true;
+ D_STATUS("%s:%s Platform driver cnt:%d\n",
+ __func__, smd_pkt_devp->ch_name,
+ smd_pkt_driverp->ref_cnt);
+ if (smd_pkt_driverp->ref_cnt > 0)
+ --smd_pkt_driverp->ref_cnt;
+ else
+ pr_warn("%s reference count <= 0\n", __func__);
+ break;
+ }
+ }
+ if (!found_item)
+ pr_err("%s:%s No item found in list.\n",
+ __func__, smd_pkt_devp->ch_name);
+
+ if (found_item && smd_pkt_driverp->ref_cnt == 0) {
+ platform_driver_unregister(&smd_pkt_driverp->driver);
+ smd_pkt_driverp->driver.probe = NULL;
+ list_del(&smd_pkt_driverp->list);
+ kfree(smd_pkt_driverp);
+ }
+ mutex_unlock(&smd_pkt_driver_lock_lha1);
+ D_STATUS("End %s on smd_pkt_ch[%s]\n", __func__, smd_pkt_devp->ch_name);
+}
+
+int smd_pkt_open(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_pkt_dev *smd_pkt_devp;
+ const char *peripheral = NULL;
+
+ smd_pkt_devp = container_of(inode->i_cdev, struct smd_pkt_dev, cdev);
+
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on a NULL device\n", __func__);
+ return -EINVAL;
+ }
+ D_STATUS("Begin %s on smd_pkt_dev id:%d\n", __func__, smd_pkt_devp->i);
+
+ file->private_data = smd_pkt_devp;
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ if (smd_pkt_devp->ch == 0) {
+ unsigned open_wait_rem = smd_pkt_devp->open_modem_wait * 1000;
+
+ reinit_completion(&smd_pkt_devp->ch_allocated);
+
+ r = smd_pkt_add_driver(smd_pkt_devp);
+ if (r) {
+ pr_err_ratelimited("%s: %s Platform driver reg. failed\n",
+ __func__, smd_pkt_devp->ch_name);
+ goto out;
+ }
+
+ peripheral = smd_edge_to_pil_str(smd_pkt_devp->edge);
+ if (!IS_ERR_OR_NULL(peripheral)) {
+ smd_pkt_devp->pil = subsystem_get(peripheral);
+ if (IS_ERR(smd_pkt_devp->pil)) {
+ r = PTR_ERR(smd_pkt_devp->pil);
+ pr_err_ratelimited("%s failed on smd_pkt_dev id:%d - subsystem_get failed for %s\n",
+ __func__, smd_pkt_devp->i, peripheral);
+ /*
+ * Sleep in order to reduce the frequency of
+ * retry by user-space modules and to avoid
+ * possible watchdog bite.
+ */
+ msleep(open_wait_rem);
+ goto release_pd;
+ }
+ }
+
+ /*
+ * Wait for the modem SMSM to be initialized so that the SMD
+ * loopback channel can be allocated at the modem. Since the wait
+ * needs to be done at most once, using msleep doesn't degrade
+ * performance.
+ */
+ if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK")) {
+ if (!is_modem_smsm_inited())
+ msleep(5000);
+ smsm_change_state(SMSM_APPS_STATE,
+ 0, SMSM_SMD_LOOPBACK);
+ msleep(100);
+ }
+
+ /*
+ * Wait for a packet channel to be allocated so we know
+ * the modem is ready enough.
+ */
+ if (open_wait_rem) {
+ r = wait_for_completion_interruptible_timeout(
+ &smd_pkt_devp->ch_allocated,
+ msecs_to_jiffies(open_wait_rem));
+ if (r >= 0)
+ open_wait_rem = jiffies_to_msecs(r);
+ if (r == 0)
+ r = -ETIMEDOUT;
+ if (r == -ERESTARTSYS) {
+ pr_info_ratelimited("%s: wait on smd_pkt_dev id:%d allocation interrupted\n",
+ __func__, smd_pkt_devp->i);
+ goto release_pil;
+ }
+ if (r < 0) {
+ pr_err_ratelimited("%s: wait on smd_pkt_dev id:%d allocation failed rc:%d\n",
+ __func__, smd_pkt_devp->i, r);
+ goto release_pil;
+ }
+ }
+
+ r = smd_named_open_on_edge(smd_pkt_devp->ch_name,
+ smd_pkt_devp->edge,
+ &smd_pkt_devp->ch,
+ smd_pkt_devp,
+ ch_notify);
+ if (r < 0) {
+ pr_err_ratelimited("%s: %s open failed %d\n", __func__,
+ smd_pkt_devp->ch_name, r);
+ goto release_pil;
+ }
+
+ open_wait_rem = max_t(unsigned, 2000, open_wait_rem);
+ r = wait_event_interruptible_timeout(
+ smd_pkt_devp->ch_opened_wait_queue,
+ smd_pkt_devp->is_open,
+ msecs_to_jiffies(open_wait_rem));
+ if (r == 0)
+ r = -ETIMEDOUT;
+
+ if (r < 0) {
+ /* close the ch to sync smd's state with smd_pkt */
+ smd_close(smd_pkt_devp->ch);
+ smd_pkt_devp->ch = NULL;
+ }
+
+ if (r == -ERESTARTSYS) {
+ pr_info_ratelimited("%s: wait on smd_pkt_dev id:%d OPEN interrupted\n",
+ __func__, smd_pkt_devp->i);
+ } else if (r < 0) {
+ pr_err_ratelimited("%s: wait on smd_pkt_dev id:%d OPEN event failed rc:%d\n",
+ __func__, smd_pkt_devp->i, r);
+ } else if (!smd_pkt_devp->is_open) {
+ pr_err_ratelimited("%s: Invalid OPEN event on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ r = -ENODEV;
+ } else {
+ smd_disable_read_intr(smd_pkt_devp->ch);
+ smd_pkt_devp->ch_size =
+ smd_write_avail(smd_pkt_devp->ch);
+ r = 0;
+ smd_pkt_devp->ref_cnt++;
+ D_STATUS("Finished %s on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+ }
+ } else {
+ smd_pkt_devp->ref_cnt++;
+ }
+release_pil:
+ if (peripheral && (r < 0)) {
+ subsystem_put(smd_pkt_devp->pil);
+ smd_pkt_devp->pil = NULL;
+ }
+
+release_pd:
+ if (r < 0)
+ smd_pkt_remove_driver(smd_pkt_devp);
+out:
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+
+ return r;
+}
+
+int smd_pkt_release(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_pkt_dev *smd_pkt_devp = file->private_data;
+ unsigned long flags;
+
+ if (!smd_pkt_devp) {
+ pr_err_ratelimited("%s on a NULL device\n", __func__);
+ return -EINVAL;
+ }
+ D_STATUS("Begin %s on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+
+ mutex_lock(&smd_pkt_devp->ch_lock);
+ mutex_lock(&smd_pkt_devp->rx_lock);
+ mutex_lock(&smd_pkt_devp->tx_lock);
+ if (smd_pkt_devp->ref_cnt > 0)
+ smd_pkt_devp->ref_cnt--;
+
+ if (smd_pkt_devp->ch != 0 && smd_pkt_devp->ref_cnt == 0) {
+ clean_and_signal(smd_pkt_devp);
+ r = smd_close(smd_pkt_devp->ch);
+ smd_pkt_devp->ch = 0;
+ smd_pkt_devp->blocking_write = 0;
+ smd_pkt_devp->poll_mode = 0;
+ smd_pkt_remove_driver(smd_pkt_devp);
+ if (smd_pkt_devp->pil)
+ subsystem_put(smd_pkt_devp->pil);
+ smd_pkt_devp->has_reset = 0;
+ smd_pkt_devp->do_reset_notification = 0;
+ spin_lock_irqsave(&smd_pkt_devp->pa_spinlock, flags);
+ if (smd_pkt_devp->ws_locked) {
+ __pm_relax(&smd_pkt_devp->pa_ws);
+ smd_pkt_devp->ws_locked = 0;
+ }
+ spin_unlock_irqrestore(&smd_pkt_devp->pa_spinlock, flags);
+ }
+ mutex_unlock(&smd_pkt_devp->tx_lock);
+ mutex_unlock(&smd_pkt_devp->rx_lock);
+ mutex_unlock(&smd_pkt_devp->ch_lock);
+
+ if (flush_work(&smd_pkt_devp->packet_arrival_work))
+ D_STATUS("%s: Flushed work for smd_pkt_dev id:%d\n", __func__,
+ smd_pkt_devp->i);
+
+ D_STATUS("Finished %s on smd_pkt_dev id:%d\n",
+ __func__, smd_pkt_devp->i);
+
+ return r;
+}
+
+static const struct file_operations smd_pkt_fops = {
+ .owner = THIS_MODULE,
+ .open = smd_pkt_open,
+ .release = smd_pkt_release,
+ .read = smd_pkt_read,
+ .write = smd_pkt_write,
+ .poll = smd_pkt_poll,
+ .unlocked_ioctl = smd_pkt_ioctl,
+ .compat_ioctl = smd_pkt_ioctl,
+};
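+
+/*
+ * Minimal userspace sketch for this packet port (the device node name
+ * "/dev/smdcntl8" is an assumed example; the node is created from the
+ * qcom,smdpkt-dev-name property):
+ *
+ *	int one = 1;
+ *	int fd = open("/dev/smdcntl8", O_RDWR);
+ *
+ *	ioctl(fd, SMD_PKT_IOCTL_BLOCKING_WRITE, &one);
+ *	write(fd, pkt, pkt_len);	// one SMD packet per write()
+ *	read(fd, buf, sizeof(buf));	// returns exactly one packet
+ */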
+
+static int smd_pkt_init_add_device(struct smd_pkt_dev *smd_pkt_devp, int i)
+{
+ int r = 0;
+
+ smd_pkt_devp->i = i;
+
+ init_waitqueue_head(&smd_pkt_devp->ch_read_wait_queue);
+ init_waitqueue_head(&smd_pkt_devp->ch_write_wait_queue);
+ smd_pkt_devp->is_open = 0;
+ smd_pkt_devp->poll_mode = 0;
+ smd_pkt_devp->ws_locked = 0;
+ init_waitqueue_head(&smd_pkt_devp->ch_opened_wait_queue);
+
+ spin_lock_init(&smd_pkt_devp->pa_spinlock);
+ mutex_init(&smd_pkt_devp->ch_lock);
+ mutex_init(&smd_pkt_devp->rx_lock);
+ mutex_init(&smd_pkt_devp->tx_lock);
+ wakeup_source_init(&smd_pkt_devp->pa_ws, smd_pkt_devp->dev_name);
+ INIT_WORK(&smd_pkt_devp->packet_arrival_work, packet_arrival_worker);
+ init_completion(&smd_pkt_devp->ch_allocated);
+
+ cdev_init(&smd_pkt_devp->cdev, &smd_pkt_fops);
+ smd_pkt_devp->cdev.owner = THIS_MODULE;
+
+ r = cdev_add(&smd_pkt_devp->cdev, (smd_pkt_number + i), 1);
+ if (IS_ERR_VALUE(r)) {
+ pr_err("%s: cdev_add() failed for smd_pkt_dev id:%d ret:%i\n",
+ __func__, i, r);
+ return r;
+ }
+
+ smd_pkt_devp->devicep =
+ device_create(smd_pkt_classp,
+ NULL,
+ (smd_pkt_number + i),
+ NULL,
+ smd_pkt_devp->dev_name);
+
+ if (IS_ERR_OR_NULL(smd_pkt_devp->devicep)) {
+ pr_err("%s: device_create() failed for smd_pkt_dev id:%d\n",
+ __func__, i);
+ r = -ENOMEM;
+ cdev_del(&smd_pkt_devp->cdev);
+ wakeup_source_trash(&smd_pkt_devp->pa_ws);
+ return r;
+ }
+ if (device_create_file(smd_pkt_devp->devicep,
+ &dev_attr_open_timeout))
+ pr_err("%s: unable to create device attr for smd_pkt_dev id:%d\n",
+ __func__, i);
+
+ if (!strcmp(smd_pkt_devp->ch_name, "LOOPBACK")) {
+ if (device_create_file(smd_pkt_devp->devicep,
+ &dev_attr_loopback_edge))
+ pr_err("%s: unable to create device attr for smd_pkt_dev id:%d\n",
+ __func__, i);
+ }
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_add(&smd_pkt_devp->dev_list, &smd_pkt_dev_list);
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+
+ return r;
+}
+
+static void smd_pkt_core_deinit(void)
+{
+ struct smd_pkt_dev *smd_pkt_devp;
+ struct smd_pkt_dev *index;
+
+ mutex_lock(&smd_pkt_dev_lock_lha1);
+ list_for_each_entry_safe(smd_pkt_devp, index, &smd_pkt_dev_list,
+ dev_list) {
+ cdev_del(&smd_pkt_devp->cdev);
+ list_del(&smd_pkt_devp->dev_list);
+ device_destroy(smd_pkt_classp,
+ MKDEV(MAJOR(smd_pkt_number), smd_pkt_devp->i));
+ kfree(smd_pkt_devp);
+ }
+ mutex_unlock(&smd_pkt_dev_lock_lha1);
+
+ if (!IS_ERR_OR_NULL(smd_pkt_classp))
+ class_destroy(smd_pkt_classp);
+
+ unregister_chrdev_region(MAJOR(smd_pkt_number), num_smd_pkt_ports);
+}
+
+static int smd_pkt_alloc_chrdev_region(void)
+{
+ int r = alloc_chrdev_region(&smd_pkt_number,
+ 0,
+ num_smd_pkt_ports,
+ DEVICE_NAME);
+
+ if (IS_ERR_VALUE(r)) {
+ pr_err("%s: alloc_chrdev_region() failed ret:%i\n",
+ __func__, r);
+ return r;
+ }
+
+ smd_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(smd_pkt_classp)) {
+ pr_err("%s: class_create() failed ENOMEM\n", __func__);
+ r = -ENOMEM;
+ unregister_chrdev_region(MAJOR(smd_pkt_number),
+ num_smd_pkt_ports);
+ return r;
+ }
+
+ return 0;
+}
+
+static int parse_smdpkt_devicetree(struct device_node *node,
+ struct smd_pkt_dev *smd_pkt_devp)
+{
+ int edge;
+ char *key;
+ const char *ch_name;
+ const char *dev_name;
+ const char *remote_ss;
+
+ key = "qcom,smdpkt-remote";
+ remote_ss = of_get_property(node, key, NULL);
+ if (!remote_ss)
+ goto error;
+
+ edge = smd_remote_ss_to_edge(remote_ss);
+ if (edge < 0)
+ goto error;
+
+ smd_pkt_devp->edge = edge;
+ D_STATUS("%s: %s = %d", __func__, key, edge);
+
+ key = "qcom,smdpkt-port-name";
+ ch_name = of_get_property(node, key, NULL);
+ if (!ch_name)
+ goto error;
+
+ strlcpy(smd_pkt_devp->ch_name, ch_name, SMD_MAX_CH_NAME_LEN);
+ D_STATUS("%s ch_name = %s\n", __func__, ch_name);
+
+ key = "qcom,smdpkt-dev-name";
+ dev_name = of_get_property(node, key, NULL);
+ if (!dev_name)
+ goto error;
+
+ strlcpy(smd_pkt_devp->dev_name, dev_name, SMD_MAX_CH_NAME_LEN);
+ D_STATUS("%s dev_name = %s\n", __func__, dev_name);
+
+ return 0;
+
+error:
+ pr_err("%s: missing key: %s\n", __func__, key);
+ return -ENODEV;
+
+}
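+
+/*
+ * Illustrative device tree child node for the properties parsed above
+ * (the node name and property values are examples only, not part of
+ * this patch):
+ *
+ *	qcom,smdpkt-data40-cntl {
+ *		qcom,smdpkt-remote = "modem";
+ *		qcom,smdpkt-port-name = "DATA40_CNTL";
+ *		qcom,smdpkt-dev-name = "smdcntl8";
+ *	};
+ */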
+
+static int smd_pkt_devicetree_init(struct platform_device *pdev)
+{
+ int ret;
+ int i = 0;
+ struct device_node *node;
+ struct smd_pkt_dev *smd_pkt_devp;
+ int subnode_num = 0;
+
+ for_each_child_of_node(pdev->dev.of_node, node)
+ ++subnode_num;
+
+ num_smd_pkt_ports = subnode_num;
+
+ ret = smd_pkt_alloc_chrdev_region();
+ if (ret) {
+ pr_err("%s: smd_pkt_alloc_chrdev_region() failed ret:%i\n",
+ __func__, ret);
+ return ret;
+ }
+
+ for_each_child_of_node(pdev->dev.of_node, node) {
+ smd_pkt_devp = kzalloc(sizeof(struct smd_pkt_dev), GFP_KERNEL);
+ if (IS_ERR_OR_NULL(smd_pkt_devp)) {
+ pr_err("%s: kzalloc() failed for smd_pkt_dev id:%d\n",
+ __func__, i);
+ ret = -ENOMEM;
+ goto error_destroy;
+ }
+
+ ret = parse_smdpkt_devicetree(node, smd_pkt_devp);
+ if (ret) {
+ pr_err(" failed to parse_smdpkt_devicetree %d\n", i);
+ kfree(smd_pkt_devp);
+ goto error_destroy;
+ }
+
+ ret = smd_pkt_init_add_device(smd_pkt_devp, i);
+ if (ret < 0) {
+ pr_err("add device failed for idx:%d ret=%d\n", i, ret);
+ kfree(smd_pkt_devp);
+ goto error_destroy;
+ }
+ i++;
+ }
+
+ INIT_DELAYED_WORK(&loopback_work, loopback_probe_worker);
+
+ D_STATUS("SMD Packet Port Driver Initialized.\n");
+ return 0;
+
+error_destroy:
+ smd_pkt_core_deinit();
+ return ret;
+}
+
+static int msm_smd_pkt_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ if (pdev) {
+ if (pdev->dev.of_node) {
+ D_STATUS("%s device tree implementation\n", __func__);
+ ret = smd_pkt_devicetree_init(pdev);
+ if (ret)
+ pr_err("%s: device tree init failed\n",
+ __func__);
+ }
+ }
+
+ return 0;
+}
+
+static const struct of_device_id msm_smd_pkt_match_table[] = {
+ { .compatible = "qcom,smdpkt" },
+ {},
+};
+
+static struct platform_driver msm_smd_pkt_driver = {
+ .probe = msm_smd_pkt_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_smd_pkt_match_table,
+ },
+};
+
+static int __init smd_pkt_init(void)
+{
+ int rc;
+
+ INIT_LIST_HEAD(&smd_pkt_dev_list);
+ INIT_LIST_HEAD(&smd_pkt_driver_list);
+ rc = platform_driver_register(&msm_smd_pkt_driver);
+ if (rc) {
+ pr_err("%s: msm_smd_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ smd_pkt_ilctxt = ipc_log_context_create(SMD_PKT_IPC_LOG_PAGE_CNT,
+ "smd_pkt", 0);
+ return 0;
+}
+
+static void __exit smd_pkt_cleanup(void)
+{
+ smd_pkt_core_deinit();
+}
+
+module_init(smd_pkt_init);
+module_exit(smd_pkt_cleanup);
+
+MODULE_DESCRIPTION("MSM Shared Memory Packet Port");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/msm/clock-local2.c b/drivers/clk/msm/clock-local2.c
index 19956f030ae9..adb07cdb7e8d 100644
--- a/drivers/clk/msm/clock-local2.c
+++ b/drivers/clk/msm/clock-local2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -928,7 +928,8 @@ static unsigned long branch_clk_get_rate(struct clk *c)
{
struct branch_clk *branch = to_branch_clk(c);
- if (branch->max_div)
+ if (branch->max_div ||
+ (branch->aggr_sibling_rates && !branch->is_prepared))
return branch->c.rate;
return clk_get_rate(c->parent);
diff --git a/drivers/clk/msm/clock-mmss-8998.c b/drivers/clk/msm/clock-mmss-8998.c
index 6ebb3ed6ed91..fdaaa723accd 100644
--- a/drivers/clk/msm/clock-mmss-8998.c
+++ b/drivers/clk/msm/clock-mmss-8998.c
@@ -359,6 +359,7 @@ static struct rcg_clk mdp_clk_src = {
.set_rate = set_rate_hid,
.freq_tbl = ftbl_mdp_clk_src,
.current_freq = &rcg_dummy_freq,
+ .non_local_children = true,
.base = &virt_base,
.c = {
.dbg_name = "mdp_clk_src",
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index 37535a72e066..4224b594f1b8 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -43,6 +43,7 @@
#include <soc/qcom/event_timer.h>
#include <soc/qcom/lpm-stats.h>
#include <soc/qcom/jtag.h>
+#include <soc/qcom/minidump.h>
#include <asm/cputype.h>
#include <asm/arch_timer.h>
#include <asm/cacheflush.h>
@@ -690,7 +691,7 @@ static int cpu_power_select(struct cpuidle_device *dev,
if (!cpu)
return -EINVAL;
- if (sleep_disabled)
+ if (sleep_disabled && !cpu_isolated(dev->cpu))
return 0;
idx_restrict = cpu->nlevels + 1;
@@ -1854,6 +1855,7 @@ static int lpm_probe(struct platform_device *pdev)
int ret;
int size;
struct kobject *module_kobj = NULL;
+ struct md_region md_entry;
get_online_cpus();
lpm_root_node = lpm_of_parse_cluster(pdev);
@@ -1914,6 +1916,14 @@ static int lpm_probe(struct platform_device *pdev)
goto failed;
}
+ /* Add lpm_debug to Minidump */
+ strlcpy(md_entry.name, "KLPMDEBUG", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)lpm_debug;
+ md_entry.phys_addr = lpm_debug_phys;
+ md_entry.size = size;
+ if (msm_minidump_add_region(&md_entry))
+ pr_info("Failed to add lpm_debug in Minidump\n");
+
return 0;
failed:
free_cluster_node(lpm_root_node);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 287d839f98d0..9f9dd574c8d0 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2557,6 +2557,8 @@ static int etm4_set_reg_dump(struct etmv4_drvdata *drvdata)
drvdata->reg_data.addr = virt_to_phys(baddr);
drvdata->reg_data.len = size;
+ scnprintf(drvdata->reg_data.name, sizeof(drvdata->reg_data.name),
+ "KETM_REG%d", drvdata->cpu);
dump_entry.id = MSM_DUMP_DATA_ETM_REG + drvdata->cpu;
dump_entry.addr = virt_to_phys(&drvdata->reg_data);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 34b12e015768..c5998bd5ce02 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1726,6 +1726,9 @@ static int tmc_etf_set_buf_dump(struct tmc_drvdata *drvdata)
drvdata->buf_data.addr = virt_to_phys(drvdata->buf);
drvdata->buf_data.len = drvdata->size;
+ scnprintf(drvdata->buf_data.name, sizeof(drvdata->buf_data.name),
+ "KTMC_ETF%d", count);
+
dump_entry.id = MSM_DUMP_DATA_TMC_ETF + count;
dump_entry.addr = virt_to_phys(&drvdata->buf_data);
@@ -1817,6 +1820,8 @@ static int tmc_set_reg_dump(struct tmc_drvdata *drvdata)
drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf);
drvdata->reg_data.len = size;
+ scnprintf(drvdata->reg_data.name, sizeof(drvdata->reg_data.name),
+ "KTMC_REG%d", count);
dump_entry.id = MSM_DUMP_DATA_TMC_REG + count;
dump_entry.addr = virt_to_phys(&drvdata->reg_data);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 51159711b1d8..25fe6c85a34e 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -3288,6 +3288,43 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1 << DOMAIN_ATTR_ENABLE_TTBR1;
ret = 0;
break;
+ case DOMAIN_ATTR_GEOMETRY: {
+ struct iommu_domain_geometry *geometry =
+ (struct iommu_domain_geometry *)data;
+
+ if (smmu_domain->smmu != NULL) {
+ dev_err(smmu_domain->smmu->dev,
+ "cannot set geometry attribute while attached\n");
+ ret = -EBUSY;
+ break;
+ }
+
+ if (geometry->aperture_start >= SZ_1G * 4ULL ||
+ geometry->aperture_end >= SZ_1G * 4ULL) {
+ pr_err("fastmap does not support IOVAs >= 4GB\n");
+ ret = -EINVAL;
+ break;
+ }
+ if (smmu_domain->attributes
+ & (1 << DOMAIN_ATTR_GEOMETRY)) {
+ if (geometry->aperture_start
+ < domain->geometry.aperture_start)
+ domain->geometry.aperture_start =
+ geometry->aperture_start;
+
+ if (geometry->aperture_end
+ > domain->geometry.aperture_end)
+ domain->geometry.aperture_end =
+ geometry->aperture_end;
+ } else {
+ smmu_domain->attributes |= 1 << DOMAIN_ATTR_GEOMETRY;
+ domain->geometry.aperture_start =
+ geometry->aperture_start;
+ domain->geometry.aperture_end = geometry->aperture_end;
+ }
+ ret = 0;
+ break;
+ }
default:
ret = -ENODEV;
break;
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 8c6364f03eac..0881d68f34d8 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -188,7 +188,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
iommu_tlbiall(mapping->domain);
mapping->have_stale_tlbs = false;
- av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, mapping->base,
+ av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start,
+ mapping->base,
mapping->base + mapping->size - 1,
skip_sync);
}
@@ -367,7 +369,8 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
if (unlikely(iova == DMA_ERROR_CODE))
goto fail;
- pmd = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, iova);
+ pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start, iova);
if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
goto fail_free_iova;
@@ -391,7 +394,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
unsigned long flags;
av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
- mapping->base, iova);
+ mapping->domain->geometry.aperture_start,
+ iova);
unsigned long offset = iova & ~FAST_PAGE_MASK;
size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
int nptes = len >> FAST_PAGE_SHIFT;
@@ -414,7 +418,8 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev,
{
struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
- mapping->base, iova);
+ mapping->domain->geometry.aperture_start,
+ iova);
unsigned long offset = iova & ~FAST_PAGE_MASK;
struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
@@ -427,7 +432,8 @@ static void fast_smmu_sync_single_for_device(struct device *dev,
{
struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
- mapping->base, iova);
+ mapping->domain->geometry.aperture_start,
+ iova);
unsigned long offset = iova & ~FAST_PAGE_MASK;
struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
@@ -555,8 +561,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
while (sg_miter_next(&miter)) {
int nptes = miter.length >> FAST_PAGE_SHIFT;
- ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base,
- iova_iter);
+ ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start,
+ iova_iter);
if (unlikely(av8l_fast_map_public(
ptep, page_to_phys(miter.page),
miter.length, prot))) {
@@ -584,7 +591,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
out_unmap:
/* need to take the lock again for page tables and iova */
spin_lock_irqsave(&mapping->lock, flags);
- ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_addr);
+ ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start,
+ dma_addr);
av8l_fast_unmap_public(ptep, size);
fast_dmac_clean_range(mapping, ptep, ptep + count);
out_free_iova:
@@ -616,7 +625,8 @@ static void fast_smmu_free(struct device *dev, size_t size,
pages = area->pages;
dma_common_free_remap(vaddr, size, VM_USERMAP, false);
- ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_handle);
+ ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start, dma_handle);
spin_lock_irqsave(&mapping->lock, flags);
av8l_fast_unmap_public(ptep, size);
fast_dmac_clean_range(mapping, ptep, ptep + count);
@@ -720,7 +730,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
*
* Creates a mapping structure which holds information about used/unused IO
* address ranges, which is required to perform mapping with IOMMU aware
- * functions. The only VA range supported is [0, 4GB).
+ * functions. The only VA range supported is [0, 4GB].
*
* The client device need to be attached to the mapping with
* fast_smmu_attach_device function.
@@ -774,6 +784,7 @@ int fast_smmu_attach_device(struct device *dev,
struct iommu_domain *domain = mapping->domain;
struct iommu_pgtbl_info info;
u64 size = (u64)mapping->bits << PAGE_SHIFT;
+ struct iommu_domain_geometry geometry;
if (mapping->base + size > (SZ_1G * 4ULL))
return -EINVAL;
@@ -788,8 +799,11 @@ int fast_smmu_attach_device(struct device *dev,
mapping->fast->domain = domain;
mapping->fast->dev = dev;
- domain->geometry.aperture_start = mapping->base;
- domain->geometry.aperture_end = mapping->base + size - 1;
+ geometry.aperture_start = mapping->base;
+ geometry.aperture_end = mapping->base + size - 1;
+ if (iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY,
+ &geometry))
+ return -EINVAL;
if (iommu_attach_device(domain, dev))
return -EINVAL;
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index 3582e206db68..5378e95c4627 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -133,6 +133,9 @@ struct av8l_fast_io_pgtable {
#define AV8L_FAST_TCR_EPD1_SHIFT 23
#define AV8L_FAST_TCR_EPD1_FAULT 1
+#define AV8L_FAST_TCR_SEP_SHIFT (15 + 32)
+#define AV8L_FAST_TCR_SEP_UPSTREAM 7ULL
+
#define AV8L_FAST_MAIR_ATTR_SHIFT(n) ((n) << 3)
#define AV8L_FAST_MAIR_ATTR_MASK 0xff
#define AV8L_FAST_MAIR_ATTR_DEVICE 0x04
@@ -173,12 +176,12 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
}
void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, u64 base,
- u64 end, bool skip_sync)
+ u64 start, u64 end, bool skip_sync)
{
int i;
- av8l_fast_iopte *pmdp = pmds;
+ av8l_fast_iopte *pmdp = iopte_pmd_offset(pmds, base, start);
- for (i = base >> AV8L_FAST_PAGE_SHIFT;
+ for (i = start >> AV8L_FAST_PAGE_SHIFT;
i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) {
if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
*pmdp = 0;
@@ -256,16 +259,17 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size)
__av8l_fast_unmap(ptep, size, true);
}
-/* upper layer must take care of TLB invalidation */
static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size)
{
struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+ struct io_pgtable *iop = &data->iop;
av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova);
unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
__av8l_fast_unmap(ptep, size, false);
dmac_clean_range(ptep, ptep + nptes);
+ iop->cfg.tlb->tlb_flush_all(iop->cookie);
return size;
}
@@ -522,6 +526,7 @@ av8l_fast_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
#if defined(CONFIG_ARM)
reg |= ARM_32_LPAE_TCR_EAE;
#endif
+ reg |= AV8L_FAST_TCR_SEP_UPSTREAM << AV8L_FAST_TCR_SEP_SHIFT;
cfg->av8l_fast_cfg.tcr = reg;
/* MAIRs */
@@ -668,7 +673,7 @@ static int __init av8l_fast_positive_testing(void)
}
/* sweep up TLB proving PTEs */
- av8l_fast_clear_stale_ptes(pmds, base, max, false);
+ av8l_fast_clear_stale_ptes(pmds, base, base, max, false);
/* map the entire 4GB VA space with 8K map calls */
for (iova = base; iova < max; iova += SZ_8K) {
@@ -689,7 +694,7 @@ static int __init av8l_fast_positive_testing(void)
}
/* sweep up TLB proving PTEs */
- av8l_fast_clear_stale_ptes(pmds, base, max, false);
+ av8l_fast_clear_stale_ptes(pmds, base, base, max, false);
/* map the entire 4GB VA space with 16K map calls */
for (iova = base; iova < max; iova += SZ_16K) {
@@ -710,7 +715,7 @@ static int __init av8l_fast_positive_testing(void)
}
/* sweep up TLB proving PTEs */
- av8l_fast_clear_stale_ptes(pmds, base, max, false);
+ av8l_fast_clear_stale_ptes(pmds, base, base, max, false);
/* map the entire 4GB VA space with 64K map calls */
for (iova = base; iova < max; iova += SZ_64K) {
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 75fcde6e2c20..22b4934df6df 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -787,9 +787,13 @@ static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
enum iommu_attr attrs[] = {
DOMAIN_ATTR_FAST,
DOMAIN_ATTR_ATOMIC,
+ DOMAIN_ATTR_GEOMETRY,
};
int one = 1;
- void *attr_values[] = { &one, &one, &one };
+ struct iommu_domain_geometry geometry = {0, 0, 0};
+ void *attr_values[] = { &one, &one, &geometry};
+
+ geometry.aperture_end = (dma_addr_t)(SZ_1G * 4ULL - 1);
iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
ARRAY_SIZE(attrs), sizes);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 5f1c9c2f9436..63e46125c292 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -3432,22 +3432,21 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
/*
* If frame_id = 1 then no eof check is needed
*/
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].active &&
- vfe_dev->axi_data.src_info[VFE_PIX_0].accept_frame == false) {
+ if (vfe_dev->axi_data.src_info[frame_src].active &&
+ frame_src == VFE_PIX_0 &&
+ vfe_dev->axi_data.src_info[frame_src].accept_frame == false) {
pr_debug("%s:%d invalid time to request frame %d\n",
__func__, __LINE__, frame_id);
goto error;
}
- if ((vfe_dev->axi_data.src_info[VFE_PIX_0].active && (frame_id !=
- vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id + vfe_dev->
- axi_data.src_info[VFE_PIX_0].sof_counter_step)) ||
- ((!vfe_dev->axi_data.src_info[VFE_PIX_0].active) && (frame_id !=
+ if ((vfe_dev->axi_data.src_info[frame_src].active && (frame_id !=
vfe_dev->axi_data.src_info[frame_src].frame_id + vfe_dev->
- axi_data.src_info[frame_src].sof_counter_step))) {
+ axi_data.src_info[VFE_PIX_0].sof_counter_step)) ||
+ ((!vfe_dev->axi_data.src_info[frame_src].active))) {
pr_debug("%s:%d invalid frame id %d cur frame id %d pix %d\n",
__func__, __LINE__, frame_id,
- vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
- vfe_dev->axi_data.src_info[VFE_PIX_0].active);
+ vfe_dev->axi_data.src_info[frame_src].frame_id,
+ vfe_dev->axi_data.src_info[frame_src].active);
goto error;
}
if (stream_info->undelivered_request_cnt >= MAX_BUFFERS_IN_HW) {
@@ -3954,6 +3953,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
&update_cmd->req_frm_ver2;
stream_info = msm_isp_get_stream_common_data(vfe_dev,
HANDLE_TO_IDX(req_frm->stream_handle));
+ if (stream_info == NULL) {
+ pr_err_ratelimited("%s: stream_info is NULL\n",
+ __func__);
+ rc = -EINVAL;
+ break;
+ }
rc = msm_isp_request_frame(vfe_dev, stream_info,
req_frm->user_stream_id,
req_frm->frame_id,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
index 65009cb22286..a8d4cfb43927 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -141,6 +141,11 @@ static inline struct msm_vfe_axi_stream *msm_isp_get_stream_common_data(
struct msm_vfe_common_dev_data *common_data = vfe_dev->common_data;
struct msm_vfe_axi_stream *stream_info;
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+ pr_err("invalid stream_idx %d\n", stream_idx);
+ return NULL;
+ }
+
if (vfe_dev->is_split && stream_idx < RDI_INTF_0)
stream_info = &common_data->streams[stream_idx];
else
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
index b067c4916341..0ff270bb8410 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -201,6 +201,7 @@ static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
enum cci_i2c_queue_t queue)
{
int32_t rc = 0;
+ unsigned long flags;
uint32_t read_val = 0;
uint32_t reg_offset = master * 0x200 + queue * 0x100;
read_val = msm_camera_io_r_mb(cci_dev->base +
@@ -223,6 +224,8 @@ static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
reg_val = 1 << ((master * 2) + queue);
CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+ spin_lock_irqsave(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
atomic_set(&cci_dev->cci_master_info[master].
done_pending[queue], 1);
msm_camera_io_w_mb(reg_val, cci_dev->base +
@@ -230,6 +233,8 @@ static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
CDBG("%s line %d wait_for_completion_timeout\n",
__func__, __LINE__);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
rc = wait_for_completion_timeout(&cci_dev->
cci_master_info[master].report_q[queue], CCI_TIMEOUT);
if (rc <= 0) {
@@ -438,10 +443,17 @@ static int32_t msm_cci_wait_report_cmd(struct cci_device *cci_dev,
enum cci_i2c_master_t master,
enum cci_i2c_queue_t queue)
{
+ unsigned long flags;
uint32_t reg_val = 1 << ((master * 2) + queue);
msm_cci_load_report_cmd(cci_dev, master, queue);
+
+ spin_lock_irqsave(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1);
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
+
msm_camera_io_w_mb(reg_val, cci_dev->base +
CCI_QUEUE_START_ADDR);
return msm_cci_wait(cci_dev, master, queue);
@@ -451,13 +463,19 @@ static void msm_cci_process_half_q(struct cci_device *cci_dev,
enum cci_i2c_master_t master,
enum cci_i2c_queue_t queue)
{
+ unsigned long flags;
uint32_t reg_val = 1 << ((master * 2) + queue);
+
+ spin_lock_irqsave(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
if (0 == atomic_read(&cci_dev->cci_master_info[master].q_free[queue])) {
msm_cci_load_report_cmd(cci_dev, master, queue);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
msm_camera_io_w_mb(reg_val, cci_dev->base +
CCI_QUEUE_START_ADDR);
}
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
}
static int32_t msm_cci_process_full_q(struct cci_device *cci_dev,
@@ -465,15 +483,23 @@ static int32_t msm_cci_process_full_q(struct cci_device *cci_dev,
enum cci_i2c_queue_t queue)
{
int32_t rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
if (1 == atomic_read(&cci_dev->cci_master_info[master].q_free[queue])) {
atomic_set(&cci_dev->cci_master_info[master].
done_pending[queue], 1);
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
rc = msm_cci_wait(cci_dev, master, queue);
if (rc < 0) {
pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
return rc;
}
} else {
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
rc = msm_cci_wait_report_cmd(cci_dev, master, queue);
if (rc < 0) {
pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
@@ -501,8 +527,13 @@ static int32_t msm_cci_transfer_end(struct cci_device *cci_dev,
enum cci_i2c_queue_t queue)
{
int32_t rc = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
if (0 == atomic_read(&cci_dev->cci_master_info[master].q_free[queue])) {
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
rc = msm_cci_lock_queue(cci_dev, master, queue, 0);
if (rc < 0) {
pr_err("%s failed line %d\n", __func__, __LINE__);
@@ -516,6 +547,8 @@ static int32_t msm_cci_transfer_end(struct cci_device *cci_dev,
} else {
atomic_set(&cci_dev->cci_master_info[master].
done_pending[queue], 1);
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
rc = msm_cci_wait(cci_dev, master, queue);
if (rc < 0) {
pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
@@ -570,6 +603,7 @@ static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
uint32_t reg_offset;
uint32_t val = 0;
uint32_t max_queue_size;
+ unsigned long flags;
if (i2c_cmd == NULL) {
pr_err("%s:%d Failed line\n", __func__,
@@ -613,7 +647,11 @@ static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
msm_camera_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
reg_offset);
+ spin_lock_irqsave(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[master].
+ lock_q[queue], flags);
max_queue_size = cci_dev->cci_i2c_queue_info[master][queue].
max_queue_size;
@@ -1641,6 +1679,7 @@ static int32_t msm_cci_config(struct v4l2_subdev *sd,
static irqreturn_t msm_cci_irq(int irq_num, void *data)
{
uint32_t irq;
+ unsigned long flags;
struct cci_device *cci_dev = data;
irq = msm_camera_io_r_mb(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
msm_camera_io_w_mb(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
@@ -1667,22 +1706,30 @@ static irqreturn_t msm_cci_irq(int irq_num, void *data)
if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
struct msm_camera_cci_master_info *cci_master_info;
cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+ spin_lock_irqsave(&cci_dev->cci_master_info[MASTER_0].
+ lock_q[QUEUE_0], flags);
atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
cci_master_info->status = 0;
if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
complete(&cci_master_info->report_q[QUEUE_0]);
atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
}
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[MASTER_0].
+ lock_q[QUEUE_0], flags);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
struct msm_camera_cci_master_info *cci_master_info;
cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+ spin_lock_irqsave(&cci_dev->cci_master_info[MASTER_0].
+ lock_q[QUEUE_1], flags);
atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
cci_master_info->status = 0;
if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
complete(&cci_master_info->report_q[QUEUE_1]);
atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
}
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[MASTER_0].
+ lock_q[QUEUE_1], flags);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
cci_dev->cci_master_info[MASTER_1].status = 0;
@@ -1691,22 +1738,30 @@ static irqreturn_t msm_cci_irq(int irq_num, void *data)
if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
struct msm_camera_cci_master_info *cci_master_info;
cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+ spin_lock_irqsave(&cci_dev->cci_master_info[MASTER_1].
+ lock_q[QUEUE_0], flags);
atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
cci_master_info->status = 0;
if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
complete(&cci_master_info->report_q[QUEUE_0]);
atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
}
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[MASTER_1].
+ lock_q[QUEUE_0], flags);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
struct msm_camera_cci_master_info *cci_master_info;
cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+ spin_lock_irqsave(&cci_dev->cci_master_info[MASTER_1].
+ lock_q[QUEUE_1], flags);
atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
cci_master_info->status = 0;
if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
complete(&cci_master_info->report_q[QUEUE_1]);
atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
}
+ spin_unlock_irqrestore(&cci_dev->cci_master_info[MASTER_1].
+ lock_q[QUEUE_1], flags);
}
if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
@@ -1795,7 +1850,9 @@ static void msm_cci_init_cci_params(struct cci_device *new_cci_dev)
mutex_init(&new_cci_dev->cci_master_info[i].mutex_q[j]);
init_completion(&new_cci_dev->
cci_master_info[i].report_q[j]);
- }
+ spin_lock_init(&new_cci_dev->
+ cci_master_info[i].lock_q[j]);
+ }
}
return;
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
index 6e39d814bd73..eb615cc7a62c 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -125,6 +125,7 @@ struct msm_camera_cci_master_info {
struct mutex mutex_q[NUM_QUEUES];
struct completion report_q[NUM_QUEUES];
atomic_t done_pending[NUM_QUEUES];
+ spinlock_t lock_q[NUM_QUEUES];
};
struct msm_cci_clk_params_t {
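For context, a minimal sketch of the locking pattern the msm_cci changes introduce (the helper name below is illustrative, not part of the patch): q_free and done_pending are individually atomic, but the IRQ handler acts on them as a pair, so both the queue-submission paths and the IRQ handler now update them under the new per-queue lock_q spinlock with interrupts disabled.

	static void cci_mark_queue_pending(struct msm_camera_cci_master_info *m,
					   enum cci_i2c_queue_t q)
	{
		unsigned long flags;

		/*
		 * Pair the two flags under lock_q so the IRQ handler never
		 * observes one updated without the other.
		 */
		spin_lock_irqsave(&m->lock_q[q], flags);
		atomic_set(&m->q_free[q], 1);
		atomic_set(&m->done_pending[q], 1);
		spin_unlock_irqrestore(&m->lock_q[q], flags);
	}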
diff --git a/drivers/net/wireless/wcnss/wcnss_wlan.c b/drivers/net/wireless/wcnss/wcnss_wlan.c
index 3f9eeabc5464..9db2871e8150 100644
--- a/drivers/net/wireless/wcnss/wcnss_wlan.c
+++ b/drivers/net/wireless/wcnss/wcnss_wlan.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -192,6 +192,8 @@ static DEFINE_SPINLOCK(reg_spinlock);
#define WCNSS_USR_WLAN_MAC_ADDR (WCNSS_USR_CTRL_MSG_START + 3)
#define MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x"
+#define SHOW_MAC_ADDRESS_STR "%02x:%02x:%02x:%02x:%02x:%02x\n"
+#define WCNSS_USER_MAC_ADDR_LENGTH 18
/* message types */
#define WCNSS_CTRL_MSG_START 0x01000000
@@ -427,23 +429,28 @@ static struct {
static ssize_t wcnss_wlan_macaddr_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- char macAddr[WLAN_MAC_ADDR_SIZE];
+ int index;
+ int macAddr[WLAN_MAC_ADDR_SIZE];
if (!penv)
return -ENODEV;
- pr_debug("%s: Receive MAC Addr From user space: %s\n", __func__, buf);
+ if (strlen(buf) != WCNSS_USER_MAC_ADDR_LENGTH) {
+ dev_err(dev, "%s: Invalid MAC addr length\n", __func__);
+ return -EINVAL;
+ }
if (WLAN_MAC_ADDR_SIZE != sscanf(buf, MAC_ADDRESS_STR,
- (int *)&macAddr[0], (int *)&macAddr[1],
- (int *)&macAddr[2], (int *)&macAddr[3],
- (int *)&macAddr[4], (int *)&macAddr[5])) {
-
+ &macAddr[0], &macAddr[1], &macAddr[2],
+ &macAddr[3], &macAddr[4], &macAddr[5])) {
pr_err("%s: Failed to Copy MAC\n", __func__);
return -EINVAL;
}
- memcpy(penv->wlan_nv_macAddr, macAddr, sizeof(penv->wlan_nv_macAddr));
+ for (index = 0; index < WLAN_MAC_ADDR_SIZE; index++) {
+ memcpy(&penv->wlan_nv_macAddr[index],
+ (char *)&macAddr[index], sizeof(char));
+ }
pr_info("%s: Write MAC Addr:" MAC_ADDRESS_STR "\n", __func__,
penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1],
@@ -459,7 +466,7 @@ static ssize_t wcnss_wlan_macaddr_show(struct device *dev,
if (!penv)
return -ENODEV;
- return scnprintf(buf, PAGE_SIZE, MAC_ADDRESS_STR,
+ return scnprintf(buf, PAGE_SIZE, SHOW_MAC_ADDRESS_STR,
penv->wlan_nv_macAddr[0], penv->wlan_nv_macAddr[1],
penv->wlan_nv_macAddr[2], penv->wlan_nv_macAddr[3],
penv->wlan_nv_macAddr[4], penv->wlan_nv_macAddr[5]);
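A condensed sketch of the parsing pattern the wcnss_wlan_macaddr_store() change adopts (variable names are illustrative): the input must be exactly 18 bytes ("aa:bb:cc:dd:ee:ff" plus the trailing newline), and because a %02x conversion writes a full int, the values are scanned into an int array and only the low byte of each is copied into the MAC buffer.

	int mac[WLAN_MAC_ADDR_SIZE];
	u8 nv_mac[WLAN_MAC_ADDR_SIZE];
	int i;

	if (strlen(buf) != WCNSS_USER_MAC_ADDR_LENGTH)	/* 17 chars + '\n' */
		return -EINVAL;

	if (sscanf(buf, MAC_ADDRESS_STR, &mac[0], &mac[1], &mac[2],
		   &mac[3], &mac[4], &mac[5]) != WLAN_MAC_ADDR_SIZE)
		return -EINVAL;

	for (i = 0; i < WLAN_MAC_ADDR_SIZE; i++)
		nv_mac[i] = (u8)mac[i];	/* keep only the low byte */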
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 4c75b4d392c6..39400dda27c2 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -367,6 +367,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
return err;
}
+ armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
return 0;
}
@@ -601,10 +603,12 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
struct platform_device *pmu_device = cpu_pmu->plat_device;
struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+ cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
irqs = min(pmu_device->num_resources, num_possible_cpus());
irq = platform_get_irq(pmu_device, 0);
- if (irq >= 0 && irq_is_percpu(irq)) {
+ if (irq > 0 && irq_is_percpu(irq)) {
on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
free_percpu_irq(irq, &hw_events->percpu_pmu);
} else {
@@ -617,10 +621,11 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
+ if (irq > 0)
free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
}
+ cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
}
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -639,7 +644,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
}
irq = platform_get_irq(pmu_device, 0);
- if (irq >= 0 && irq_is_percpu(irq)) {
+ if (irq > 0 && irq_is_percpu(irq)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
if (err) {
@@ -648,6 +653,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return err;
}
on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+ cpu_pmu->percpu_irq = irq;
} else {
for (i = 0; i < irqs; ++i) {
int cpu = i;
@@ -754,13 +760,6 @@ static void cpu_pm_pmu_common(void *info)
return;
}
- /*
- * Always reset the PMU registers on power-up even if
- * there are no events running.
- */
- if (cmd == CPU_PM_EXIT && armpmu->reset)
- armpmu->reset(armpmu);
-
if (!enabled) {
data->ret = NOTIFY_OK;
return;
@@ -795,6 +794,13 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
.cpu = smp_processor_id(),
};
+ /*
+ * Always reset the PMU registers on power-up even if
+ * there are no events running.
+ */
+ if (cmd == CPU_PM_EXIT && data.armpmu->reset)
+ data.armpmu->reset(data.armpmu);
+
cpu_pm_pmu_common(&data);
return data.ret;
}
@@ -824,6 +830,7 @@ static inline void cpu_pm_pmu_common(void *info) { }
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
void *hcpu)
{
+ int irq = -1;
unsigned long masked_action = (action & ~CPU_TASKS_FROZEN);
struct cpu_pm_pmu_args data = {
.armpmu = container_of(b, struct arm_pmu, hotplug_nb),
@@ -835,37 +842,37 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
switch (masked_action) {
case CPU_STARTING:
- data.cmd = CPU_PM_EXIT;
- break;
- case CPU_DYING:
- data.cmd = CPU_PM_ENTER;
- break;
case CPU_DOWN_FAILED:
- data.cmd = CPU_PM_ENTER_FAILED;
- break;
- case CPU_ONLINE:
- if (data.armpmu->plat_device) {
- struct platform_device *pmu_device =
- data.armpmu->plat_device;
- int irq = platform_get_irq(pmu_device, 0);
-
- if (irq >= 0 && irq_is_percpu(irq)) {
- smp_call_function_single(data.cpu,
- cpu_pmu_enable_percpu_irq, &irq, 1);
- }
+ /*
+ * Always reset the PMU registers on power-up even if
+ * there are no events running.
+ */
+ if (data.armpmu->reset)
+ data.armpmu->reset(data.armpmu);
+ if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING) {
+ if (data.armpmu->plat_device)
+ irq = data.armpmu->percpu_irq;
+ /* Arm the PMU IRQ before appearing. */
+ if (irq > 0 && irq_is_percpu(irq))
+ cpu_pmu_enable_percpu_irq(&irq);
+ data.cmd = CPU_PM_EXIT;
+ cpu_pm_pmu_common(&data);
}
- return NOTIFY_DONE;
+ return NOTIFY_OK;
+ case CPU_DYING:
+ if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF) {
+ data.cmd = CPU_PM_ENTER;
+ cpu_pm_pmu_common(&data);
+ /* Disarm the PMU IRQ before disappearing. */
+ if (data.armpmu->plat_device)
+ irq = data.armpmu->percpu_irq;
+ if (irq > 0 && irq_is_percpu(irq))
+ cpu_pmu_disable_percpu_irq(&irq);
+ }
+ return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
-
- if (smp_processor_id() == data.cpu)
- cpu_pm_pmu_common(&data);
- else
- smp_call_function_single(data.cpu,
- cpu_pm_pmu_common, &data, 1);
-
- return data.ret;
}
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
@@ -966,7 +973,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
irq = platform_get_irq(pdev, i);
- if (irq >= 0) {
+ if (irq > 0) {
bool spi = !irq_is_percpu(irq);
if (i > 0 && spi != using_spi) {
@@ -1085,6 +1092,9 @@ int arm_pmu_device_probe(struct platform_device *pdev,
if (ret)
goto out_destroy;
+ pmu->pmu_state = ARM_PMU_STATE_OFF;
+ pmu->percpu_irq = -1;
+
pr_info("enabled with %s PMU driver, %d counters available\n",
pmu->name, pmu->num_events);
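A condensed sketch of the CPU_STARTING handling the arm_pmu change introduces; the helper name is illustrative, and the ARM_PMU_STATE_* values plus the percpu_irq field are assumed to come from the matching arm_pmu.h update, which is not part of this hunk.

	static void cpu_pmu_restore_on_starting(struct arm_pmu *armpmu,
						struct cpu_pm_pmu_args *data)
	{
		int irq;

		if (armpmu->reset)
			armpmu->reset(armpmu);	/* always reprogram on power-up */

		if (armpmu->pmu_state != ARM_PMU_STATE_RUNNING)
			return;			/* no active events, nothing to restore */

		irq = armpmu->percpu_irq;
		if (irq > 0 && irq_is_percpu(irq))
			cpu_pmu_enable_percpu_irq(&irq);	/* re-arm the PPI */

		data->cmd = CPU_PM_EXIT;
		cpu_pm_pmu_common(data);	/* restore counter state */
	}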
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 100bbd582a5e..f01743d04e84 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -1584,6 +1584,7 @@ static int ipa_init_smem_region(int memory_region_size,
struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
struct ipa_desc desc;
struct ipa_mem_buffer mem;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc;
if (memory_region_size == 0)
@@ -1603,7 +1604,7 @@ static int ipa_init_smem_region(int memory_region_size,
memset(mem.base, 0, mem.size);
cmd = kzalloc(sizeof(*cmd),
- GFP_KERNEL);
+ flag);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
@@ -2166,6 +2167,7 @@ int _ipa_init_sram_v2(void)
struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
struct ipa_desc desc = {0};
struct ipa_mem_buffer mem;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = 0;
phys_addr = ipa_ctx->ipa_wrapper_base +
@@ -2203,7 +2205,7 @@ int _ipa_init_sram_v2(void)
}
memset(mem.base, 0, mem.size);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
@@ -2314,6 +2316,7 @@ int _ipa_init_hdr_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_hdr_init_local *cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = 0;
mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
@@ -2325,7 +2328,7 @@ int _ipa_init_hdr_v2(void)
}
memset(mem.base, 0, mem.size);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("Failed to alloc header init command object\n");
rc = -ENOMEM;
@@ -2360,6 +2363,7 @@ int _ipa_init_hdr_v2_5(void)
struct ipa_mem_buffer mem;
struct ipa_hdr_init_local *cmd = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
@@ -2370,7 +2374,7 @@ int _ipa_init_hdr_v2_5(void)
}
memset(mem.base, 0, mem.size);
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("Failed to alloc header init command object\n");
dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
@@ -2411,7 +2415,7 @@ int _ipa_init_hdr_v2_5(void)
memset(mem.base, 0, mem.size);
memset(&desc, 0, sizeof(desc));
- dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_KERNEL);
+ dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
if (dma_cmd == NULL) {
IPAERR("Failed to alloc immediate command object\n");
dma_free_coherent(ipa_ctx->pdev,
@@ -2462,6 +2466,7 @@ int _ipa_init_rt4_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v4_routing_init *v4_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2486,7 +2491,7 @@ int _ipa_init_rt4_v2(void)
entry++;
}
- v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
+ v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
if (v4_cmd == NULL) {
IPAERR("Failed to alloc v4 routing init command object\n");
rc = -ENOMEM;
@@ -2522,6 +2527,7 @@ int _ipa_init_rt6_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v6_routing_init *v6_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2546,7 +2552,7 @@ int _ipa_init_rt6_v2(void)
entry++;
}
- v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
+ v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
if (v6_cmd == NULL) {
IPAERR("Failed to alloc v6 routing init command object\n");
rc = -ENOMEM;
@@ -2582,6 +2588,7 @@ int _ipa_init_flt4_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v4_filter_init *v4_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2604,7 +2611,7 @@ int _ipa_init_flt4_v2(void)
entry++;
}
- v4_cmd = kzalloc(sizeof(*v4_cmd), GFP_KERNEL);
+ v4_cmd = kzalloc(sizeof(*v4_cmd), flag);
if (v4_cmd == NULL) {
IPAERR("Failed to alloc v4 fliter init command object\n");
rc = -ENOMEM;
@@ -2640,6 +2647,7 @@ int _ipa_init_flt6_v2(void)
struct ipa_desc desc = { 0 };
struct ipa_mem_buffer mem;
struct ipa_ip_v6_filter_init *v6_cmd = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u32 *entry;
int i;
int rc = 0;
@@ -2662,7 +2670,7 @@ int _ipa_init_flt6_v2(void)
entry++;
}
- v6_cmd = kzalloc(sizeof(*v6_cmd), GFP_KERNEL);
+ v6_cmd = kzalloc(sizeof(*v6_cmd), flag);
if (v6_cmd == NULL) {
IPAERR("Failed to alloc v6 fliter init command object\n");
rc = -ENOMEM;
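The IPAv2 hunks above all follow the same allocation-flag pattern, sketched below: when use_dma_zone is set (presumably because the hardware can only address memory in the DMA zone), immediate-command objects are allocated with GFP_DMA in addition to the usual flags.

	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
	struct ipa_hw_imm_cmd_dma_shared_mem *cmd;

	cmd = kzalloc(sizeof(*cmd), flag);	/* GFP_DMA added only when required */
	if (!cmd)
		return -ENOMEM;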
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 25364e8efa38..2fdb20d99ce2 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -322,8 +322,8 @@ int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
dma_address = desc->dma_address;
tx_pkt->no_unmap_dma = true;
}
- if (!dma_address) {
- IPAERR("failed to DMA wrap\n");
+ if (dma_mapping_error(ipa_ctx->pdev, dma_address)) {
+ IPAERR("dma_map_single failed\n");
goto fail_dma_map;
}
@@ -445,7 +445,7 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
}
dma_addr = dma_map_single(ipa_ctx->pdev,
transfer.iovec, size, DMA_TO_DEVICE);
- if (!dma_addr) {
+ if (dma_mapping_error(ipa_ctx->pdev, dma_addr)) {
IPAERR("dma_map_single failed for sps xfr buff\n");
kfree(transfer.iovec);
return -EFAULT;
@@ -493,6 +493,15 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
tx_pkt->mem.base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
+
+ if (dma_mapping_error(ipa_ctx->pdev,
+ tx_pkt->mem.phys_base)) {
+ IPAERR("dma_map_single ");
+ IPAERR("failed\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+
} else {
tx_pkt->mem.phys_base = desc[i].dma_address;
tx_pkt->no_unmap_dma = true;
@@ -1874,8 +1883,8 @@ begin:
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
__func__, (void *)rx_pkt->data.dma_addr,
ptr, sys);
@@ -2030,8 +2039,8 @@ static void ipa_alloc_wlan_rx_common_cache(u32 size)
ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -2102,8 +2111,8 @@ static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0) {
+ if (dma_mapping_error(ipa_ctx->pdev,
+ rx_pkt->data.dma_addr)) {
IPAERR("dma_map_single failure %p for %p\n",
(void *)rx_pkt->data.dma_addr, ptr);
goto fail_dma_mapping;
@@ -2160,9 +2169,10 @@ static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
- if (rx_pkt->data.dma_addr == 0 ||
- rx_pkt->data.dma_addr == ~0)
+ if (dma_mapping_error(ipa_ctx->pdev, rx_pkt->data.dma_addr)) {
+ IPAERR("dma_map_single failure for rx_pkt\n");
goto fail_dma_mapping;
+ }
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
rx_len_cached = ++sys->len;
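For reference, a minimal sketch of the mapping-check pattern the ipa_dp.c hunks switch to (ptr, size and the error label stand in for the caller's locals): the error cookie returned by dma_map_single() is architecture-specific, so failures must be detected with dma_mapping_error() rather than by comparing the handle against 0 or ~0.

	dma_addr_t addr;

	addr = dma_map_single(ipa_ctx->pdev, ptr, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(ipa_ctx->pdev, addr)) {
		IPAERR("dma_map_single failed\n");
		goto fail_dma_mapping;
	}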
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index e23de3f26613..f43981f15c31 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -268,6 +268,7 @@ int __ipa_commit_hdr_v2(void)
struct ipa_mem_buffer mem;
struct ipa_hdr_init_system *cmd = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd = NULL;
+ gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = -EFAULT;
if (ipa_generate_hdr_hw_tbl(&mem)) {
@@ -281,7 +282,7 @@ int __ipa_commit_hdr_v2(void)
IPA_MEM_PART(apps_hdr_size));
goto fail_send_cmd;
} else {
- dma_cmd = kzalloc(sizeof(*dma_cmd), GFP_ATOMIC);
+ dma_cmd = kzalloc(sizeof(*dma_cmd), flag);
if (dma_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -303,7 +304,7 @@ int __ipa_commit_hdr_v2(void)
IPA_MEM_PART(apps_hdr_size_ddr));
goto fail_send_cmd;
} else {
- cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), flag);
if (cmd == NULL) {
IPAERR("fail to alloc hdr init cmd\n");
rc = -ENOMEM;
@@ -359,6 +360,7 @@ int __ipa_commit_hdr_v2_5(void)
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_hdr = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *dma_cmd_ctx = NULL;
struct ipa_register_write *reg_write_cmd = NULL;
+ gfp_t flag = GFP_ATOMIC | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
int rc = -EFAULT;
u32 proc_ctx_size;
u32 proc_ctx_ofst;
@@ -383,7 +385,7 @@ int __ipa_commit_hdr_v2_5(void)
IPA_MEM_PART(apps_hdr_size));
goto fail_send_cmd1;
} else {
- dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), GFP_ATOMIC);
+ dma_cmd_hdr = kzalloc(sizeof(*dma_cmd_hdr), flag);
if (dma_cmd_hdr == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -406,7 +408,7 @@ int __ipa_commit_hdr_v2_5(void)
goto fail_send_cmd1;
} else {
hdr_init_cmd = kzalloc(sizeof(*hdr_init_cmd),
- GFP_ATOMIC);
+ flag);
if (hdr_init_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -431,7 +433,7 @@ int __ipa_commit_hdr_v2_5(void)
goto fail_send_cmd1;
} else {
dma_cmd_ctx = kzalloc(sizeof(*dma_cmd_ctx),
- GFP_ATOMIC);
+ flag);
if (dma_cmd_ctx == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -456,7 +458,7 @@ int __ipa_commit_hdr_v2_5(void)
goto fail_send_cmd1;
} else {
reg_write_cmd = kzalloc(sizeof(*reg_write_cmd),
- GFP_ATOMIC);
+ flag);
if (reg_write_cmd == NULL) {
IPAERR("fail to alloc immediate cmd\n");
rc = -ENOMEM;
@@ -722,6 +724,11 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
entry->hdr,
entry->hdr_len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(ipa_ctx->pdev,
+ entry->phys_base)) {
+ IPAERR("dma_map_single failure for entry\n");
+ goto fail_dma_mapping;
+ }
}
} else {
entry->is_hdr_proc_ctx = false;
@@ -798,6 +805,8 @@ fail_add_proc_ctx:
list_del(&entry->link);
dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
entry->hdr_len, DMA_TO_DEVICE);
+fail_dma_mapping:
+ entry->is_hdr_proc_ctx = false;
bad_hdr_len:
entry->cookie = 0;
kmem_cache_free(ipa_ctx->hdr_cache, entry);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index f5dea76764f8..11c77934e04f 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -698,6 +698,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
struct ipa_mem_buffer head;
struct ipa_hw_imm_cmd_dma_shared_mem *cmd1 = NULL;
struct ipa_hw_imm_cmd_dma_shared_mem *cmd2 = NULL;
+ gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
u16 avail;
u32 num_modem_rt_index;
int rc = 0;
@@ -748,7 +749,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
}
cmd1 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
- GFP_KERNEL);
+ flag);
if (cmd1 == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
@@ -765,7 +766,7 @@ int __ipa_commit_rt_v2(enum ipa_ip_type ip)
if (lcl) {
cmd2 = kzalloc(sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
- GFP_KERNEL);
+ flag);
if (cmd2 == NULL) {
IPAERR("Failed to alloc immediate command object\n");
rc = -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 7aaddbbdabaf..5ee6e5d2d9e3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2276,6 +2276,36 @@ static int ipa3_q6_set_ex_path_to_apps(void)
desc[num_descs].len = cmd_pyld->len;
num_descs++;
}
+
+ /* disable statuses for modem producers */
+ if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+ ipa_assert_on(num_descs >= ipa3_ctx->ipa_num_pipes);
+
+ reg_write.skip_pipeline_clear = false;
+ reg_write.pipeline_clear_options =
+ IPAHAL_HPS_CLEAR;
+ reg_write.offset =
+ ipahal_get_reg_n_ofst(IPA_ENDP_STATUS_n,
+ ep_idx);
+ reg_write.value = 0;
+ reg_write.value_mask = ~0;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct register_write cmd\n");
+ ipa_assert();
+ return -EFAULT;
+ }
+
+ desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_REGISTER_WRITE);
+ desc[num_descs].type = IPA_IMM_CMD_DESC;
+ desc[num_descs].callback = ipa3_destroy_imm;
+ desc[num_descs].user1 = cmd_pyld;
+ desc[num_descs].pyld = cmd_pyld->data;
+ desc[num_descs].len = cmd_pyld->len;
+ num_descs++;
+ }
}
/* Will wait 500msecs for IPA tag process completion */
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index c6009d767db5..e88f3d8c14d2 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -643,6 +643,9 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
int rc;
int force_pt_coherent = 1;
int smmu_bypass = !ctx->smmu_s1_en;
+ dma_addr_t iova_base = 0;
+ dma_addr_t iova_end = ctx->smmu_base + ctx->smmu_size - 1;
+ struct iommu_domain_geometry geometry;
if (!ctx->use_smmu)
return 0;
@@ -700,6 +703,17 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
rc);
goto release_mapping;
}
+ memset(&geometry, 0, sizeof(geometry));
+ geometry.aperture_start = iova_base;
+ geometry.aperture_end = iova_end;
+ rc = iommu_domain_set_attr(ctx->mapping->domain,
+ DOMAIN_ATTR_GEOMETRY,
+ &geometry);
+ if (rc) {
+ dev_err(ctx->dev, "Set geometry attribute to SMMU failed (%d)\n",
+ rc);
+ goto release_mapping;
+ }
}
}
@@ -864,6 +878,8 @@ static int msm_11ad_ssr_init(struct msm11ad_ctx *ctx)
ctx->dump_data.addr = virt_to_phys(ctx->ramdump_addr);
ctx->dump_data.len = WIGIG_RAMDUMP_SIZE;
+ strlcpy(ctx->dump_data.name, "KWIGIG",
+ sizeof(ctx->dump_data.name));
dump_entry.id = MSM_DUMP_DATA_WIGIG;
dump_entry.addr = virt_to_phys(&ctx->dump_data);
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index a30ed90d6e92..8d038ba0770d 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -33,6 +33,7 @@
#include <soc/qcom/scm.h>
#include <soc/qcom/restart.h>
#include <soc/qcom/watchdog.h>
+#include <soc/qcom/minidump.h>
#define EMERGENCY_DLOAD_MAGIC1 0x322A4F99
#define EMERGENCY_DLOAD_MAGIC2 0xC67E4350
@@ -42,9 +43,10 @@
#define SCM_IO_DISABLE_PMIC_ARBITER 1
#define SCM_IO_DEASSERT_PS_HOLD 2
#define SCM_WDOG_DEBUG_BOOT_PART 0x9
-#define SCM_DLOAD_MODE 0X10
+#define SCM_DLOAD_FULLDUMP 0X10
#define SCM_EDLOAD_MODE 0X01
#define SCM_DLOAD_CMD 0x10
+#define SCM_DLOAD_MINIDUMP 0X20
static int restart_mode;
@@ -69,6 +71,7 @@ static void scm_disable_sdi(void);
#endif
static int in_panic;
+static int dload_type = SCM_DLOAD_FULLDUMP;
static int download_mode = 1;
static struct kobject dload_kobj;
static void *dload_mode_addr, *dload_type_addr;
@@ -142,7 +145,7 @@ static void set_dload_mode(int on)
mb();
}
- ret = scm_set_dload_mode(on ? SCM_DLOAD_MODE : 0, 0);
+ ret = scm_set_dload_mode(on ? dload_type : 0, 0);
if (ret)
pr_err("Failed to set secure DLOAD mode: %d\n", ret);
@@ -185,7 +188,6 @@ static int dload_set(const char *val, struct kernel_param *kp)
int old_val = download_mode;
ret = param_set_int(val, kp);
-
if (ret)
return ret;
@@ -454,7 +456,7 @@ static ssize_t show_emmc_dload(struct kobject *kobj, struct attribute *attr,
else
show_val = 0;
- return snprintf(buf, sizeof(show_val), "%u\n", show_val);
+ return scnprintf(buf, sizeof(show_val), "%u\n", show_val);
}
static size_t store_emmc_dload(struct kobject *kobj, struct attribute *attr,
@@ -477,10 +479,50 @@ static size_t store_emmc_dload(struct kobject *kobj, struct attribute *attr,
return count;
}
+
+#ifdef CONFIG_QCOM_MINIDUMP
+
+static DEFINE_MUTEX(tcsr_lock);
+
+static ssize_t show_dload_mode(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "DLOAD dump type: %s\n",
+ (dload_type == SCM_DLOAD_MINIDUMP) ? "mini" : "full");
+}
+
+static size_t store_dload_mode(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ if (sysfs_streq(buf, "full")) {
+ dload_type = SCM_DLOAD_FULLDUMP;
+ } else if (sysfs_streq(buf, "mini")) {
+ if (!msm_minidump_enabled()) {
+ pr_info("Minidump is not enabled\n");
+ return -ENODEV;
+ }
+ dload_type = SCM_DLOAD_MINIDUMP;
+ } else {
+ pr_info("Invalid value. Use 'full' or 'mini'\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tcsr_lock);
+ /* Overwrite TCSR reg */
+ set_dload_mode(dload_type);
+ mutex_unlock(&tcsr_lock);
+ return count;
+}
+RESET_ATTR(dload_mode, 0644, show_dload_mode, store_dload_mode);
+#endif
+
RESET_ATTR(emmc_dload, 0644, show_emmc_dload, store_emmc_dload);
static struct attribute *reset_attrs[] = {
&reset_attr_emmc_dload.attr,
+#ifdef CONFIG_QCOM_MINIDUMP
+ &reset_attr_dload_mode.attr,
+#endif
NULL
};
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index dbe2a08f1776..858ddcc228df 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -559,6 +559,7 @@ struct lab_regulator {
int step_size;
int slew_rate;
int soft_start;
+ int sc_wait_time_ms;
int vreg_enabled;
};
@@ -608,6 +609,8 @@ struct qpnp_labibb {
bool skip_2nd_swire_cmd;
bool pfm_enable;
bool notify_lab_vreg_ok_sts;
+ bool detect_lab_sc;
+ bool sc_detected;
u32 swire_2nd_cmd_delay;
u32 swire_ibb_ps_enable_delay;
};
@@ -2138,8 +2141,10 @@ static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
u8 val;
struct qpnp_labibb *labibb = container_of(work, struct qpnp_labibb,
lab_vreg_ok_work);
+ if (labibb->lab_vreg.sc_wait_time_ms != -EINVAL)
+ retries = labibb->lab_vreg.sc_wait_time_ms / 5;
- while (retries--) {
+ while (retries) {
rc = qpnp_labibb_read(labibb, labibb->lab_base +
REG_LAB_STATUS1, &val, 1);
if (rc < 0) {
@@ -2155,10 +2160,30 @@ static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
}
usleep_range(dly, dly + 100);
+ retries--;
}
- if (!retries)
- pr_err("LAB_VREG_OK not set, failed to notify\n");
+ if (!retries) {
+ if (labibb->detect_lab_sc) {
+ pr_crit("short circuit detected on LAB rail.. disabling the LAB/IBB/OLEDB modules\n");
+ /* Disable LAB module */
+ val = 0;
+ rc = qpnp_labibb_write(labibb, labibb->lab_base +
+ REG_LAB_MODULE_RDY, &val, 1);
+ if (rc < 0) {
+ pr_err("write register %x failed rc = %d\n",
+ REG_LAB_MODULE_RDY, rc);
+ return;
+ }
+ raw_notifier_call_chain(&labibb_notifier,
+ LAB_VREG_NOT_OK, NULL);
+ labibb->sc_detected = true;
+ labibb->lab_vreg.vreg_enabled = 0;
+ labibb->ibb_vreg.vreg_enabled = 0;
+ } else {
+ pr_err("LAB_VREG_OK not set, failed to notify\n");
+ }
+ }
}
static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
@@ -2323,6 +2348,11 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+ if (labibb->sc_detected) {
+ pr_info("Short circuit detected: disabled LAB/IBB rails\n");
+ return 0;
+ }
+
if (labibb->skip_2nd_swire_cmd) {
rc = qpnp_ibb_ps_config(labibb, false);
if (rc < 0) {
@@ -2363,7 +2393,7 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
labibb->lab_vreg.vreg_enabled = 1;
}
- if (labibb->notify_lab_vreg_ok_sts)
+ if (labibb->notify_lab_vreg_ok_sts || labibb->detect_lab_sc)
schedule_work(&labibb->lab_vreg_ok_work);
return 0;
@@ -2621,6 +2651,12 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
labibb->notify_lab_vreg_ok_sts = of_property_read_bool(of_node,
"qcom,notify-lab-vreg-ok-sts");
+ labibb->lab_vreg.sc_wait_time_ms = -EINVAL;
+ if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE &&
+ labibb->detect_lab_sc)
+ of_property_read_u32(of_node, "qcom,qpnp-lab-sc-wait-time-ms",
+ &labibb->lab_vreg.sc_wait_time_ms);
+
rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start",
&(labibb->lab_vreg.soft_start));
if (!rc) {
@@ -3255,6 +3291,11 @@ static int qpnp_ibb_regulator_enable(struct regulator_dev *rdev)
u8 val;
struct qpnp_labibb *labibb = rdev_get_drvdata(rdev);
+ if (labibb->sc_detected) {
+ pr_info("Short circuit detected: disabled LAB/IBB rails\n");
+ return 0;
+ }
+
if (!labibb->ibb_vreg.vreg_enabled && !labibb->swire_control) {
if (!labibb->standalone)
@@ -3731,6 +3772,8 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
if (labibb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE) {
labibb->mode = QPNP_LABIBB_AMOLED_MODE;
+ /* Enable polling for LAB short circuit detection for PM660A */
+ labibb->detect_lab_sc = true;
} else {
rc = of_property_read_string(labibb->dev->of_node,
"qcom,qpnp-labibb-mode", &mode_name);
diff --git a/drivers/regulator/qpnp-oledb-regulator.c b/drivers/regulator/qpnp-oledb-regulator.c
index fa14445f9d26..bee9a3d82eeb 100644
--- a/drivers/regulator/qpnp-oledb-regulator.c
+++ b/drivers/regulator/qpnp-oledb-regulator.c
@@ -183,6 +183,8 @@ struct qpnp_oledb {
bool dynamic_ext_pinctl_config;
bool pbs_control;
bool force_pd_control;
+ bool handle_lab_sc_notification;
+ bool lab_sc_detected;
};
static const u16 oledb_warmup_dly_ns[] = {6700, 13300, 26700, 53400};
@@ -277,6 +279,11 @@ static int qpnp_oledb_regulator_enable(struct regulator_dev *rdev)
struct qpnp_oledb *oledb = rdev_get_drvdata(rdev);
+ if (oledb->lab_sc_detected == true) {
+ pr_info("Short circuit detected: Disabled OLEDB rail\n");
+ return 0;
+ }
+
if (oledb->ext_pin_control) {
rc = qpnp_oledb_read(oledb, oledb->base + OLEDB_EXT_PIN_CTL,
&val, 1);
@@ -370,12 +377,19 @@ static int qpnp_oledb_regulator_disable(struct regulator_dev *rdev)
}
if (val & OLEDB_FORCE_PD_CTL_SPARE_BIT) {
- rc = qpnp_pbs_trigger_event(oledb->pbs_dev_node,
- trigger_bitmap);
+ rc = qpnp_oledb_sec_masked_write(oledb, oledb->base +
+ OLEDB_SPARE_CTL,
+ OLEDB_FORCE_PD_CTL_SPARE_BIT, 0);
if (rc < 0) {
- pr_err("Failed to trigger the PBS sequence\n");
+ pr_err("Failed to write SPARE_CTL rc=%d\n", rc);
return rc;
}
+
+ rc = qpnp_pbs_trigger_event(oledb->pbs_dev_node,
+ trigger_bitmap);
+ if (rc < 0)
+ pr_err("Failed to trigger the PBS sequence\n");
+
pr_debug("PBS event triggered\n");
} else {
pr_debug("OLEDB_SPARE_CTL register bit not set\n");
@@ -1116,8 +1130,14 @@ static int qpnp_oledb_parse_dt(struct qpnp_oledb *oledb)
oledb->pbs_control =
of_property_read_bool(of_node, "qcom,pbs-control");
- oledb->force_pd_control =
- of_property_read_bool(of_node, "qcom,force-pd-control");
+ /* Use the force_pd_control only for PM660A versions <= v2.0 */
+ if (oledb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE &&
+ oledb->pmic_rev_id->rev4 <= PM660L_V2P0_REV4) {
+ if (!(oledb->pmic_rev_id->rev4 == PM660L_V2P0_REV4 &&
+ oledb->pmic_rev_id->rev2 > PM660L_V2P0_REV2)) {
+ oledb->force_pd_control = true;
+ }
+ }
if (oledb->force_pd_control) {
oledb->pbs_dev_node = of_parse_phandle(of_node,
@@ -1215,13 +1235,6 @@ static int qpnp_oledb_force_pulldown_config(struct qpnp_oledb *oledb)
int rc = 0;
u8 val;
- rc = qpnp_oledb_sec_masked_write(oledb, oledb->base +
- OLEDB_SPARE_CTL, OLEDB_FORCE_PD_CTL_SPARE_BIT, 0);
- if (rc < 0) {
- pr_err("Failed to write SPARE_CTL rc=%d\n", rc);
- return rc;
- }
-
val = 1;
rc = qpnp_oledb_write(oledb, oledb->base + OLEDB_PD_CTL,
&val, 1);
@@ -1243,14 +1256,31 @@ static int qpnp_labibb_notifier_cb(struct notifier_block *nb,
unsigned long action, void *data)
{
int rc = 0;
+ u8 val;
struct qpnp_oledb *oledb = container_of(nb, struct qpnp_oledb,
oledb_nb);
+ if (action == LAB_VREG_NOT_OK) {
+ /* short circuit detected. Disable OLEDB module */
+ val = 0;
+ rc = qpnp_oledb_write(oledb, oledb->base + OLEDB_MODULE_RDY,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to write MODULE_RDY rc=%d\n", rc);
+ return NOTIFY_STOP;
+ }
+ oledb->lab_sc_detected = true;
+ oledb->mod_enable = false;
+ pr_crit("LAB SC detected, disabling OLEDB forever!\n");
+ }
+
if (action == LAB_VREG_OK) {
/* Disable SWIRE pull down control and enable via spmi mode */
rc = qpnp_oledb_force_pulldown_config(oledb);
- if (rc < 0)
+ if (rc < 0) {
+ pr_err("Failed to config force pull down\n");
return NOTIFY_STOP;
+ }
}
return NOTIFY_OK;
@@ -1297,7 +1327,11 @@ static int qpnp_oledb_regulator_probe(struct platform_device *pdev)
return rc;
}
- if (oledb->force_pd_control) {
+ /* Enable LAB short circuit notification support */
+ if (oledb->pmic_rev_id->pmic_subtype == PM660L_SUBTYPE)
+ oledb->handle_lab_sc_notification = true;
+
+ if (oledb->force_pd_control || oledb->handle_lab_sc_notification) {
oledb->oledb_nb.notifier_call = qpnp_labibb_notifier_cb;
rc = qpnp_labibb_notifier_register(&oledb->oledb_nb);
if (rc < 0) {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 79bb3337ba36..c5393d517432 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -9238,10 +9238,11 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
/* scale up to G3 now */
new_pwr_info.gear_tx = UFS_HS_G3;
new_pwr_info.gear_rx = UFS_HS_G3;
- ret = ufshcd_change_power_mode(hba, &new_pwr_info);
- if (ret)
- goto out;
+ /* now, fall through to set the HS-G3 */
}
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+ if (ret)
+ goto out;
} else {
memcpy(&new_pwr_info, &hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 75bebf66376d..34b0adb108eb 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -441,6 +441,23 @@ config QCOM_MEMORY_DUMP_V2
of deadlocks or cpu hangs these dump regions are captured to
give a snapshot of the system at the time of the crash.
+config QCOM_MINIDUMP
+ bool "QCOM Minidump Support"
+ depends on MSM_SMEM && QCOM_DLOAD_MODE
+ help
+ This enables the minidump feature. It allows various clients to
+ register regions to be dumped when the system enters a bad state
+ (panic, watchdog bite, etc.). Registered client information is stored
+ in SMEM, and all registered entries are dumped only when DLOAD mode
+ is enabled.
+
+config MINIDUMP_MAX_ENTRIES
+ int "Minidump Maximum num of entries"
+ default 200
+ depends on QCOM_MINIDUMP
+ help
+ This defines the maximum number of entries to be allocated for the
+ application subsystem in the Minidump SMEM table.
+
config ICNSS
tristate "Platform driver for Q6 integrated connectivity"
---help---
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index fa350d122384..87698b75d3b8 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_QCOM_SCM_XPU) += scm-xpu.o
obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
obj-$(CONFIG_QCOM_MEMORY_DUMP) += memory_dump.o
obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o
+obj-$(CONFIG_QCOM_MINIDUMP) += msm_minidump.o
obj-$(CONFIG_QCOM_DCC) += dcc.o
obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
obj-$(CONFIG_QCOM_COMMON_LOG) += common_log.o
diff --git a/drivers/soc/qcom/common_log.c b/drivers/soc/qcom/common_log.c
index ecf89b2b3b37..f001e820b797 100644
--- a/drivers/soc/qcom/common_log.c
+++ b/drivers/soc/qcom/common_log.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,6 +18,8 @@
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/minidump.h>
+#include <asm/sections.h>
#define MISC_DUMP_DATA_LEN 4096
#define PMIC_DUMP_DATA_LEN (64 * 1024)
@@ -38,6 +40,8 @@ void register_misc_dump(void)
misc_buf = kzalloc(MISC_DUMP_DATA_LEN, GFP_KERNEL);
if (!misc_buf)
goto err0;
+
+ strlcpy(misc_data->name, "KMISC", sizeof(misc_data->name));
misc_data->addr = virt_to_phys(misc_buf);
misc_data->len = MISC_DUMP_DATA_LEN;
dump_entry.id = MSM_DUMP_DATA_MISC;
@@ -70,6 +74,7 @@ static void register_pmic_dump(void)
if (!dump_addr)
goto err0;
+ strlcpy(dump_data->name, "KPMIC", sizeof(dump_data->name));
dump_data->addr = virt_to_phys(dump_addr);
dump_data->len = PMIC_DUMP_DATA_LEN;
dump_entry.id = MSM_DUMP_DATA_PMIC;
@@ -104,6 +109,8 @@ static void register_vsense_dump(void)
if (!dump_addr)
goto err0;
+ strlcpy(dump_data->name, "KVSENSE",
+ sizeof(dump_data->name));
dump_data->addr = virt_to_phys(dump_addr);
dump_data->len = VSENSE_DUMP_DATA_LEN;
dump_entry.id = MSM_DUMP_DATA_VSENSE;
@@ -136,6 +143,7 @@ void register_rpm_dump(void)
if (!dump_addr)
goto err0;
+ strlcpy(dump_data->name, "KRPM", sizeof(dump_data->name));
dump_data->addr = virt_to_phys(dump_addr);
dump_data->len = RPM_DUMP_DATA_LEN;
dump_entry.id = MSM_DUMP_DATA_RPM;
@@ -217,8 +225,39 @@ static void __init common_log_register_log_buf(void)
}
}
+static void __init register_kernel_sections(void)
+{
+ struct md_region ksec_entry;
+ char *data_name = "KDATABSS";
+ const size_t static_size = __per_cpu_end - __per_cpu_start;
+ void __percpu *base = (void __percpu *)__per_cpu_start;
+ unsigned int cpu;
+
+ strlcpy(ksec_entry.name, data_name, sizeof(ksec_entry.name));
+ ksec_entry.virt_addr = (uintptr_t)_sdata;
+ ksec_entry.phys_addr = virt_to_phys(_sdata);
+ ksec_entry.size = roundup((__bss_stop - _sdata), 4);
+ if (msm_minidump_add_region(&ksec_entry))
+ pr_err("Failed to add data section in Minidump\n");
+
+ /* Add percpu static sections */
+ for_each_possible_cpu(cpu) {
+ void *start = per_cpu_ptr(base, cpu);
+
+ memset(&ksec_entry, 0, sizeof(ksec_entry));
+ scnprintf(ksec_entry.name, sizeof(ksec_entry.name),
+ "KSPERCPU%d", cpu);
+ ksec_entry.virt_addr = (uintptr_t)start;
+ ksec_entry.phys_addr = per_cpu_ptr_to_phys(start);
+ ksec_entry.size = static_size;
+ if (msm_minidump_add_region(&ksec_entry))
+ pr_err("Failed to add percpu sections in Minidump\n");
+ }
+}
+
static int __init msm_common_log_init(void)
{
+ register_kernel_sections();
common_log_register_log_buf();
register_misc_dump();
register_pmic_dump();
diff --git a/drivers/soc/qcom/cpuss_dump.c b/drivers/soc/qcom/cpuss_dump.c
index 93c876a7ad73..2e08b78dee94 100644
--- a/drivers/soc/qcom/cpuss_dump.c
+++ b/drivers/soc/qcom/cpuss_dump.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -75,6 +75,8 @@ static int cpuss_dump_probe(struct platform_device *pdev)
dump_data->addr = dump_addr;
dump_data->len = size;
+ scnprintf(dump_data->name, sizeof(dump_data->name),
+ "KCPUSS%X", id);
dump_entry.id = id;
dump_entry.addr = virt_to_phys(dump_data);
ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
diff --git a/drivers/soc/qcom/dcc.c b/drivers/soc/qcom/dcc.c
index aced50bf7fda..e5f3f119065b 100644
--- a/drivers/soc/qcom/dcc.c
+++ b/drivers/soc/qcom/dcc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1173,6 +1173,8 @@ static void dcc_allocate_dump_mem(struct dcc_drvdata *drvdata)
/* Allocate memory for dcc reg dump */
drvdata->reg_buf = devm_kzalloc(dev, drvdata->reg_size, GFP_KERNEL);
if (drvdata->reg_buf) {
+ strlcpy(drvdata->reg_data.name, "KDCC_REG",
+ sizeof(drvdata->reg_data.name));
drvdata->reg_data.addr = virt_to_phys(drvdata->reg_buf);
drvdata->reg_data.len = drvdata->reg_size;
reg_dump_entry.id = MSM_DUMP_DATA_DCC_REG;
@@ -1190,6 +1192,8 @@ static void dcc_allocate_dump_mem(struct dcc_drvdata *drvdata)
/* Allocate memory for dcc sram dump */
drvdata->sram_buf = devm_kzalloc(dev, drvdata->ram_size, GFP_KERNEL);
if (drvdata->sram_buf) {
+ strlcpy(drvdata->sram_data.name, "KDCC_SRAM",
+ sizeof(drvdata->sram_data.name));
drvdata->sram_data.addr = virt_to_phys(drvdata->sram_buf);
drvdata->sram_data.len = drvdata->ram_size;
sram_dump_entry.id = MSM_DUMP_DATA_DCC_SRAM;
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
index af141c808c81..092b1c1af44b 100644
--- a/drivers/soc/qcom/memory_dump_v2.c
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
#include <linux/kmemleak.h>
#include <soc/qcom/memory_dump.h>
#include <soc/qcom/scm.h>
+#include <soc/qcom/minidump.h>
#define MSM_DUMP_TABLE_VERSION MSM_DUMP_MAKE_VERSION(2, 0)
@@ -87,6 +88,29 @@ static struct msm_dump_table *msm_dump_get_table(enum msm_dump_table_ids id)
return table;
}
+int msm_dump_data_add_minidump(struct msm_dump_entry *entry)
+{
+ struct msm_dump_data *data;
+ struct md_region md_entry;
+
+ data = (struct msm_dump_data *)(phys_to_virt(entry->addr));
+ if (!strcmp(data->name, "")) {
+ pr_info("Entry name is NULL, Use ID %d for minidump\n",
+ entry->id);
+ snprintf(md_entry.name, sizeof(md_entry.name), "KMDT0x%X",
+ entry->id);
+ } else {
+ strlcpy(md_entry.name, data->name, sizeof(md_entry.name));
+ }
+
+ md_entry.phys_addr = data->addr;
+ md_entry.virt_addr = (uintptr_t)phys_to_virt(data->addr);
+ md_entry.size = data->len;
+ md_entry.id = entry->id;
+
+ return msm_minidump_add_region(&md_entry);
+}
+
int msm_dump_data_register(enum msm_dump_table_ids id,
struct msm_dump_entry *entry)
{
@@ -107,6 +131,10 @@ int msm_dump_data_register(enum msm_dump_table_ids id,
table->num_entries++;
dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table));
+
+ if (msm_dump_data_add_minidump(entry))
+ pr_info("Failed to add entry in Minidump table\n");
+
return 0;
}
EXPORT_SYMBOL(msm_dump_data_register);
diff --git a/drivers/soc/qcom/msm_minidump.c b/drivers/soc/qcom/msm_minidump.c
new file mode 100644
index 000000000000..1cb36bf98555
--- /dev/null
+++ b/drivers/soc/qcom/msm_minidump.c
@@ -0,0 +1,371 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "Minidump: " fmt
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/minidump.h>
+
+
+#define MAX_NUM_ENTRIES (CONFIG_MINIDUMP_MAX_ENTRIES + 1)
+#define SMEM_ENTRY_SIZE 32
+#define MAX_MEM_LENGTH (SMEM_ENTRY_SIZE * MAX_NUM_ENTRIES)
+#define MAX_STRTBL_SIZE (MAX_NUM_ENTRIES * MAX_NAME_LENGTH)
+#define SMEM_MINIDUMP_TABLE_ID 602
+
+/* Bootloader Minidump table */
+struct md_smem_table {
+ u32 version;
+ u32 smem_length;
+ u64 next_avail_offset;
+ char reserved[MAX_NAME_LENGTH];
+ u64 *region_start;
+};
+
+/* Bootloader Minidump region */
+struct md_smem_region {
+ char name[MAX_NAME_LENGTH];
+ u64 address;
+ u64 size;
+};
+
+/* md_table: APPS minidump table
+ * @num_regions: Number of entries registered
+ * @region_base_offset: Start offset of the APPS region in the SMEM table
+ * @md_smem_table: Pointer to the SMEM table
+ * @region: Pointer to the APPS region in the SMEM table
+ * @entry: All registered client entries.
+ */
+
+struct md_table {
+ u32 num_regions;
+ u32 region_base_offset;
+ struct md_smem_table *md_smem_table;
+ struct md_smem_region *region;
+ struct md_region entry[MAX_NUM_ENTRIES];
+};
+
+/* Protect elfheader and smem table from deferred calls contention */
+static DEFINE_SPINLOCK(mdt_lock);
+static bool minidump_enabled;
+static struct md_table minidump_table;
+static unsigned int pendings;
+static unsigned int region_idx = 1; /* First entry is ELF header */
+
+/* ELF Header */
+static struct elfhdr *md_ehdr;
+/* ELF Program header */
+static struct elf_phdr *phdr;
+/* ELF Section header */
+static struct elf_shdr *shdr;
+/* Section offset in elf image */
+static u64 elf_offset;
+/* String table index, first byte must be '\0' */
+static unsigned int stringtable_idx = 1;
+
+static inline struct elf_shdr *elf_sheader(struct elfhdr *hdr)
+{
+ return (struct elf_shdr *)((size_t)hdr + (size_t)hdr->e_shoff);
+}
+
+static inline struct elf_shdr *elf_section(struct elfhdr *hdr, int idx)
+{
+ return &elf_sheader(hdr)[idx];
+}
+
+static inline char *elf_str_table(struct elfhdr *hdr)
+{
+ if (hdr->e_shstrndx == SHN_UNDEF)
+ return NULL;
+ return (char *)hdr + elf_section(hdr, hdr->e_shstrndx)->sh_offset;
+}
+
+static inline char *elf_lookup_string(struct elfhdr *hdr, int offset)
+{
+ char *strtab = elf_str_table(hdr);
+
+ if ((strtab == NULL) || (stringtable_idx < offset))
+ return NULL;
+ return strtab + offset;
+}
+
+static inline unsigned int set_section_name(const char *name)
+{
+ char *strtab = elf_str_table(md_ehdr);
+ int ret = 0;
+
+ if ((strtab == NULL) || (name == NULL))
+ return 0;
+
+ ret = stringtable_idx;
+ stringtable_idx += strlcpy((strtab + stringtable_idx),
+ name, MAX_NAME_LENGTH);
+ stringtable_idx += 1;
+ return ret;
+}
+
+/* return 1 if name already exists */
+static inline bool md_check_name(const char *name)
+{
+ struct md_region *mde = minidump_table.entry;
+ int i, regno = minidump_table.num_regions;
+
+ for (i = 0; i < regno; i++, mde++)
+ if (!strcmp(mde->name, name))
+ return true;
+ return false;
+}
+
+/* Update Mini dump table in SMEM */
+static int md_update_smem_table(const struct md_region *entry)
+{
+ struct md_smem_region *mdr;
+
+ if (!minidump_enabled) {
+ pr_info("Table in smem is not setup\n");
+ return -ENODEV;
+ }
+
+ mdr = &minidump_table.region[region_idx++];
+
+ strlcpy(mdr->name, entry->name, sizeof(mdr->name));
+ mdr->address = entry->phys_addr;
+ mdr->size = entry->size;
+
+ /* Update elf header */
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_name = set_section_name(mdr->name);
+ shdr->sh_addr = (elf_addr_t)entry->virt_addr;
+ shdr->sh_size = mdr->size;
+ shdr->sh_flags = SHF_WRITE;
+ shdr->sh_offset = elf_offset;
+ shdr->sh_entsize = 0;
+
+ phdr->p_type = PT_LOAD;
+ phdr->p_offset = elf_offset;
+ phdr->p_vaddr = entry->virt_addr;
+ phdr->p_paddr = entry->phys_addr;
+ phdr->p_filesz = phdr->p_memsz = mdr->size;
+ phdr->p_flags = PF_R | PF_W;
+
+ md_ehdr->e_shnum += 1;
+ md_ehdr->e_phnum += 1;
+ elf_offset += shdr->sh_size;
+ shdr++;
+ phdr++;
+
+ return 0;
+}
+
+bool msm_minidump_enabled(void)
+{
+ bool ret;
+
+ spin_lock(&mdt_lock);
+ ret = minidump_enabled;
+ spin_unlock(&mdt_lock);
+ return ret;
+}
+EXPORT_SYMBOL(msm_minidump_enabled);
+
+int msm_minidump_add_region(const struct md_region *entry)
+{
+ u32 entries;
+ struct md_region *mdr;
+ int ret = 0;
+
+ if (!entry)
+ return -EINVAL;
+
+ if (((strlen(entry->name) > MAX_NAME_LENGTH) ||
+ md_check_name(entry->name)) && !entry->virt_addr) {
+ pr_info("Invalid entry details\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(entry->size, 4)) {
+ pr_info("size should be 4 byte aligned\n");
+ return -EINVAL;
+ }
+
+ spin_lock(&mdt_lock);
+ entries = minidump_table.num_regions;
+ if (entries >= MAX_NUM_ENTRIES) {
+ pr_info("Maximum entries reached.\n");
+ spin_unlock(&mdt_lock);
+ return -ENOMEM;
+ }
+
+ mdr = &minidump_table.entry[entries];
+ strlcpy(mdr->name, entry->name, sizeof(mdr->name));
+ mdr->virt_addr = entry->virt_addr;
+ mdr->phys_addr = entry->phys_addr;
+ mdr->size = entry->size;
+ mdr->id = entry->id;
+
+ minidump_table.num_regions = entries + 1;
+
+ if (minidump_enabled)
+ ret = md_update_smem_table(entry);
+ else
+ pendings++;
+
+ spin_unlock(&mdt_lock);
+
+ pr_debug("Minidump: added %s to %s list\n",
+ mdr->name, minidump_enabled ? "":"pending");
+ return ret;
+}
+EXPORT_SYMBOL(msm_minidump_add_region);
+
+static int msm_minidump_add_header(void)
+{
+ struct md_smem_region *mdreg = &minidump_table.region[0];
+ char *banner;
+ unsigned int strtbl_off, elfh_size, phdr_off;
+
+ elfh_size = sizeof(*md_ehdr) + MAX_STRTBL_SIZE + MAX_MEM_LENGTH +
+ ((sizeof(*shdr) + sizeof(*phdr)) * (MAX_NUM_ENTRIES + 1));
+
+ md_ehdr = kzalloc(elfh_size, GFP_KERNEL);
+ if (!md_ehdr)
+ return -ENOMEM;
+
+ strlcpy(mdreg->name, "KELF_HEADER", sizeof(mdreg->name));
+ mdreg->address = virt_to_phys(md_ehdr);
+ mdreg->size = elfh_size;
+
+ /* Section headers*/
+ shdr = (struct elf_shdr *)(md_ehdr + 1);
+ phdr = (struct elf_phdr *)(shdr + MAX_NUM_ENTRIES);
+ phdr_off = sizeof(*md_ehdr) + (sizeof(*shdr) * MAX_NUM_ENTRIES);
+
+ memcpy(md_ehdr->e_ident, ELFMAG, SELFMAG);
+ md_ehdr->e_ident[EI_CLASS] = ELF_CLASS;
+ md_ehdr->e_ident[EI_DATA] = ELF_DATA;
+ md_ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+ md_ehdr->e_ident[EI_OSABI] = ELF_OSABI;
+ md_ehdr->e_type = ET_CORE;
+ md_ehdr->e_machine = ELF_ARCH;
+ md_ehdr->e_version = EV_CURRENT;
+ md_ehdr->e_ehsize = sizeof(*md_ehdr);
+ md_ehdr->e_phoff = phdr_off;
+ md_ehdr->e_phentsize = sizeof(*phdr);
+ md_ehdr->e_phnum = 1;
+ md_ehdr->e_shoff = sizeof(*md_ehdr);
+ md_ehdr->e_shentsize = sizeof(*shdr);
+ md_ehdr->e_shnum = 3; /* NULL, STR TABLE, Linux banner */
+ md_ehdr->e_shstrndx = 1;
+
+ elf_offset = elfh_size;
+ strtbl_off = sizeof(*md_ehdr) +
+ ((sizeof(*phdr) + sizeof(*shdr)) * MAX_NUM_ENTRIES);
+ /* First section header should be NULL
+ * 2nd entry for string table
+ */
+ shdr++;
+ shdr->sh_type = SHT_STRTAB;
+ shdr->sh_offset = (elf_addr_t)strtbl_off;
+ shdr->sh_size = MAX_STRTBL_SIZE;
+ shdr->sh_entsize = 0;
+ shdr->sh_flags = 0;
+ shdr->sh_name = set_section_name("STR_TBL");
+ shdr++;
+
+ /* 3rd entry for linux banner */
+ banner = (char *)md_ehdr + strtbl_off + MAX_STRTBL_SIZE;
+ strlcpy(banner, linux_banner, MAX_MEM_LENGTH);
+
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_offset = (elf_addr_t)(strtbl_off + MAX_STRTBL_SIZE);
+ shdr->sh_size = strlen(linux_banner) + 1;
+ shdr->sh_addr = (elf_addr_t)linux_banner;
+ shdr->sh_entsize = 0;
+ shdr->sh_flags = SHF_WRITE;
+ shdr->sh_name = set_section_name("linux_banner");
+ shdr++;
+
+ phdr->p_type = PT_LOAD;
+ phdr->p_offset = (elf_addr_t)(strtbl_off + MAX_STRTBL_SIZE);
+ phdr->p_vaddr = (elf_addr_t)linux_banner;
+ phdr->p_paddr = virt_to_phys(linux_banner);
+ phdr->p_filesz = phdr->p_memsz = strlen(linux_banner) + 1;
+ phdr->p_flags = PF_R | PF_W;
+
+ md_ehdr->e_phnum += 1;
+ phdr++;
+
+ return 0;
+}
+
+static int __init msm_minidump_init(void)
+{
+ unsigned int i, size;
+ struct md_region *mdr;
+ struct md_smem_table *smem_table;
+
+ /* Get Minidump table */
+ smem_table = smem_get_entry(SMEM_MINIDUMP_TABLE_ID, &size, 0,
+ SMEM_ANY_HOST_FLAG);
+ if (IS_ERR_OR_NULL(smem_table)) {
+ pr_info("SMEM is not initialized.\n");
+ return -ENODEV;
+ }
+
+ if ((smem_table->next_avail_offset + MAX_MEM_LENGTH) >
+ smem_table->smem_length) {
+ pr_info("SMEM memory not available.\n");
+ return -ENOMEM;
+ }
+
+ /* Get next_avail_offset and update it to reserve memory */
+ minidump_table.region_base_offset = smem_table->next_avail_offset;
+ minidump_table.region = (struct md_smem_region *)((uintptr_t)smem_table
+ + minidump_table.region_base_offset);
+
+ smem_table->next_avail_offset =
+ minidump_table.region_base_offset + MAX_MEM_LENGTH;
+ minidump_table.md_smem_table = smem_table;
+
+ msm_minidump_add_header();
+
+ /* Add pending entries to smem table */
+ spin_lock(&mdt_lock);
+ minidump_enabled = true;
+
+ for (i = 0; i < pendings; i++) {
+ mdr = &minidump_table.entry[i];
+ if (md_update_smem_table(mdr)) {
+ pr_info("Unable to add entry %s to smem table\n",
+ mdr->name);
+ spin_unlock(&mdt_lock);
+ return -ENODEV;
+ }
+ }
+
+ pendings = 0;
+ spin_unlock(&mdt_lock);
+
+ pr_info("Enabled, region base:%d, region 0x%pK\n",
+ minidump_table.region_base_offset, minidump_table.region);
+
+ return 0;
+}
+subsys_initcall(msm_minidump_init);
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
index 470ecfdd9f5e..745a069df88a 100644
--- a/drivers/soc/qcom/watchdog_v2.c
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,7 @@
#include <linux/wait.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/minidump.h>
#include <soc/qcom/watchdog.h>
#define MODULE_NAME "msm_watchdog"
@@ -521,6 +522,8 @@ void register_scan_dump(struct msm_watchdog_data *wdog_dd)
dump_data->addr = virt_to_phys(dump_addr);
dump_data->len = wdog_dd->scandump_size;
+ strlcpy(dump_data->name, "KSCANDUMP", sizeof(dump_data->name));
+
dump_entry.id = MSM_DUMP_DATA_SCANDUMP;
dump_entry.addr = virt_to_phys(dump_data);
ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
@@ -605,6 +608,9 @@ static void configure_bark_dump(struct msm_watchdog_data *wdog_dd)
cpu_data[cpu].addr = virt_to_phys(cpu_buf +
cpu * MAX_CPU_CTX_SIZE);
cpu_data[cpu].len = MAX_CPU_CTX_SIZE;
+ snprintf(cpu_data[cpu].name, sizeof(cpu_data[cpu].name),
+ "KCPU_CTX%d", cpu);
+
dump_entry.id = MSM_DUMP_DATA_CPU_CTX + cpu;
dump_entry.addr = virt_to_phys(&cpu_data[cpu]);
ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
@@ -820,6 +826,7 @@ static int msm_watchdog_probe(struct platform_device *pdev)
{
int ret;
struct msm_watchdog_data *wdog_dd;
+ struct md_region md_entry;
if (!pdev->dev.of_node || !enable)
return -ENODEV;
@@ -841,6 +848,15 @@ static int msm_watchdog_probe(struct platform_device *pdev)
goto err;
}
init_watchdog_data(wdog_dd);
+
+ /* Add wdog info to minidump table */
+ strlcpy(md_entry.name, "KWDOGDATA", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)wdog_dd;
+ md_entry.phys_addr = virt_to_phys(wdog_dd);
+ md_entry.size = sizeof(*wdog_dd);
+ if (msm_minidump_add_region(&md_entry))
+ pr_info("Failed to add RTB in Minidump\n");
+
return 0;
err:
kzfree(wdog_dd);
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index faa81c28a0d3..58bf3d2f52bd 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -114,6 +114,7 @@ struct ion_client {
*/
struct ion_handle {
struct kref ref;
+ unsigned int user_ref_count;
struct ion_client *client;
struct ion_buffer *buffer;
struct rb_node node;
@@ -433,6 +434,50 @@ int ion_handle_put(struct ion_handle *handle)
return ret;
}
+/* Must hold the client lock */
+static void user_ion_handle_get(struct ion_handle *handle)
+{
+ if (handle->user_ref_count++ == 0)
+ kref_get(&handle->ref);
+}
+
+/* Must hold the client lock */
+static struct ion_handle *user_ion_handle_get_check_overflow(
+ struct ion_handle *handle)
+{
+ if (handle->user_ref_count + 1 == 0)
+ return ERR_PTR(-EOVERFLOW);
+ user_ion_handle_get(handle);
+ return handle;
+}
+
+/* passes a kref to the user ref count.
+ * We know we're holding a kref to the object before and
+ * after this call, so no need to reverify handle.
+ */
+static struct ion_handle *pass_to_user(struct ion_handle *handle)
+{
+ struct ion_client *client = handle->client;
+ struct ion_handle *ret;
+
+ mutex_lock(&client->lock);
+ ret = user_ion_handle_get_check_overflow(handle);
+ ion_handle_put_nolock(handle);
+ mutex_unlock(&client->lock);
+ return ret;
+}
+
+/* Must hold the client lock */
+static int user_ion_handle_put_nolock(struct ion_handle *handle)
+{
+ int ret = 0;
+
+ if (--handle->user_ref_count == 0)
+ ret = ion_handle_put_nolock(handle);
+
+ return ret;
+}
+
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
struct ion_buffer *buffer)
{
@@ -644,6 +689,25 @@ static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle
ion_handle_put_nolock(handle);
}
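+/* Must hold the client lock */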
+static void user_ion_free_nolock(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ bool valid_handle;
+
+ WARN_ON(client != handle->client);
+
+ valid_handle = ion_handle_validate(client, handle);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+ return;
+ }
+ if (handle->user_ref_count == 0) {
+ WARN(1, "%s: User does not have access!\n", __func__);
+ return;
+ }
+ user_ion_handle_put_nolock(handle);
+}
+
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
BUG_ON(client != handle->client);
@@ -1518,7 +1582,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
data.allocation.flags, true);
if (IS_ERR(handle))
return PTR_ERR(handle);
-
+ pass_to_user(handle);
data.allocation.handle = handle->id;
cleanup_handle = handle;
@@ -1534,7 +1598,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
mutex_unlock(&client->lock);
return PTR_ERR(handle);
}
- ion_free_nolock(client, handle);
+ user_ion_free_nolock(client, handle);
ion_handle_put_nolock(handle);
mutex_unlock(&client->lock);
break;
@@ -1558,10 +1622,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct ion_handle *handle;
handle = ion_import_dma_buf(client, data.fd.fd);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
- else
- data.handle.handle = handle->id;
+ } else {
+ handle = pass_to_user(handle);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ }
break;
}
case ION_IOC_SYNC:
@@ -1593,8 +1662,10 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (dir & _IOC_READ) {
if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
if (cleanup_handle) {
- ion_free(client, cleanup_handle);
- ion_handle_put(cleanup_handle);
+ mutex_lock(&client->lock);
+ user_ion_free_nolock(client, cleanup_handle);
+ ion_handle_put_nolock(cleanup_handle);
+ mutex_unlock(&client->lock);
}
return -EFAULT;
}
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 9b8d17ce3a5e..5238d67490ce 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -29,6 +29,7 @@
#include "sync.h"
#define CREATE_TRACE_POINTS
+#define SYNC_DUMP_TIME_LIMIT 7000
#include "trace/sync.h"
static const struct fence_ops android_fence_ops;
@@ -392,7 +393,9 @@ int sync_fence_wait(struct sync_fence *fence, long timeout)
if (timeout) {
pr_info("fence timeout on [%pK] after %dms\n", fence,
jiffies_to_msecs(timeout));
- sync_dump();
+ if (jiffies_to_msecs(timeout) >=
+ SYNC_DUMP_TIME_LIMIT)
+ sync_dump();
}
return -ETIME;
}
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index e60869144339..0ecf1ef92ab3 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -59,11 +59,13 @@ static int panel_debug_base_open(struct inode *inode, struct file *file)
static int panel_debug_base_release(struct inode *inode, struct file *file)
{
struct mdss_debug_base *dbg = file->private_data;
+ mutex_lock(&mdss_debug_lock);
if (dbg && dbg->buf) {
kfree(dbg->buf);
dbg->buf_len = 0;
dbg->buf = NULL;
}
+ mutex_unlock(&mdss_debug_lock);
return 0;
}
@@ -385,11 +387,13 @@ static int mdss_debug_base_open(struct inode *inode, struct file *file)
static int mdss_debug_base_release(struct inode *inode, struct file *file)
{
struct mdss_debug_base *dbg = file->private_data;
+ mutex_lock(&mdss_debug_lock);
if (dbg && dbg->buf) {
kfree(dbg->buf);
dbg->buf_len = 0;
dbg->buf = NULL;
}
+ mutex_unlock(&mdss_debug_lock);
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 7a1b563fbb6c..1e878e9a00cb 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -69,6 +69,11 @@ static int mdss_dp_process_phy_test_pattern_request(
static int mdss_dp_send_audio_notification(
struct mdss_dp_drv_pdata *dp, int val);
+static inline void mdss_dp_reset_sink_count(struct mdss_dp_drv_pdata *dp)
+{
+ memset(&dp->sink_count, 0, sizeof(dp->sink_count));
+}
+
static inline void mdss_dp_reset_test_data(struct mdss_dp_drv_pdata *dp)
{
dp->test_data = (const struct dpcd_test_request){ 0 };
@@ -133,22 +138,77 @@ static int mdss_dp_is_clk_prefix(const char *clk_prefix, const char *clk_name)
return !strncmp(clk_name, clk_prefix, strlen(clk_prefix));
}
+static void mdss_dp_reset_phy_config_indices(struct mdss_dp_drv_pdata *dp)
+{
+ int i = 0;
+
+ for (i = 0; i < PHY_AUX_CFG_MAX; i++)
+ dp->aux_cfg[i].current_index = 0;
+}
+
+static void mdss_dp_phy_aux_cfg_reset(struct mdss_dp_drv_pdata *dp)
+{
+ int i = 0;
+
+ for (i = 0; i < PHY_AUX_CFG_MAX; i++)
+ dp->aux_cfg[i] = (const struct mdss_dp_phy_cfg){ 0 };
+}
+
+static int mdss_dp_parse_aux_cfg(struct platform_device *pdev,
+ struct mdss_dp_drv_pdata *dp)
+{
+ int len = 0, i = 0, j = 0, config_count = 0;
+ const char *data;
+ int const minimum_config_count = 1;
+
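+ /*
+ * Each qcom,aux-cfgN-settings property is parsed as a register
+ * offset followed by one or more candidate values that form the
+ * tuning LUT for that AUX PHY register.
+ */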
+ for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
+ const char *property = mdss_dp_get_phy_aux_config_property(i);
+
+ data = of_get_property(pdev->dev.of_node, property, &len);
+ if (!data) {
+ pr_err("Unable to read %s\n", property);
+ goto error;
+ }
+
+ config_count = len - 1;
+ if ((config_count < minimum_config_count) ||
+ (config_count > MDSS_DP_MAX_PHY_CFG_VALUE_CNT)) {
+ pr_err("Invalid config count (%d) configs for %s\n",
+ config_count, property);
+ goto error;
+ }
+
+ dp->aux_cfg[i].offset = data[0];
+ dp->aux_cfg[i].cfg_cnt = config_count;
+ pr_debug("%s offset=0x%x, cfg_cnt=%d\n",
+ property,
+ dp->aux_cfg[i].offset,
+ dp->aux_cfg[i].cfg_cnt);
+ for (j = 1; j < len; j++) {
+ dp->aux_cfg[i].lut[j - 1] = data[j];
+ pr_debug("%s lut[%d]=0x%x\n",
+ property,
+ i,
+ dp->aux_cfg[i].lut[j - 1]);
+ }
+ }
+
+ return 0;
+
+error:
+ mdss_dp_phy_aux_cfg_reset(dp);
+ return -EINVAL;
+}
+
static int mdss_dp_parse_prop(struct platform_device *pdev,
struct mdss_dp_drv_pdata *dp_drv)
{
int len = 0, i = 0, rc = 0;
const char *data;
- data = of_get_property(pdev->dev.of_node,
- "qcom,aux-cfg-settings", &len);
- if ((!data) || (len != AUX_CFG_LEN)) {
- pr_err("%s:%d, Unable to read DP AUX CFG settings",
- __func__, __LINE__);
- return -EINVAL;
- }
-
- for (i = 0; i < len; i++)
- dp_drv->aux_cfg[i] = data[i];
+ rc = mdss_dp_parse_aux_cfg(pdev, dp_drv);
+ if (rc)
+ return rc;
data = of_get_property(pdev->dev.of_node,
"qcom,logical2physical-lane-map", &len);
@@ -958,6 +1018,12 @@ void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *dp)
mdss_dp_configuration_ctrl(&dp->ctrl_io, data);
}
+static inline void mdss_dp_ack_state(struct mdss_dp_drv_pdata *dp, int val)
+{
+ if (dp && dp->ext_audio_data.intf_ops.notify)
+ dp->ext_audio_data.intf_ops.notify(dp->ext_pdev, val);
+}
+
static int mdss_dp_wait4video_ready(struct mdss_dp_drv_pdata *dp_drv)
{
int ret = 0;
@@ -989,26 +1055,28 @@ static int mdss_dp_wait4video_ready(struct mdss_dp_drv_pdata *dp_drv)
static void mdss_dp_update_cable_status(struct mdss_dp_drv_pdata *dp,
bool connected)
{
- mutex_lock(&dp->pd_msg_mutex);
+ mutex_lock(&dp->attention_lock);
pr_debug("cable_connected to %d\n", connected);
if (dp->cable_connected != connected)
dp->cable_connected = connected;
else
pr_debug("no change in cable status\n");
- mutex_unlock(&dp->pd_msg_mutex);
+ mutex_unlock(&dp->attention_lock);
}
static int dp_get_cable_status(struct platform_device *pdev, u32 vote)
{
- struct mdss_dp_drv_pdata *dp_ctrl = platform_get_drvdata(pdev);
+ struct mdss_dp_drv_pdata *dp = platform_get_drvdata(pdev);
u32 hpd;
- if (!dp_ctrl) {
+ if (!dp) {
DEV_ERR("%s: invalid input\n", __func__);
return -ENODEV;
}
- hpd = dp_ctrl->cable_connected;
+ mutex_lock(&dp->attention_lock);
+ hpd = dp->cable_connected;
+ mutex_unlock(&dp->attention_lock);
return hpd;
}
@@ -1230,12 +1298,6 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
return 0;
} /* dp_init_panel_info */
-static inline void mdss_dp_ack_state(struct mdss_dp_drv_pdata *dp, int val)
-{
- if (dp && dp->ext_audio_data.intf_ops.notify)
- dp->ext_audio_data.intf_ops.notify(dp->ext_pdev, val);
-}
-
/**
* mdss_dp_get_lane_mapping() - returns lane mapping based on given orientation
* @orientation: usb plug orientation
@@ -1621,15 +1683,19 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
- if (dp_drv->power_on) {
- /*
- * Acknowledge the connection event if link training has already
- * been done. This will unblock the external display thread and
- * allow the driver to progress. For example, in the case of
- * video test pattern requests, to send the test response and
- * start transmitting the test pattern.
- */
- mdss_dp_ack_state(dp_drv, true);
+ /*
+ * If the link is already active, then nothing needs to be done here.
+ * However, it is possible that the power_on flag could be
+ * set to true but we would still need to initialize the DP host.
+ * An example of this use-case is when a multiport dongle is connected
+ * and subsequently the downstream sink is disconnected. This would
+ * only go through the IRQ HPD path where we tear down the link but
+ * the power_on flag remains set to true. When the downstream sink
+ * is subsequently connected again, we need to re-initialize the DP
+ * host.
+ */
+ if (dp_drv->power_on &&
+ (dp_drv->new_vic && (dp_drv->new_vic == dp_drv->vic))) {
pr_debug("Link already setup, return\n");
return 0;
}
@@ -1647,6 +1713,23 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
return mdss_dp_on_hpd(dp_drv);
}
+static bool mdss_dp_is_ds_bridge(struct mdss_dp_drv_pdata *dp)
+{
+ return dp->dpcd.downstream_port.dfp_present;
+}
+
+static bool mdss_dp_is_ds_bridge_sink_count_zero(struct mdss_dp_drv_pdata *dp)
+{
+ return (mdss_dp_is_ds_bridge(dp) &&
+ (dp->sink_count.count == 0));
+}
+
+static bool mdss_dp_is_ds_bridge_no_local_edid(struct mdss_dp_drv_pdata *dp)
+{
+ return (mdss_dp_is_ds_bridge_sink_count_zero(dp) &&
+ !(dp->dpcd.flags & DPCD_PORT_0_EDID_PRESENTED));
+}
+
static int mdss_dp_off_irq(struct mdss_dp_drv_pdata *dp_drv)
{
if (!dp_drv->power_on) {
@@ -1664,10 +1747,16 @@ static int mdss_dp_off_irq(struct mdss_dp_drv_pdata *dp_drv)
/* Make sure DP mainlink and audio engines are disabled */
wmb();
- mdss_dp_ack_state(dp_drv, false);
+ /*
+ * If the downstream device is a bridge which no longer has any
+ * downstream devices connected to it, then we should reset
+ * the current panel info.
+ */
+ if (mdss_dp_is_ds_bridge_sink_count_zero(dp_drv))
+ dp_init_panel_info(dp_drv, HDMI_VFRMT_UNKNOWN);
+
mutex_unlock(&dp_drv->train_mutex);
- complete_all(&dp_drv->irq_comp);
pr_debug("end\n");
return 0;
@@ -1694,11 +1783,11 @@ static int mdss_dp_off_hpd(struct mdss_dp_drv_pdata *dp_drv)
mdss_dp_host_deinit(dp_drv);
dp_drv->power_on = false;
- dp_drv->sink_info_read = false;
dp_init_panel_info(dp_drv, HDMI_VFRMT_UNKNOWN);
- mdss_dp_ack_state(dp_drv, false);
mdss_dp_reset_test_data(dp_drv);
+ mdss_dp_reset_sink_count(dp_drv);
+ dp_drv->prev_sink_count = dp_drv->sink_count;
mutex_unlock(&dp_drv->train_mutex);
pr_debug("DP off done\n");
@@ -1737,8 +1826,9 @@ static int mdss_dp_send_audio_notification(
if (mdss_dp_sink_audio_supp(dp) || dp->audio_test_req) {
dp->audio_test_req = false;
- pr_debug("sending audio notification\n");
flags |= MSM_EXT_DISP_HPD_AUDIO;
+ pr_debug("sending audio notification = %d, flags = %d\n", val,
+ flags);
if (dp->ext_audio_data.intf_ops.hpd)
ret = dp->ext_audio_data.intf_ops.hpd(dp->ext_pdev,
@@ -1763,7 +1853,8 @@ static int mdss_dp_send_video_notification(
goto end;
}
- flags |= MSM_EXT_DISP_HPD_VIDEO;
+ flags |= MSM_EXT_DISP_HPD_ASYNC_VIDEO;
+ pr_debug("sending video notification = %d, flags = %d\n", val, flags);
if (dp->ext_audio_data.intf_ops.hpd)
ret = dp->ext_audio_data.intf_ops.hpd(dp->ext_pdev,
@@ -1820,6 +1911,8 @@ static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
dp_drv->edid_buf = edid_init_data.buf;
dp_drv->edid_buf_size = edid_init_data.buf_size;
+ mdss_dp_set_default_resolution(dp_drv);
+
return 0;
}
@@ -1871,14 +1964,16 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
mdss_dp_ctrl_reset(&dp_drv->ctrl_io);
mdss_dp_phy_reset(&dp_drv->ctrl_io);
mdss_dp_aux_reset(&dp_drv->ctrl_io);
+ mdss_dp_aux_set_limits(&dp_drv->ctrl_io);
+
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, true);
pr_debug("Ctrl_hw_rev =0x%x, phy hw_rev =0x%x\n",
mdss_dp_get_ctrl_hw_version(&dp_drv->ctrl_io),
mdss_dp_get_phy_hw_version(&dp_drv->phy_io));
- mdss_dp_phy_aux_setup(&dp_drv->phy_io, dp_drv->aux_cfg,
- dp_drv->phy_reg_offset);
+ mdss_dp_reset_phy_config_indices(dp_drv);
+ mdss_dp_phy_aux_setup(dp_drv);
mdss_dp_irq_enable(dp_drv);
dp_drv->dp_initialized = true;
@@ -1946,10 +2041,11 @@ static int mdss_dp_host_deinit(struct mdss_dp_drv_pdata *dp)
static int mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp,
enum notification_status status)
{
- const int irq_comp_timeout = HZ * 2;
int ret = 0;
+ bool notify = false;
+ bool connect;
- mutex_lock(&dp->pd_msg_mutex);
+ pr_debug("beginning notification\n");
if (status == dp->hpd_notification_status) {
pr_debug("No change in status %s --> %s\n",
mdss_dp_notification_status_to_string(status),
@@ -1962,39 +2058,40 @@ static int mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp,
case NOTIFY_CONNECT_IRQ_HPD:
if (dp->hpd_notification_status != NOTIFY_DISCONNECT_IRQ_HPD)
goto invalid_request;
- /* Follow the same programming as for NOTIFY_CONNECT */
- mdss_dp_host_init(&dp->panel_data);
- mdss_dp_send_video_notification(dp, true);
+ notify = true;
+ connect = true;
break;
case NOTIFY_CONNECT:
- if ((dp->hpd_notification_status == NOTIFY_CONNECT_IRQ_HPD) ||
- (dp->hpd_notification_status ==
- NOTIFY_DISCONNECT_IRQ_HPD))
+ if (dp->hpd_notification_status == NOTIFY_CONNECT_IRQ_HPD)
goto invalid_request;
- mdss_dp_host_init(&dp->panel_data);
- mdss_dp_send_video_notification(dp, true);
+ notify = true;
+ connect = true;
break;
case NOTIFY_DISCONNECT:
- mdss_dp_send_audio_notification(dp, false);
- mdss_dp_send_video_notification(dp, false);
+ /*
+ * Userspace triggers a disconnect event on boot up; this must
+ * not be processed since there was no previously connected sink
+ * device.
+ */
+ if (dp->hpd_notification_status == NOTIFY_UNKNOWN)
+ goto invalid_request;
+ if (dp->hpd_notification_status == NOTIFY_DISCONNECT_IRQ_HPD) {
+ /*
+ * user modules already turned off. Need to explicitly
+ * turn off DP core here.
+ */
+ mdss_dp_off_hpd(dp);
+ } else {
+ notify = true;
+ connect = false;
+ }
break;
case NOTIFY_DISCONNECT_IRQ_HPD:
if (dp->hpd_notification_status == NOTIFY_DISCONNECT)
goto invalid_request;
- mdss_dp_send_audio_notification(dp, false);
- mdss_dp_send_video_notification(dp, false);
- if (!IS_ERR_VALUE(ret) && ret) {
- reinit_completion(&dp->irq_comp);
- ret = wait_for_completion_timeout(&dp->irq_comp,
- irq_comp_timeout);
- if (ret <= 0) {
- pr_warn("irq_comp timed out\n");
- ret = -EINVAL;
- } else {
- ret = 0;
- }
- }
+ notify = true;
+ connect = false;
break;
default:
pr_err("Invalid notification status = %d\n", status);
@@ -2002,7 +2099,7 @@ static int mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp,
break;
}
- goto end;
+ goto notify;
invalid_request:
pr_err("Invalid request %s --> %s\n",
@@ -2011,16 +2108,34 @@ invalid_request:
mdss_dp_notification_status_to_string(status));
ret = -EINVAL;
-end:
+notify:
+ if (ret || !notify) {
+ pr_debug("not sending notification\n");
+ goto end;
+ }
+
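+ /*
+ * Mark a notification as pending before informing user modules; the
+ * flag is cleared once the corresponding panel on/off event arrives
+ * or if sending the notification fails.
+ */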
+ atomic_set(&dp->notification_pending, 1);
+ if (connect) {
+ mdss_dp_host_init(&dp->panel_data);
+ ret = mdss_dp_send_video_notification(dp, true);
+ } else {
+ mdss_dp_send_audio_notification(dp, false);
+ ret = mdss_dp_send_video_notification(dp, false);
+ }
+
if (!ret) {
pr_debug("Successfully sent notification %s --> %s\n",
mdss_dp_notification_status_to_string(
dp->hpd_notification_status),
mdss_dp_notification_status_to_string(status));
- dp->hpd_notification_status = status;
+ } else {
+ pr_err("%s Notification failed\n",
+ mdss_dp_notification_status_to_string(status));
+ atomic_set(&dp->notification_pending, 0);
}
- mutex_unlock(&dp->pd_msg_mutex);
+end:
+ dp->hpd_notification_status = status;
return ret;
}
@@ -2029,9 +2144,6 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
int ret;
u32 max_pclk_khz;
- if (dp->sink_info_read)
- return 0;
-
pr_debug("start\n");
ret = mdss_dp_dpcd_cap_read(dp);
@@ -2044,8 +2156,25 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
*/
pr_err("dpcd read failed, set failsafe parameters\n");
mdss_dp_set_default_link_parameters(dp);
+ goto read_edid;
}
+ /*
+ * When connected to a multiport adaptor which does not have a
+ * local EDID present, do not attempt to read the EDID.
+ * When connected to a multiport adaptor with no downstream device
+ * connected to it, do not attempt to read the EDID. It is possible
+ * that the adaptor may advertise the presence of a local EDID, but it
+ * is not guaranteed to work.
+ */
+ if (mdss_dp_is_ds_bridge_sink_count_zero(dp)) {
+ if (mdss_dp_is_ds_bridge_no_local_edid(dp))
+ pr_debug("No local EDID present on DS branch device\n");
+ pr_info("no downstream devices, skip client notification\n");
+ goto end;
+ }
+
+read_edid:
ret = mdss_dp_edid_read(dp);
if (ret) {
pr_err("edid read error, setting default resolution\n");
@@ -2056,14 +2185,18 @@ static int mdss_dp_process_hpd_high(struct mdss_dp_drv_pdata *dp)
hdmi_edid_set_max_pclk_rate(dp->panel_data.panel_info.edid_data,
min(dp->max_pclk_khz, max_pclk_khz));
+ if (dp->dpcd_read_required) {
+ pr_debug("reading DPCD with updated AUX config\n");
+ mdss_dp_dpcd_cap_read(dp);
+ dp->dpcd_read_required = false;
+ }
+
ret = hdmi_edid_parser(dp->panel_data.panel_info.edid_data);
if (ret) {
pr_err("edid parse failed, setting default resolution\n");
goto notify;
}
- dp->sink_info_read = true;
-
notify:
if (ret) {
/* set failsafe parameters */
@@ -2090,7 +2223,6 @@ notify:
end:
pr_debug("end\n");
return ret;
-
}
static int mdss_dp_check_params(struct mdss_dp_drv_pdata *dp, void *arg)
@@ -2291,12 +2423,16 @@ static ssize_t mdss_dp_rda_connected(struct device *dev,
{
ssize_t ret;
struct mdss_dp_drv_pdata *dp = mdss_dp_get_drvdata(dev);
+ bool cable_connected;
if (!dp)
return -EINVAL;
- ret = snprintf(buf, PAGE_SIZE, "%d\n", dp->cable_connected);
- pr_debug("%d\n", dp->cable_connected);
+ mutex_lock(&dp->attention_lock);
+ cable_connected = dp->cable_connected;
+ mutex_unlock(&dp->attention_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", cable_connected);
+ pr_debug("%d\n", cable_connected);
return ret;
}
@@ -2465,6 +2601,7 @@ static ssize_t mdss_dp_wta_hpd(struct device *dev,
int hpd, rc;
ssize_t ret = strnlen(buf, PAGE_SIZE);
struct mdss_dp_drv_pdata *dp = mdss_dp_get_drvdata(dev);
+ bool cable_connected;
if (!dp) {
pr_err("invalid data\n");
@@ -2480,9 +2617,13 @@ static ssize_t mdss_dp_wta_hpd(struct device *dev,
}
dp->hpd = !!hpd;
- pr_debug("hpd=%d\n", dp->hpd);
+ mutex_lock(&dp->attention_lock);
+ cable_connected = dp->cable_connected;
+ mutex_unlock(&dp->attention_lock);
+ pr_debug("hpd=%d cable_connected=%s\n", dp->hpd,
+ cable_connected ? "true" : "false");
- if (dp->hpd && dp->cable_connected) {
+ if (dp->hpd && cable_connected) {
if (dp->alt_mode.current_state & DP_CONFIGURE_DONE) {
mdss_dp_host_init(&dp->panel_data);
mdss_dp_process_hpd_high(dp);
@@ -2812,8 +2953,6 @@ static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata)
/* wait until link training is completed */
mutex_lock(&dp_drv->train_mutex);
- mdss_dp_aux_set_sink_power_state(dp_drv, SINK_POWER_OFF);
-
reinit_completion(&dp_drv->idle_comp);
mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
if (!wait_for_completion_timeout(&dp_drv->idle_comp,
@@ -2903,28 +3042,33 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
switch (event) {
case MDSS_EVENT_UNBLANK:
- mdss_dp_ack_state(dp, true);
rc = mdss_dp_on(pdata);
break;
case MDSS_EVENT_PANEL_ON:
mdss_dp_update_hdcp_info(dp);
if (dp_is_hdcp_enabled(dp)) {
- cancel_delayed_work(&dp->hdcp_cb_work);
+ cancel_delayed_work_sync(&dp->hdcp_cb_work);
dp->hdcp_status = HDCP_STATE_AUTHENTICATING;
queue_delayed_work(dp->workq,
&dp->hdcp_cb_work, HZ / 2);
}
break;
+ case MDSS_EVENT_POST_PANEL_ON:
+ atomic_set(&dp->notification_pending, 0);
+ complete_all(&dp->notification_comp);
+ break;
case MDSS_EVENT_PANEL_OFF:
rc = mdss_dp_off(pdata);
+ atomic_set(&dp->notification_pending, 0);
+ complete_all(&dp->notification_comp);
break;
case MDSS_EVENT_BLANK:
if (dp_is_hdcp_enabled(dp)) {
dp->hdcp_status = HDCP_STATE_INACTIVE;
- cancel_delayed_work(&dp->hdcp_cb_work);
+ cancel_delayed_work_sync(&dp->hdcp_cb_work);
if (dp->hdcp.ops->off)
dp->hdcp.ops->off(dp->hdcp.data);
}
@@ -3081,6 +3225,7 @@ static int mdss_retrieve_dp_ctrl_resources(struct platform_device *pdev,
static void mdss_dp_video_ready(struct mdss_dp_drv_pdata *dp)
{
pr_debug("dp_video_ready\n");
+ mdss_dp_ack_state(dp, true);
complete(&dp->video_comp);
}
@@ -3112,10 +3257,21 @@ static int mdss_dp_event_thread(void *data)
ev_data = (struct mdss_dp_event_data *)data;
+ pr_debug("starting\n");
while (!kthread_should_stop()) {
wait_event(ev_data->event_q,
(ev_data->pndx != ev_data->gndx) ||
- kthread_should_stop());
+ kthread_should_stop() ||
+ kthread_should_park());
+ if (kthread_should_stop())
+ return 0;
+
+ if (kthread_should_park()) {
+ pr_debug("parking event thread\n");
+ kthread_parkme();
+ continue;
+ }
+
spin_lock_irqsave(&ev_data->event_lock, flag);
ev = &(ev_data->event_list[ev_data->gndx++]);
todo = ev->id;
@@ -3207,6 +3363,7 @@ irqreturn_t dp_isr(int irq, void *ptr)
spin_lock(&dp->lock);
isr1 = dp_read(base + DP_INTR_STATUS);
isr2 = dp_read(base + DP_INTR_STATUS2);
+ pr_debug("isr1=0x%08x, isr2=0x%08x\n", isr1, isr2);
mask1 = isr1 & dp->mask1;
@@ -3290,6 +3447,27 @@ static int mdss_dp_event_setup(struct mdss_dp_drv_pdata *dp)
return 0;
}
+static void mdss_dp_reset_event_list(struct mdss_dp_drv_pdata *dp)
+{
+ struct mdss_dp_event_data *ev_data = &dp->dp_event;
+
+ spin_lock(&ev_data->event_lock);
+ ev_data->pndx = ev_data->gndx = 0;
+ spin_unlock(&ev_data->event_lock);
+
+ mutex_lock(&dp->attention_lock);
+ INIT_LIST_HEAD(&dp->attention_head);
+ mutex_unlock(&dp->attention_lock);
+}
+
+static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp)
+{
+ pr_debug("enter\n");
+ mdss_dp_reset_event_list(dp);
+ atomic_set(&dp->notification_pending, 0);
+ complete_all(&dp->notification_comp);
+}
+
static void usbpd_connect_callback(struct usbpd_svid_handler *hdlr)
{
struct mdss_dp_drv_pdata *dp_drv;
@@ -3301,6 +3479,8 @@ static void usbpd_connect_callback(struct usbpd_svid_handler *hdlr)
}
mdss_dp_update_cable_status(dp_drv, true);
+ if (dp_drv->ev_thread)
+ kthread_unpark(dp_drv->ev_thread);
if (dp_drv->hpd)
dp_send_events(dp_drv, EV_USBPD_DISCOVER_MODES);
@@ -3320,6 +3500,9 @@ static void usbpd_disconnect_callback(struct usbpd_svid_handler *hdlr)
mdss_dp_update_cable_status(dp_drv, false);
dp_drv->alt_mode.current_state = UNKNOWN_STATE;
+ mdss_dp_reset_sw_state(dp_drv);
+ kthread_park(dp_drv->ev_thread);
+
/**
* Manually turn off the DP controller if we are in PHY
* testing mode.
@@ -3423,6 +3606,17 @@ static inline void mdss_dp_link_maintenance(struct mdss_dp_drv_pdata *dp,
if (mdss_dp_notify_clients(dp, NOTIFY_DISCONNECT_IRQ_HPD))
return;
+ if (atomic_read(&dp->notification_pending)) {
+ int ret;
+
+ pr_debug("waiting for the disconnect to finish\n");
+ ret = wait_for_completion_timeout(&dp->notification_comp, HZ);
+ if (ret <= 0) {
+ pr_warn("NOTIFY_DISCONNECT_IRQ_HPD timed out\n");
+ return;
+ }
+ }
+
mdss_dp_on_irq(dp, lt_needed);
}
@@ -3574,7 +3768,7 @@ static int mdss_dp_process_audio_pattern_request(struct mdss_dp_drv_pdata *dp)
return -EINVAL;
if (dp_is_hdcp_enabled(dp) && dp->hdcp.ops->off) {
- cancel_delayed_work(&dp->hdcp_cb_work);
+ cancel_delayed_work_sync(&dp->hdcp_cb_work);
dp->hdcp.ops->off(dp->hdcp.data);
}
@@ -3620,10 +3814,46 @@ static int mdss_dp_process_audio_pattern_request(struct mdss_dp_drv_pdata *dp)
static int mdss_dp_process_downstream_port_status_change(
struct mdss_dp_drv_pdata *dp)
{
- if (!mdss_dp_is_downstream_port_status_changed(dp))
+ bool ds_status_changed = false;
+
+ if (mdss_dp_is_downstream_port_status_changed(dp)) {
+ pr_debug("downstream port status changed\n");
+ ds_status_changed = true;
+ }
+
+ /*
+ * Ideally the sink should update the downstream port status changed
+ * whenever it updates the downstream sink count. However, it is
+ * possible that only the sink count is updated without setting
+ * the downstream port status changed bit.
+ */
+ if (dp->sink_count.count != dp->prev_sink_count.count) {
+ pr_debug("downstream sink count changed from %d --> %d\n",
+ dp->prev_sink_count.count, dp->sink_count.count);
+ ds_status_changed = true;
+ }
+
+ if (!ds_status_changed)
return -EINVAL;
- return mdss_dp_edid_read(dp);
+ mdss_dp_notify_clients(dp, NOTIFY_DISCONNECT_IRQ_HPD);
+ if (atomic_read(&dp->notification_pending)) {
+ int ret;
+
+ pr_debug("waiting for the disconnect to finish\n");
+ ret = wait_for_completion_timeout(&dp->notification_comp, HZ);
+ if (ret <= 0) {
+ pr_warn("NOTIFY_DISCONNECT_IRQ_HPD timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ if (mdss_dp_is_ds_bridge_sink_count_zero(dp)) {
+ pr_debug("sink count is zero, nothing to do\n");
+ return 0;
+ }
+
+ return mdss_dp_process_hpd_high(dp);
}
static bool mdss_dp_video_pattern_test_lt_needed(struct mdss_dp_drv_pdata *dp)
@@ -3721,19 +3951,19 @@ static int mdss_dp_process_hpd_irq_high(struct mdss_dp_drv_pdata *dp)
mdss_dp_aux_parse_sink_status_field(dp);
- ret = mdss_dp_process_link_training_request(dp);
+ ret = mdss_dp_process_downstream_port_status_change(dp);
if (!ret)
goto exit;
- ret = mdss_dp_process_phy_test_pattern_request(dp);
+ ret = mdss_dp_process_link_training_request(dp);
if (!ret)
goto exit;
- ret = mdss_dp_process_link_status_update(dp);
+ ret = mdss_dp_process_phy_test_pattern_request(dp);
if (!ret)
goto exit;
- ret = mdss_dp_process_downstream_port_status_change(dp);
+ ret = mdss_dp_process_link_status_update(dp);
if (!ret)
goto exit;
@@ -3746,7 +3976,6 @@ static int mdss_dp_process_hpd_irq_high(struct mdss_dp_drv_pdata *dp)
goto exit;
pr_debug("done\n");
-
exit:
dp->hpd_irq_on = false;
return ret;
@@ -3792,7 +4021,8 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
node->vdo = *vdos;
mutex_lock(&dp_drv->attention_lock);
- list_add_tail(&node->list, &dp_drv->attention_head);
+ if (dp_drv->cable_connected)
+ list_add_tail(&node->list, &dp_drv->attention_head);
mutex_unlock(&dp_drv->attention_lock);
dp_send_events(dp_drv, EV_USBPD_ATTENTION);
@@ -3840,11 +4070,21 @@ static void mdss_dp_process_attention(struct mdss_dp_drv_pdata *dp_drv)
if (!dp_drv->alt_mode.dp_status.hpd_high) {
pr_debug("Attention: HPD low\n");
+ if (!dp_drv->power_on) {
+ pr_debug("HPD already low\n");
+ return;
+ }
+
if (dp_is_hdcp_enabled(dp_drv) && dp_drv->hdcp.ops->off) {
- cancel_delayed_work(&dp_drv->hdcp_cb_work);
+ cancel_delayed_work_sync(&dp_drv->hdcp_cb_work);
dp_drv->hdcp.ops->off(dp_drv->hdcp.data);
}
+ /*
+ * Reset the sink count before notifying clients since HPD Low
+ * indicates that the downstream device has been disconnected.
+ */
+ mdss_dp_reset_sink_count(dp_drv);
mdss_dp_notify_clients(dp_drv, NOTIFY_DISCONNECT);
pr_debug("Attention: Notified clients\n");
@@ -3872,6 +4112,11 @@ static void mdss_dp_process_attention(struct mdss_dp_drv_pdata *dp_drv)
pr_debug("Attention: HPD high\n");
+ if (dp_drv->power_on) {
+ pr_debug("HPD high processed already\n");
+ return;
+ }
+
dp_drv->alt_mode.current_state |= DP_STATUS_DONE;
if (dp_drv->alt_mode.current_state & DP_CONFIGURE_DONE) {
@@ -3893,7 +4138,13 @@ static void mdss_dp_handle_attention(struct mdss_dp_drv_pdata *dp)
pr_debug("processing item %d in the list\n", ++i);
+ reinit_completion(&dp->notification_comp);
mutex_lock(&dp->attention_lock);
+ if (!dp->cable_connected) {
+ pr_debug("cable disconnected, returning\n");
+ mutex_unlock(&dp->attention_lock);
+ goto exit;
+ }
node = list_first_entry(&dp->attention_head,
struct mdss_dp_attention_node, list);
@@ -3907,9 +4158,25 @@ static void mdss_dp_handle_attention(struct mdss_dp_drv_pdata *dp)
mdss_dp_usbpd_ext_dp_status(&dp->alt_mode.dp_status);
mdss_dp_process_attention(dp);
+ if (atomic_read(&dp->notification_pending)) {
+ pr_debug("waiting for the attention event to finish\n");
+ /*
+ * This wait is intentionally implemented without a
+ * timeout since this happens only in error
+ * conditions, e.g. if the display framework does not
+ * power off/on the DisplayPort device in time. Other
+ * events might already be queued from the sink at this
+ * point and they cannot be processed until the power
+ * off/on is complete otherwise we might have problems
+ * with interleaving of these events e.g. un-clocked
+ * register access.
+ */
+ wait_for_completion(&dp->notification_comp);
+ }
pr_debug("done processing item %d in the list\n", i);
};
+exit:
pr_debug("exit\n");
}
@@ -3985,7 +4252,6 @@ static int mdss_dp_probe(struct platform_device *pdev)
dp_drv->mask1 = EDP_INTR_MASK1;
dp_drv->mask2 = EDP_INTR_MASK2;
mutex_init(&dp_drv->emutex);
- mutex_init(&dp_drv->pd_msg_mutex);
mutex_init(&dp_drv->attention_lock);
mutex_init(&dp_drv->hdcp_mutex);
spin_lock_init(&dp_drv->lock);
@@ -4078,8 +4344,9 @@ static int mdss_dp_probe(struct platform_device *pdev)
dp_drv->inited = true;
dp_drv->hpd_irq_on = false;
+ atomic_set(&dp_drv->notification_pending, 0);
mdss_dp_reset_test_data(dp_drv);
- init_completion(&dp_drv->irq_comp);
+ init_completion(&dp_drv->notification_comp);
dp_drv->suspend_vic = HDMI_VFRMT_UNKNOWN;
pr_debug("done\n");
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index 4decb26ea073..f358aad8a667 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -77,7 +77,7 @@
#define EDP_INTR_I2C_NACK BIT(18)
#define EDP_INTR_I2C_DEFER BIT(21)
#define EDP_INTR_PLL_UNLOCKED BIT(24)
-#define EDP_INTR_AUX_ERROR BIT(27)
+#define EDP_INTR_PHY_AUX_ERR BIT(27)
#define EDP_INTR_STATUS1 \
@@ -85,7 +85,7 @@
EDP_INTR_WRONG_ADDR | EDP_INTR_TIMEOUT | \
EDP_INTR_NACK_DEFER | EDP_INTR_WRONG_DATA_CNT | \
EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER | \
- EDP_INTR_PLL_UNLOCKED | EDP_INTR_AUX_ERROR)
+ EDP_INTR_PLL_UNLOCKED | EDP_INTR_PHY_AUX_ERR)
#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2)
@@ -110,6 +110,8 @@ struct edp_buf {
int len; /* dara length */
char trans_num; /* transaction number */
char i2c; /* 1 == i2c cmd, 0 == native cmd */
+ bool no_send_addr;
+ bool no_send_stop;
};
/* USBPD-TypeC specific Macros */
@@ -186,6 +188,7 @@ struct dp_alt_mode {
#define DPCD_MAX_DOWNSPREAD_0_5 BIT(2)
#define DPCD_NO_AUX_HANDSHAKE BIT(3)
#define DPCD_PORT_0_EDID_PRESENTED BIT(4)
+#define DPCD_PORT_1_EDID_PRESENTED BIT(5)
/* event */
#define EV_EDP_AUX_SETUP BIT(0)
@@ -239,6 +242,8 @@ struct downstream_port_config {
bool oui_support;
};
+#define DP_MAX_DS_PORT_COUNT 2
+
struct dpcd_cap {
char major;
char minor;
@@ -249,7 +254,7 @@ struct dpcd_cap {
char enhanced_frame;
u32 max_link_rate; /* 162, 270 and 540 Mb, divided by 10 */
u32 flags;
- u32 rx_port0_buf_size;
+ u32 rx_port_buf_size[DP_MAX_DS_PORT_COUNT];
u32 training_read_interval;/* us */
struct downstream_port_config downstream_port;
};
@@ -426,6 +431,102 @@ struct mdss_dp_crc_data {
u32 b_cb;
};
+#define MDSS_DP_MAX_PHY_CFG_VALUE_CNT 3
+struct mdss_dp_phy_cfg {
+ u32 cfg_cnt;
+ u32 current_index;
+ u32 offset;
+ u32 lut[MDSS_DP_MAX_PHY_CFG_VALUE_CNT];
+};
+
+/* PHY AUX config registers */
+enum dp_phy_aux_config_type {
+ PHY_AUX_CFG0,
+ PHY_AUX_CFG1,
+ PHY_AUX_CFG2,
+ PHY_AUX_CFG3,
+ PHY_AUX_CFG4,
+ PHY_AUX_CFG5,
+ PHY_AUX_CFG6,
+ PHY_AUX_CFG7,
+ PHY_AUX_CFG8,
+ PHY_AUX_CFG9,
+ PHY_AUX_CFG_MAX,
+};
+
+static inline const char *mdss_dp_get_phy_aux_config_property(u32 cfg_type)
+{
+ switch (cfg_type) {
+ case PHY_AUX_CFG0:
+ return "qcom,aux-cfg0-settings";
+ case PHY_AUX_CFG1:
+ return "qcom,aux-cfg1-settings";
+ case PHY_AUX_CFG2:
+ return "qcom,aux-cfg2-settings";
+ case PHY_AUX_CFG3:
+ return "qcom,aux-cfg3-settings";
+ case PHY_AUX_CFG4:
+ return "qcom,aux-cfg4-settings";
+ case PHY_AUX_CFG5:
+ return "qcom,aux-cfg5-settings";
+ case PHY_AUX_CFG6:
+ return "qcom,aux-cfg6-settings";
+ case PHY_AUX_CFG7:
+ return "qcom,aux-cfg7-settings";
+ case PHY_AUX_CFG8:
+ return "qcom,aux-cfg8-settings";
+ case PHY_AUX_CFG9:
+ return "qcom,aux-cfg9-settings";
+ default:
+ return "unknown";
+ }
+}
+
+static inline char *mdss_dp_phy_aux_config_type_to_string(u32 cfg_type)
+{
+ switch (cfg_type) {
+ case PHY_AUX_CFG0:
+ return DP_ENUM_STR(PHY_AUX_CFG0);
+ case PHY_AUX_CFG1:
+ return DP_ENUM_STR(PHY_AUX_CFG1);
+ case PHY_AUX_CFG2:
+ return DP_ENUM_STR(PHY_AUX_CFG2);
+ case PHY_AUX_CFG3:
+ return DP_ENUM_STR(PHY_AUX_CFG3);
+ case PHY_AUX_CFG4:
+ return DP_ENUM_STR(PHY_AUX_CFG4);
+ case PHY_AUX_CFG5:
+ return DP_ENUM_STR(PHY_AUX_CFG5);
+ case PHY_AUX_CFG6:
+ return DP_ENUM_STR(PHY_AUX_CFG6);
+ case PHY_AUX_CFG7:
+ return DP_ENUM_STR(PHY_AUX_CFG7);
+ case PHY_AUX_CFG8:
+ return DP_ENUM_STR(PHY_AUX_CFG8);
+ case PHY_AUX_CFG9:
+ return DP_ENUM_STR(PHY_AUX_CFG9);
+ default:
+ return "unknown";
+ }
+}
+
+enum dp_aux_transaction {
+ DP_AUX_WRITE,
+ DP_AUX_READ
+};
+
+static inline char *mdss_dp_aux_transaction_to_string(u32 transaction)
+{
+ switch (transaction) {
+ case DP_AUX_WRITE:
+ return DP_ENUM_STR(DP_AUX_WRITE);
+ case DP_AUX_READ:
+ return DP_ENUM_STR(DP_AUX_READ);
+ default:
+ return "unknown";
+ }
+}
+
struct mdss_dp_drv_pdata {
/* device driver */
int (*on) (struct mdss_panel_data *pdata);
@@ -449,11 +550,11 @@ struct mdss_dp_drv_pdata {
bool core_clks_on;
bool link_clks_on;
bool power_on;
- bool sink_info_read;
u32 suspend_vic;
bool hpd;
bool psm_enabled;
bool audio_test_req;
+ bool dpcd_read_required;
/* dp specific */
unsigned char *base;
@@ -513,10 +614,9 @@ struct mdss_dp_drv_pdata {
struct completion aux_comp;
struct completion idle_comp;
struct completion video_comp;
- struct completion irq_comp;
+ struct completion notification_comp;
struct mutex aux_mutex;
struct mutex train_mutex;
- struct mutex pd_msg_mutex;
struct mutex attention_lock;
struct mutex hdcp_mutex;
bool cable_connected;
@@ -540,13 +640,14 @@ struct mdss_dp_drv_pdata {
struct dp_statistic dp_stat;
bool hpd_irq_on;
u32 hpd_notification_status;
+ atomic_t notification_pending;
struct mdss_dp_event_data dp_event;
struct task_struct *ev_thread;
/* dt settings */
char l_map[4];
- u32 aux_cfg[AUX_CFG_LEN];
+ struct mdss_dp_phy_cfg aux_cfg[PHY_AUX_CFG_MAX];
struct workqueue_struct *workq;
struct delayed_work hdcp_cb_work;
@@ -562,6 +663,7 @@ struct mdss_dp_drv_pdata {
struct dpcd_test_request test_data;
struct dpcd_sink_count sink_count;
+ struct dpcd_sink_count prev_sink_count;
struct list_head attention_head;
};
@@ -688,6 +790,7 @@ enum dp_aux_error {
EDP_AUX_ERR_NACK = -3,
EDP_AUX_ERR_DEFER = -4,
EDP_AUX_ERR_NACK_DEFER = -5,
+ EDP_AUX_ERR_PHY = -6,
};
static inline char *mdss_dp_get_aux_error(u32 aux_error)
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 8566b1d6985a..37209c161366 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -73,6 +73,21 @@ static int dp_buf_trailing(struct edp_buf *eb)
return (int)(eb->end - eb->data);
}
+static void mdss_dp_aux_clear_hw_interrupts(void __iomem *phy_base)
+{
+ u32 data;
+
+ data = dp_read(phy_base + DP_PHY_AUX_INTERRUPT_STATUS);
+ pr_debug("PHY_AUX_INTERRUPT_STATUS=0x%08x\n", data);
+
+ dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+ dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+ dp_write(phy_base + DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+
+ /* Ensure that all interrupts are cleared and acked */
+ wmb();
+}
+
/*
* edp aux dp_buf_add_cmd:
* NO native and i2c command mix allowed
@@ -123,35 +138,46 @@ static int dp_buf_add_cmd(struct edp_buf *eb, struct edp_cmd *cmd)
return cmd->len - 1;
}
-static int dp_cmd_fifo_tx(struct edp_buf *tp, unsigned char *base)
+static int dp_cmd_fifo_tx(struct mdss_dp_drv_pdata *dp)
{
u32 data;
- char *dp;
+ char *datap;
int len, cnt;
+ struct edp_buf *tp = &dp->txp;
+ void __iomem *base = dp->base;
+
len = tp->len; /* total byte to cmd fifo */
if (len == 0)
return 0;
cnt = 0;
- dp = tp->start;
+ datap = tp->start;
while (cnt < len) {
- data = *dp; /* data byte */
+ data = *datap; /* data byte */
data <<= 8;
data &= 0x00ff00; /* index = 0, write */
if (cnt == 0)
data |= BIT(31); /* INDEX_WRITE */
dp_write(base + DP_AUX_DATA, data);
cnt++;
- dp++;
+ datap++;
}
+ /* clear the current tx request before queuing a new one */
+ dp_write(base + DP_AUX_TRANS_CTRL, 0);
+
+ /* clear any previous PHY AUX interrupts */
+ mdss_dp_aux_clear_hw_interrupts(dp->phy_io.base);
+
data = (tp->trans_num - 1);
if (tp->i2c) {
data |= BIT(8); /* I2C */
- data |= BIT(10); /* NO SEND ADDR */
- data |= BIT(11); /* NO SEND STOP */
+ if (tp->no_send_addr)
+ data |= BIT(10); /* NO SEND ADDR */
+ if (tp->no_send_stop)
+ data |= BIT(11); /* NO SEND STOP */
}
data |= BIT(9); /* GO */
@@ -164,7 +190,7 @@ static int dp_cmd_fifo_rx(struct edp_buf *rp, int len, unsigned char *base)
{
u32 data;
char *dp;
- int i;
+ int i, actual_i;
data = 0; /* index = 0 */
data |= BIT(31); /* INDEX_WRITE */
@@ -177,7 +203,12 @@ static int dp_cmd_fifo_rx(struct edp_buf *rp, int len, unsigned char *base)
data = dp_read(base + DP_AUX_DATA);
for (i = 0; i < len; i++) {
data = dp_read(base + DP_AUX_DATA);
- *dp++ = (char)((data >> 8) & 0xff);
+ *dp++ = (char)((data >> 8) & 0xFF);
+
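+ /*
+ * Each FIFO data word is expected to carry its byte index in
+ * bits [23:16]; warn if it does not match the expected position.
+ */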
+ actual_i = (data >> 16) & 0xFF;
+ if (i != actual_i)
+ pr_warn("Index mismatch: expected %d, found %d\n",
+ i, actual_i);
}
rp->len = len;
@@ -214,9 +245,16 @@ static int dp_aux_write_cmds(struct mdss_dp_drv_pdata *ep,
reinit_completion(&ep->aux_comp);
- len = dp_cmd_fifo_tx(&ep->txp, ep->base);
+ tp->no_send_addr = true;
+ tp->no_send_stop = true;
+ len = dp_cmd_fifo_tx(ep);
- wait_for_completion_timeout(&ep->aux_comp, HZ/4);
+ if (!wait_for_completion_timeout(&ep->aux_comp, HZ/4)) {
+ pr_err("aux write timeout\n");
+ ep->aux_error_num = EDP_AUX_ERR_TOUT;
+ /* Reset the AUX controller state machine */
+ mdss_dp_aux_reset(&ep->ctrl_io);
+ }
if (ep->aux_error_num == EDP_AUX_ERR_NONE)
ret = len;
@@ -228,13 +266,6 @@ static int dp_aux_write_cmds(struct mdss_dp_drv_pdata *ep,
return ret;
}
-int dp_aux_write(void *ep, struct edp_cmd *cmd)
-{
- int rc = dp_aux_write_cmds(ep, cmd);
-
- return rc < 0 ? -EINVAL : 0;
-}
-
static int dp_aux_read_cmds(struct mdss_dp_drv_pdata *ep,
struct edp_cmd *cmds)
{
@@ -242,6 +273,7 @@ static int dp_aux_read_cmds(struct mdss_dp_drv_pdata *ep,
struct edp_buf *tp;
struct edp_buf *rp;
int len, ret;
+ u32 data;
mutex_lock(&ep->aux_mutex);
ep->aux_cmd_busy = 1;
@@ -270,10 +302,23 @@ static int dp_aux_read_cmds(struct mdss_dp_drv_pdata *ep,
reinit_completion(&ep->aux_comp);
- dp_cmd_fifo_tx(tp, ep->base);
+ tp->no_send_addr = true;
+ tp->no_send_stop = false;
+ dp_cmd_fifo_tx(ep);
- wait_for_completion_timeout(&ep->aux_comp, HZ/4);
+ if (!wait_for_completion_timeout(&ep->aux_comp, HZ/4)) {
+ pr_err("aux read timeout\n");
+ ep->aux_error_num = EDP_AUX_ERR_TOUT;
+ /* Reset the AUX controller state machine */
+ mdss_dp_aux_reset(&ep->ctrl_io);
+ ret = ep->aux_error_num;
+ goto end;
+ }
+ /* clear the current rx request before queuing a new one */
+ data = dp_read(ep->base + DP_AUX_TRANS_CTRL);
+ data &= (~BIT(9));
+ dp_write(ep->base + DP_AUX_TRANS_CTRL, data);
if (ep->aux_error_num == EDP_AUX_ERR_NONE) {
ret = dp_cmd_fifo_rx(rp, len, ep->base);
@@ -284,58 +329,128 @@ static int dp_aux_read_cmds(struct mdss_dp_drv_pdata *ep,
ret = ep->aux_error_num;
}
+end:
ep->aux_cmd_busy = 0;
mutex_unlock(&ep->aux_mutex);
return ret;
}
-int dp_aux_read(void *ep, struct edp_cmd *cmds)
-{
- int rc = dp_aux_read_cmds(ep, cmds);
-
- return rc < 0 ? -EINVAL : 0;
-}
-
void dp_aux_native_handler(struct mdss_dp_drv_pdata *ep, u32 isr)
{
- if (isr & EDP_INTR_AUX_I2C_DONE)
+ pr_debug("isr=0x%08x\n", isr);
+ if (isr & EDP_INTR_AUX_I2C_DONE) {
ep->aux_error_num = EDP_AUX_ERR_NONE;
- else if (isr & EDP_INTR_WRONG_ADDR)
+ } else if (isr & EDP_INTR_WRONG_ADDR) {
ep->aux_error_num = EDP_AUX_ERR_ADDR;
- else if (isr & EDP_INTR_TIMEOUT)
+ } else if (isr & EDP_INTR_TIMEOUT) {
ep->aux_error_num = EDP_AUX_ERR_TOUT;
- if (isr & EDP_INTR_NACK_DEFER)
+ } else if (isr & EDP_INTR_NACK_DEFER) {
ep->aux_error_num = EDP_AUX_ERR_NACK;
+ } else if (isr & EDP_INTR_PHY_AUX_ERR) {
+ ep->aux_error_num = EDP_AUX_ERR_PHY;
+ mdss_dp_aux_clear_hw_interrupts(ep->phy_io.base);
+ } else {
+ ep->aux_error_num = EDP_AUX_ERR_NONE;
+ }
complete(&ep->aux_comp);
}
void dp_aux_i2c_handler(struct mdss_dp_drv_pdata *ep, u32 isr)
{
+ pr_debug("isr=0x%08x\n", isr);
if (isr & EDP_INTR_AUX_I2C_DONE) {
if (isr & (EDP_INTR_I2C_NACK | EDP_INTR_I2C_DEFER))
ep->aux_error_num = EDP_AUX_ERR_NACK;
else
ep->aux_error_num = EDP_AUX_ERR_NONE;
} else {
- if (isr & EDP_INTR_WRONG_ADDR)
+ if (isr & EDP_INTR_WRONG_ADDR) {
ep->aux_error_num = EDP_AUX_ERR_ADDR;
- else if (isr & EDP_INTR_TIMEOUT)
+ } else if (isr & EDP_INTR_TIMEOUT) {
ep->aux_error_num = EDP_AUX_ERR_TOUT;
- if (isr & EDP_INTR_NACK_DEFER)
+ } else if (isr & EDP_INTR_NACK_DEFER) {
ep->aux_error_num = EDP_AUX_ERR_NACK_DEFER;
- if (isr & EDP_INTR_I2C_NACK)
+ } else if (isr & EDP_INTR_I2C_NACK) {
ep->aux_error_num = EDP_AUX_ERR_NACK;
- if (isr & EDP_INTR_I2C_DEFER)
+ } else if (isr & EDP_INTR_I2C_DEFER) {
ep->aux_error_num = EDP_AUX_ERR_DEFER;
+ } else if (isr & EDP_INTR_PHY_AUX_ERR) {
+ ep->aux_error_num = EDP_AUX_ERR_PHY;
+ mdss_dp_aux_clear_hw_interrupts(ep->phy_io.base);
+ } else {
+ ep->aux_error_num = EDP_AUX_ERR_NONE;
+ }
}
complete(&ep->aux_comp);
}
-static int dp_aux_write_buf(struct mdss_dp_drv_pdata *ep, u32 addr,
- char *buf, int len, int i2c)
+static int dp_aux_rw_cmds_retry(struct mdss_dp_drv_pdata *dp,
+ struct edp_cmd *cmd, enum dp_aux_transaction transaction)
+{
+ int const retry_count = 5;
+ int adjust_count = 0;
+ int i;
+ u32 aux_cfg1_config_count;
+ int ret;
+
+ aux_cfg1_config_count = mdss_dp_phy_aux_get_config_cnt(dp,
+ PHY_AUX_CFG1);
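+ /*
+ * Retry the transaction up to retry_count times with the current
+ * PHY AUX settings; on repeated failure, step PHY_AUX_CFG1 through
+ * its LUT and try again until every configured value is exhausted.
+ */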
+retry:
+ i = 0;
+ ret = 0;
+ do {
+ struct edp_cmd cmd1 = *cmd;
+
+ dp->aux_error_num = EDP_AUX_ERR_NONE;
+ pr_debug("Trying %s, iteration count: %d\n",
+ mdss_dp_aux_transaction_to_string(transaction),
+ i + 1);
+ if (transaction == DP_AUX_READ)
+ ret = dp_aux_read_cmds(dp, &cmd1);
+ else if (transaction == DP_AUX_WRITE)
+ ret = dp_aux_write_cmds(dp, &cmd1);
+
+ i++;
+ } while ((i < retry_count) && (ret < 0));
+
+ if (ret >= 0) /* rw success */
+ goto end;
+
+ if (adjust_count >= aux_cfg1_config_count) {
+ pr_err("PHY_AUX_CONFIG1 calibration failed\n");
+ goto end;
+ }
+
+ /* Adjust AUX configuration and retry */
+ pr_debug("AUX failure (%d), adjust AUX settings\n", ret);
+ mdss_dp_phy_aux_update_config(dp, PHY_AUX_CFG1);
+ adjust_count++;
+ goto retry;
+
+end:
+ return ret;
+}
+
+/**
+ * dp_aux_write_buf_retry() - send an AUX write command
+ * @dp: display port driver data
+ * @addr: AUX address (in hex) to write the command to
+ * @buf: the buffer containing the actual payload
+ * @len: the length of the buffer @buf
+ * @i2c: indicates if it is an i2c-over-aux transaction
+ * @retry: specifies if retries should be attempted upon failures
+ *
+ * Send an AUX write command with the specified payload over the AUX
+ * channel. This function can send either a native AUX command or an
+ * i2c-over-AUX command. In addition, if specified, it can also retry
+ * when failures are detected. The retry logic would adjust AUX PHY
+ * parameters on the fly.
+ */
+static int dp_aux_write_buf_retry(struct mdss_dp_drv_pdata *dp, u32 addr,
+ char *buf, int len, int i2c, bool retry)
{
struct edp_cmd cmd;
@@ -346,11 +461,42 @@ static int dp_aux_write_buf(struct mdss_dp_drv_pdata *ep, u32 addr,
cmd.len = len & 0x0ff;
cmd.next = 0;
- return dp_aux_write_cmds(ep, &cmd);
+ if (retry)
+ return dp_aux_rw_cmds_retry(dp, &cmd, DP_AUX_WRITE);
+ else
+ return dp_aux_write_cmds(dp, &cmd);
}
-static int dp_aux_read_buf(struct mdss_dp_drv_pdata *ep, u32 addr,
- int len, int i2c)
+static int dp_aux_write_buf(struct mdss_dp_drv_pdata *dp, u32 addr,
+ char *buf, int len, int i2c)
+{
+ return dp_aux_write_buf_retry(dp, addr, buf, len, i2c, true);
+}
+
+int dp_aux_write(void *dp, struct edp_cmd *cmd)
+{
+ int rc = dp_aux_write_cmds(dp, cmd);
+
+ return rc < 0 ? -EINVAL : 0;
+}
+
+/**
+ * dp_aux_read_buf_retry() - send an AUX read command
+ * @dp: display port driver data
+ * @addr: AUX address (in hex) to read from
+ * @len: number of bytes to read
+ * @i2c: indicates if it is an i2c-over-aux transaction
+ * @retry: specifies if retries should be attempted upon failures
+ *
+ * Send an AUX read command of the specified length over the AUX
+ * channel. This function can send either a native AUX command or an
+ * i2c-over-AUX command. In addition, if specified, it can also retry
+ * when failures are detected. The retry logic would adjust AUX PHY
+ * parameters on the fly.
+ */
+static int dp_aux_read_buf_retry(struct mdss_dp_drv_pdata *dp, u32 addr,
+ int len, int i2c, bool retry)
{
struct edp_cmd cmd = {0};
@@ -361,7 +507,23 @@ static int dp_aux_read_buf(struct mdss_dp_drv_pdata *ep, u32 addr,
cmd.len = len & 0x0ff;
cmd.next = 0;
- return dp_aux_read_cmds(ep, &cmd);
+ if (retry)
+ return dp_aux_rw_cmds_retry(dp, &cmd, DP_AUX_READ);
+ else
+ return dp_aux_read_cmds(dp, &cmd);
+}
+
+static int dp_aux_read_buf(struct mdss_dp_drv_pdata *dp, u32 addr,
+ int len, int i2c)
+{
+ return dp_aux_read_buf_retry(dp, addr, len, i2c, true);
+}
+
+int dp_aux_read(void *dp, struct edp_cmd *cmds)
+{
+ int rc = dp_aux_read_cmds(dp, cmds);
+
+ return rc < 0 ? -EINVAL : 0;
}
/*
@@ -733,16 +895,68 @@ static void dp_aux_send_checksum(struct mdss_dp_drv_pdata *dp, u32 checksum)
dp_aux_write_buf(dp, 0x260, data, 1, 0);
}
+int mdss_dp_aux_read_edid(struct mdss_dp_drv_pdata *dp,
+ u8 *buf, int size, int blk_num)
+{
+ int max_size_bytes = 16;
+ int rc, read_size;
+ int ret = 0;
+ u8 offset_lut[] = {0x0, 0x80};
+ u8 offset;
+
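+ /*
+ * Read the EDID block in 16 byte i2c-over-AUX chunks by default;
+ * the EDID read compliance test expects the full 128 byte block to
+ * be fetched in a single transaction.
+ */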
+ if (dp->test_data.test_requested == TEST_EDID_READ)
+ max_size_bytes = 128;
+
+ /*
+ * Calculate the offset of the desired EDID block to be read.
+ * For even blocks, offset starts at 0x0
+ * For odd blocks, offset starts at 0x80
+ */
+ if (blk_num % 2)
+ offset = offset_lut[1];
+ else
+ offset = offset_lut[0];
+
+ do {
+ struct edp_cmd cmd = {0};
+
+ read_size = min(size, max_size_bytes);
+ cmd.read = 1;
+ cmd.addr = EDID_START_ADDRESS;
+ cmd.len = read_size;
+ cmd.out_buf = buf;
+ cmd.i2c = 1;
+
+ /* Write the offset prior to reading the data */
+ pr_debug("offset=0x%x, size=%d\n", offset, size);
+ dp_aux_write_buf_retry(dp, EDID_START_ADDRESS, &offset, 1, 1,
+ false);
+ rc = dp_aux_read(dp, &cmd);
+ if (rc < 0) {
+ pr_err("aux read failed\n");
+ return rc;
+ }
+
+ print_hex_dump(KERN_DEBUG, "DP:EDID: ", DUMP_PREFIX_NONE, 16, 1,
+ buf, read_size, false);
+ buf += read_size;
+ offset += read_size;
+ size -= read_size;
+ ret += read_size;
+ } while (size > 0);
+
+ return ret;
+}
+
int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
{
- struct edp_buf *rp = &dp->rxp;
int rlen, ret = 0;
int edid_blk = 0, blk_num = 0, retries = 10;
bool edid_parsing_done = false;
- const u8 cea_tag = 0x02, start_ext_blk = 0x1;
u32 const segment_addr = 0x30;
u32 checksum = 0;
- char segment = 0x1;
+ bool phy_aux_update_requested = false;
+ bool ext_block_parsing_done = false;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -750,72 +964,91 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
return ret;
}
+ memset(dp->edid_buf, 0, dp->edid_buf_size);
+
/**
* Parse the test request vector to see whether there is a
* TEST_EDID_READ test request.
*/
dp_sink_parse_test_request(dp);
- do {
- rlen = dp_aux_read_buf(dp, EDID_START_ADDRESS +
- (blk_num * EDID_BLOCK_SIZE),
- EDID_BLOCK_SIZE, 1);
+ while (retries) {
+ u8 segment;
+ u8 edid_buf[EDID_BLOCK_SIZE] = {0};
+
+ /*
+ * Write the segment first.
+ * Segment = 0, for blocks 0 and 1
+ * Segment = 1, for blocks 2 and 3
+ * Segment = 2, for blocks 4 and 5
+ * and so on ...
+ */
+ segment = blk_num >> 1;
+ dp_aux_write_buf_retry(dp, segment_addr, &segment, 1, 1, false);
+
+ rlen = mdss_dp_aux_read_edid(dp, edid_buf, EDID_BLOCK_SIZE,
+ blk_num);
if (rlen != EDID_BLOCK_SIZE) {
- pr_err("Read failed. rlen=%d\n", rlen);
+ pr_err("Read failed. rlen=%s\n",
+ mdss_dp_get_aux_error(rlen));
+ mdss_dp_phy_aux_update_config(dp, PHY_AUX_CFG1);
+ phy_aux_update_requested = true;
+ retries--;
continue;
}
-
pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen);
-
- if (dp_edid_is_valid_header(rp->data)) {
- ret = dp_edid_buf_error(rp->data, rp->len);
+ print_hex_dump(KERN_DEBUG, "DP:EDID: ", DUMP_PREFIX_NONE, 16, 1,
+ edid_buf, EDID_BLOCK_SIZE, false);
+ if (dp_edid_is_valid_header(edid_buf)) {
+ ret = dp_edid_buf_error(edid_buf, rlen);
if (ret) {
pr_err("corrupt edid block detected\n");
+ mdss_dp_phy_aux_update_config(dp, PHY_AUX_CFG1);
+ phy_aux_update_requested = true;
+ retries--;
continue;
}
if (edid_parsing_done) {
+ pr_debug("block 0 parsed already\n");
blk_num++;
+ retries--;
continue;
}
- dp_extract_edid_manufacturer(&dp->edid, rp->data);
- dp_extract_edid_product(&dp->edid, rp->data);
- dp_extract_edid_version(&dp->edid, rp->data);
- dp_extract_edid_ext_block_cnt(&dp->edid, rp->data);
- dp_extract_edid_video_support(&dp->edid, rp->data);
- dp_extract_edid_feature(&dp->edid, rp->data);
+ dp_extract_edid_manufacturer(&dp->edid, edid_buf);
+ dp_extract_edid_product(&dp->edid, edid_buf);
+ dp_extract_edid_version(&dp->edid, edid_buf);
+ dp_extract_edid_ext_block_cnt(&dp->edid, edid_buf);
+ dp_extract_edid_video_support(&dp->edid, edid_buf);
+ dp_extract_edid_feature(&dp->edid, edid_buf);
dp_extract_edid_detailed_timing_description(&dp->edid,
- rp->data);
+ edid_buf);
edid_parsing_done = true;
+ } else if (!edid_parsing_done) {
+ pr_debug("Invalid edid block 0 header\n");
+ /* Retry block 0 with adjusted phy aux settings */
+ mdss_dp_phy_aux_update_config(dp, PHY_AUX_CFG1);
+ phy_aux_update_requested = true;
+ retries--;
+ continue;
} else {
edid_blk++;
blk_num++;
-
- /* fix dongle byte shift issue */
- if (edid_blk == 1 && rp->data[0] != cea_tag) {
- u8 tmp[EDID_BLOCK_SIZE - 1];
-
- memcpy(tmp, rp->data, EDID_BLOCK_SIZE - 1);
- rp->data[0] = cea_tag;
- memcpy(rp->data + 1, tmp, EDID_BLOCK_SIZE - 1);
- }
}
memcpy(dp->edid_buf + (edid_blk * EDID_BLOCK_SIZE),
- rp->data, EDID_BLOCK_SIZE);
+ edid_buf, EDID_BLOCK_SIZE);
- checksum = rp->data[rp->len - 1];
+ checksum = edid_buf[rlen - 1];
/* break if no more extension blocks present */
- if (edid_blk == dp->edid.ext_block_cnt)
+ if (edid_blk >= dp->edid.ext_block_cnt) {
+ ext_block_parsing_done = true;
break;
-
- /* write segment number to read block 3 onwards */
- if (edid_blk == start_ext_blk)
- dp_aux_write_buf(dp, segment_addr, &segment, 1, 1);
- } while (retries--);
+ }
+ }
if (dp->test_data.test_requested == TEST_EDID_READ) {
pr_debug("sending checksum %d\n", checksum);
@@ -823,6 +1056,18 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
dp->test_data = (const struct dpcd_test_request){ 0 };
}
+ /*
+ * Trigger the reading of DPCD if there was a change in the AUX
+ * configuration caused by a failure while reading the EDID.
+ * This is required to ensure the integrity and validity
+ * of the sink capabilities read that will subsequently be used
+ * to establish the mainlink.
+ */
+ if (edid_parsing_done && ext_block_parsing_done
+ && phy_aux_update_requested) {
+ dp->dpcd_read_required = true;
+ }
+
return ret;
}
@@ -834,6 +1079,10 @@ int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep)
struct dpcd_cap *cap;
struct edp_buf *rp;
int rlen;
+ int i;
+
+ cap = &ep->dpcd;
+ memset(cap, 0, sizeof(*cap));
rlen = dp_aux_read_buf(ep, 0, len, 0);
if (rlen <= 0) {
@@ -848,11 +1097,8 @@ int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep)
}
rp = &ep->rxp;
- cap = &ep->dpcd;
bp = rp->data;
- memset(cap, 0, sizeof(*cap));
-
data = *bp++; /* byte 0 */
cap->major = (data >> 4) & 0x0f;
cap->minor = data & 0x0f;
@@ -909,6 +1155,11 @@ int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep)
data = *bp++; /* Byte 7: DOWN_STREAM_PORT_COUNT */
cap->downstream_port.dfp_count = data & 0x7;
+ if (cap->downstream_port.dfp_count > DP_MAX_DS_PORT_COUNT) {
+ pr_debug("DS port count %d greater that max (%d) supported\n",
+ cap->downstream_port.dfp_count, DP_MAX_DS_PORT_COUNT);
+ cap->downstream_port.dfp_count = DP_MAX_DS_PORT_COUNT;
+ }
cap->downstream_port.msa_timing_par_ignored = data & BIT(6);
cap->downstream_port.oui_support = data & BIT(7);
pr_debug("dfp_count = %d, msa_timing_par_ignored = %d\n",
@@ -916,17 +1167,23 @@ int mdss_dp_dpcd_cap_read(struct mdss_dp_drv_pdata *ep)
cap->downstream_port.msa_timing_par_ignored);
pr_debug("oui_support = %d\n", cap->downstream_port.oui_support);
- data = *bp++; /* byte 8 */
- if (data & BIT(1)) {
- cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
- pr_debug("edid presented\n");
- }
-
- data = *bp++; /* byte 9 */
- cap->rx_port0_buf_size = (data + 1) * 32;
- pr_debug("lane_buf_size=%d\n", cap->rx_port0_buf_size);
+ for (i = 0; i < DP_MAX_DS_PORT_COUNT; i++) {
+ data = *bp++; /* byte 8 + i*2 */
+ pr_debug("parsing capabilities for DS port %d\n", i);
+ if (data & BIT(1)) {
+ if (i == 0)
+ cap->flags |= DPCD_PORT_0_EDID_PRESENTED;
+ else
+ cap->flags |= DPCD_PORT_1_EDID_PRESENTED;
+ pr_debug("local edid present\n");
+ } else {
+ pr_debug("local edid absent\n");
+ }
- bp += 2; /* skip 10, 11 port1 capability */
+ data = *bp++; /* byte 9 + i*2 */
+ cap->rx_port_buf_size[i] = (data + 1) * 32;
+ pr_debug("lane_buf_size=%d\n", cap->rx_port_buf_size[i]);
+ }
data = *bp++; /* byte 12 */
cap->i2c_speed_ctrl = data;
@@ -1258,6 +1515,8 @@ static void dp_sink_parse_sink_count(struct mdss_dp_drv_pdata *ep)
int const param_len = 0x1;
int const sink_count_addr = 0x200;
+ ep->prev_sink_count = ep->sink_count;
+
rlen = dp_aux_read_buf(ep, sink_count_addr, param_len, 0);
if (rlen < param_len) {
pr_err("failed to read sink count\n");
@@ -2363,8 +2622,8 @@ clear:
void mdss_dp_aux_parse_sink_status_field(struct mdss_dp_drv_pdata *ep)
{
dp_sink_parse_sink_count(ep);
- dp_sink_parse_test_request(ep);
mdss_dp_aux_link_status_read(ep, 6);
+ dp_sink_parse_test_request(ep);
}
int mdss_dp_dpcd_status_read(struct mdss_dp_drv_pdata *ep)
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index ea492f54054c..0d9cf7b72b4d 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -858,6 +858,48 @@ void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
pr_debug("dp_tu=0x%x\n", dp_tu);
}
+void mdss_dp_aux_set_limits(struct dss_io_data *ctrl_io)
+{
+ u32 const max_aux_timeout_count = 0xFFFFF;
+ u32 const max_aux_limits = 0xFFFFFFFF;
+
+ pr_debug("timeout=0x%x, limits=0x%x\n",
+ max_aux_timeout_count, max_aux_limits);
+
+ writel_relaxed(max_aux_timeout_count,
+ ctrl_io->base + DP_AUX_TIMEOUT_COUNT);
+ writel_relaxed(max_aux_limits, ctrl_io->base + DP_AUX_LIMITS);
+}
+
+void mdss_dp_phy_aux_update_config(struct mdss_dp_drv_pdata *dp,
+ enum dp_phy_aux_config_type config_type)
+{
+ u32 new_index;
+ struct dss_io_data *phy_io = &dp->phy_io;
+ struct mdss_dp_phy_cfg *cfg = mdss_dp_phy_aux_get_config(dp,
+ config_type);
+
+ if (!cfg) {
+ pr_err("invalid config type %s",
+ mdss_dp_phy_aux_config_type_to_string(config_type));
+ return;
+ }
+
+ new_index = (cfg->current_index + 1) % cfg->cfg_cnt;
+
+ pr_debug("Updating %s from 0x%08x to 0x%08x\n",
+ mdss_dp_phy_aux_config_type_to_string(config_type),
+ cfg->lut[cfg->current_index], cfg->lut[new_index]);
+ writel_relaxed(cfg->lut[new_index], phy_io->base + cfg->offset);
+ cfg->current_index = new_index;
+
+ /* Make sure the new HW configuration takes effect */
+ wmb();
+
+ /* Reset the AUX controller before any subsequent transactions */
+ mdss_dp_aux_reset(&dp->ctrl_io);
+}
+
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io, char *l_map)
{
u8 bits_per_lane = 2;
@@ -870,26 +912,24 @@ void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io, char *l_map)
ctrl_io->base + DP_LOGICAL2PHYSCIAL_LANE_MAPPING);
}
-void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io, u32 *aux_cfg,
- u32 phy_reg_offset)
+void mdss_dp_phy_aux_setup(struct mdss_dp_drv_pdata *dp)
{
- void __iomem *adjusted_phy_io_base = phy_io->base + phy_reg_offset;
+ int i;
+ void __iomem *adjusted_phy_io_base = dp->phy_io.base +
+ dp->phy_reg_offset;
writel_relaxed(0x3d, adjusted_phy_io_base + DP_PHY_PD_CTL);
- /* DP AUX CFG register programming */
- writel_relaxed(aux_cfg[0], adjusted_phy_io_base + DP_PHY_AUX_CFG0);
- writel_relaxed(aux_cfg[1], adjusted_phy_io_base + DP_PHY_AUX_CFG1);
- writel_relaxed(aux_cfg[2], adjusted_phy_io_base + DP_PHY_AUX_CFG2);
- writel_relaxed(aux_cfg[3], adjusted_phy_io_base + DP_PHY_AUX_CFG3);
- writel_relaxed(aux_cfg[4], adjusted_phy_io_base + DP_PHY_AUX_CFG4);
- writel_relaxed(aux_cfg[5], adjusted_phy_io_base + DP_PHY_AUX_CFG5);
- writel_relaxed(aux_cfg[6], adjusted_phy_io_base + DP_PHY_AUX_CFG6);
- writel_relaxed(aux_cfg[7], adjusted_phy_io_base + DP_PHY_AUX_CFG7);
- writel_relaxed(aux_cfg[8], adjusted_phy_io_base + DP_PHY_AUX_CFG8);
- writel_relaxed(aux_cfg[9], adjusted_phy_io_base + DP_PHY_AUX_CFG9);
-
- writel_relaxed(0x1f, adjusted_phy_io_base + DP_PHY_AUX_INTERRUPT_MASK);
+ for (i = 0; i < PHY_AUX_CFG_MAX; i++) {
+ struct mdss_dp_phy_cfg *cfg = mdss_dp_phy_aux_get_config(dp, i);
+
+ pr_debug("%s: offset=0x%08x, value=0x%08x\n",
+ mdss_dp_phy_aux_config_type_to_string(i), cfg->offset,
+ cfg->lut[cfg->current_index]);
+ writel_relaxed(cfg->lut[cfg->current_index],
+ dp->phy_io.base + cfg->offset);
+ }
+ writel_relaxed(0x1e, adjusted_phy_io_base + DP_PHY_AUX_INTERRUPT_MASK);
}
int mdss_dp_irq_setup(struct mdss_dp_drv_pdata *dp_drv)
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h
index 8f19e7cdf3cf..4c93e48e97dc 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.h
+++ b/drivers/video/fbdev/msm/mdss_dp_util.h
@@ -35,6 +35,8 @@
#define DP_AUX_CTRL (0x00000230)
#define DP_AUX_DATA (0x00000234)
#define DP_AUX_TRANS_CTRL (0x00000238)
+#define DP_AUX_TIMEOUT_COUNT (0x0000023C)
+#define DP_AUX_LIMITS (0x00000240)
#define DP_AUX_STATUS (0x00000244)
#define DP_DPCD_CP_IRQ (0x201)
@@ -163,6 +165,7 @@
#define DP_PHY_AUX_CFG9 (0x00000040)
#define DP_PHY_AUX_INTERRUPT_MASK (0x00000044)
#define DP_PHY_AUX_INTERRUPT_CLEAR (0x00000048)
+#define DP_PHY_AUX_INTERRUPT_STATUS (0x000000B8)
#define DP_PHY_SPARE0 0x00A8
@@ -271,6 +274,19 @@ static const struct dp_vc_tu_mapping_table tu_table[] = {
0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
};
+static inline struct mdss_dp_phy_cfg *mdss_dp_phy_aux_get_config(
+ struct mdss_dp_drv_pdata *dp, enum dp_phy_aux_config_type cfg_type)
+{
+ return &dp->aux_cfg[cfg_type];
+}
+
+static inline u32 mdss_dp_phy_aux_get_config_cnt(
+ struct mdss_dp_drv_pdata *dp, enum dp_phy_aux_config_type cfg_type)
+{
+ return dp->aux_cfg[cfg_type].cfg_cnt;
+}
+
+void mdss_dp_aux_set_limits(struct dss_io_data *ctrl_io);
int dp_aux_read(void *ep, struct edp_cmd *cmds);
int dp_aux_write(void *ep, struct edp_cmd *cmd);
void mdss_dp_state_ctrl(struct dss_io_data *ctrl_io, u32 data);
@@ -285,8 +301,9 @@ void mdss_dp_assert_phy_reset(struct dss_io_data *ctrl_io, bool assert);
void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
u8 ln_cnt, u32 res, struct mdss_panel_info *pinfo);
void mdss_dp_config_misc(struct mdss_dp_drv_pdata *dp, u32 bd, u32 cc);
-void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io, u32 *aux_cfg,
- u32 phy_reg_offset);
+void mdss_dp_phy_aux_setup(struct mdss_dp_drv_pdata *dp);
+void mdss_dp_phy_aux_update_config(struct mdss_dp_drv_pdata *dp,
+ enum dp_phy_aux_config_type config_type);
void mdss_dp_hpd_configure(struct dss_io_data *ctrl_io, bool enable);
void mdss_dp_aux_ctrl(struct dss_io_data *ctrl_io, bool enable);
void mdss_dp_mainlink_ctrl(struct dss_io_data *ctrl_io, bool enable);
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 17722eac3006..b1552829508d 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -2713,10 +2713,7 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
rc = mdss_dsi_reconfig(pdata, mode);
break;
case MDSS_EVENT_DSI_PANEL_STATUS:
- if (ctrl_pdata->check_status)
- rc = ctrl_pdata->check_status(ctrl_pdata);
- else
- rc = true;
+ rc = mdss_dsi_check_panel_status(ctrl_pdata, arg);
break;
case MDSS_EVENT_PANEL_TIMING_SWITCH:
rc = mdss_dsi_panel_timing_switch(ctrl_pdata, arg);
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
index 2a76466abf3e..00f23380591b 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.h
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -704,6 +704,7 @@ void mdss_dsi_cfg_lane_ctrl(struct mdss_dsi_ctrl_pdata *ctrl,
void mdss_dsi_set_reg(struct mdss_dsi_ctrl_pdata *ctrl, int off,
u32 mask, u32 val);
int mdss_dsi_phy_pll_reset_status(struct mdss_dsi_ctrl_pdata *ctrl);
+int mdss_dsi_check_panel_status(struct mdss_dsi_ctrl_pdata *ctrl, void *arg);
static inline const char *__mdss_dsi_pm_name(enum dsi_pm_type module)
{
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index 37f3929a3a2c..3b48bb642792 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -3031,7 +3031,10 @@ bool mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl)
* warning message is ignored.
*/
if (ctrl->panel_data.panel_info.esd_check_enabled &&
- (ctrl->status_mode == ESD_BTA) && (status & 0x1008000))
+ ((ctrl->status_mode == ESD_BTA) ||
+ (ctrl->status_mode == ESD_REG) ||
+ (ctrl->status_mode == ESD_REG_NT35596)) &&
+ (status & 0x1008000))
return false;
pr_err("%s: status=%x\n", __func__, status);
@@ -3262,8 +3265,10 @@ irqreturn_t mdss_dsi_isr(int irq, void *ptr)
* cleared.
*/
if (ctrl->panel_data.panel_info.esd_check_enabled &&
- (ctrl->status_mode == ESD_BTA) &&
- (ctrl->panel_mode == DSI_VIDEO_MODE)) {
+ ((ctrl->status_mode == ESD_BTA) ||
+ (ctrl->status_mode == ESD_REG) ||
+ (ctrl->status_mode == ESD_REG_NT35596)) &&
+ (ctrl->panel_mode == DSI_VIDEO_MODE)) {
isr &= ~DSI_INTR_ERROR;
/* clear only overflow */
mdss_dsi_set_reg(ctrl, 0x0c, 0x44440000, 0x44440000);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_status.c b/drivers/video/fbdev/msm/mdss_dsi_status.c
index 4208c2c43efb..0f24f66dbcc6 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_status.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_status.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,35 @@ static uint32_t interval = STATUS_CHECK_INTERVAL_MS;
static int32_t dsi_status_disable = DSI_STATUS_CHECK_INIT;
struct dsi_status_data *pstatus_data;
+int mdss_dsi_check_panel_status(struct mdss_dsi_ctrl_pdata *ctrl, void *arg)
+{
+ struct mdss_mdp_ctl *ctl = NULL;
+ struct msm_fb_data_type *mfd = arg;
+ int ret = 0;
+
+ if (!mfd)
+ return -EINVAL;
+
+ ctl = mfd_to_ctl(mfd);
+
+ if (!ctl || !ctrl)
+ return -EINVAL;
+
+ mutex_lock(&ctl->offlock);
+ /*
+ * If the check_status method is not defined, there is no need
+ * to fail this function; return a positive value instead.
+ */
+ if (ctrl->check_status)
+ ret = ctrl->check_status(ctrl);
+ else
+ ret = 1;
+ mutex_unlock(&ctl->offlock);
+
+ return ret;
+}
+
/*
* check_dsi_ctrl_status() - Reads MFD structure and
* calls platform specific DSI ctrl Status function.
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 93643246935e..698c5633cf6a 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -653,7 +653,7 @@ static ssize_t mdss_fb_get_panel_status(struct device *dev,
ret = scnprintf(buf, PAGE_SIZE, "panel_status=%s\n", "suspend");
} else {
panel_status = mdss_fb_send_panel_event(mfd,
- MDSS_EVENT_DSI_PANEL_STATUS, NULL);
+ MDSS_EVENT_DSI_PANEL_STATUS, mfd);
ret = scnprintf(buf, PAGE_SIZE, "panel_status=%s\n",
panel_status > 0 ? "alive" : "dead");
}
@@ -1596,13 +1596,30 @@ static int mdss_fb_resume(struct platform_device *pdev)
static int mdss_fb_pm_suspend(struct device *dev)
{
struct msm_fb_data_type *mfd = dev_get_drvdata(dev);
+ int rc = 0;
if (!mfd)
return -ENODEV;
dev_dbg(dev, "display pm suspend\n");
- return mdss_fb_suspend_sub(mfd);
+ rc = mdss_fb_suspend_sub(mfd);
+
+ /*
+ * Call MDSS footswitch control to ensure the GDSC is off
+ * after the pm suspend call. In some cases the MDSS runtime
+ * suspend does not trigger even though the clock ref count
+ * is zero after fb pm suspend.
+ */
+ if (!rc) {
+ if (mfd->mdp.footswitch_ctrl)
+ mfd->mdp.footswitch_ctrl(false);
+ } else {
+ pr_err("fb pm suspend failed, rc: %d\n", rc);
+ }
+
+ return rc;
}
static int mdss_fb_pm_resume(struct device *dev)
@@ -1622,6 +1639,9 @@ static int mdss_fb_pm_resume(struct device *dev)
pm_runtime_set_suspended(dev);
pm_runtime_enable(dev);
+ if (mfd->mdp.footswitch_ctrl)
+ mfd->mdp.footswitch_ctrl(true);
+
return mdss_fb_resume_sub(mfd);
}
#endif
diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h
index 8e5fc5949770..518c24810acd 100644
--- a/drivers/video/fbdev/msm/mdss_fb.h
+++ b/drivers/video/fbdev/msm/mdss_fb.h
@@ -232,6 +232,7 @@ struct msm_mdp_interface {
int (*configure_panel)(struct msm_fb_data_type *mfd, int mode,
int dest_ctrl);
int (*input_event_handler)(struct msm_fb_data_type *mfd);
+ void (*footswitch_ctrl)(bool on);
int (*pp_release_fnc)(struct msm_fb_data_type *mfd);
void *private1;
};
diff --git a/drivers/video/fbdev/msm/mdss_hdcp_1x.c b/drivers/video/fbdev/msm/mdss_hdcp_1x.c
index 834726e84bda..2dc9c8f96c5b 100644
--- a/drivers/video/fbdev/msm/mdss_hdcp_1x.c
+++ b/drivers/video/fbdev/msm/mdss_hdcp_1x.c
@@ -1381,7 +1381,8 @@ int hdcp_1x_authenticate(void *input)
flush_delayed_work(&hdcp->hdcp_auth_work);
- if (!hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+ if (!hdcp_1x_state(HDCP_STATE_INACTIVE) &&
+ !hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) {
pr_err("invalid state\n");
return -EINVAL;
}
@@ -1443,7 +1444,6 @@ int hdcp_1x_reauthenticate(void *input)
DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
- hdcp->hdcp_state = HDCP_STATE_INACTIVE;
hdcp_1x_authenticate(hdcp);
return ret;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index 37c4be6135aa..599f6cb44c63 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -2604,6 +2604,11 @@ void hdmi_edid_set_video_resolution(void *input, u32 resolution, bool reset)
return;
}
+ if (resolution == HDMI_VFRMT_UNKNOWN) {
+ pr_debug("%s: Default video resolution not set\n", __func__);
+ return;
+ }
+
edid_ctrl->video_resolution = resolution;
if (reset) {
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 171f44815430..a645a3495593 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -233,7 +233,6 @@ static struct mdss_mdp_irq mdp_irq_map[] = {
static struct intr_callback *mdp_intr_cb;
-static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
static int mdss_mdp_parse_dt(struct platform_device *pdev);
static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev);
static int mdss_mdp_parse_dt_mixer(struct platform_device *pdev);
@@ -5172,7 +5171,7 @@ static void mdss_mdp_notify_idle_pc(struct mdss_data_type *mdata)
* active (but likely in an idle state), the vote for the CX and the batfet
* rails should not be released.
*/
-static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
+void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
{
int ret;
int active_cnt = 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 56af021e8cfc..db037ed263b4 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -1640,6 +1640,7 @@ int mdss_mdp_set_intr_callback_nosync(u32 intr_type, u32 intf_num,
void (*fnc_ptr)(void *), void *arg);
u32 mdss_mdp_get_irq_mask(u32 intr_type, u32 intf_num);
+void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on);
void mdss_mdp_footswitch_ctrl_splash(int on);
void mdss_mdp_batfet_ctrl(struct mdss_data_type *mdata, int enable);
void mdss_mdp_set_clk_rate(unsigned long min_clk_rate, bool locked);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index c800bbe4963c..8c612e2b83fb 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -6291,6 +6291,13 @@ int mdss_mdp_input_event_handler(struct msm_fb_data_type *mfd)
return rc;
}
+void mdss_mdp_footswitch_ctrl_handler(bool on)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ mdss_mdp_footswitch_ctrl(mdata, on);
+}
+
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
{
struct device *dev = mfd->fbi->dev;
@@ -6333,6 +6340,14 @@ int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
mdp5_interface->configure_panel = mdss_mdp_update_panel_info;
mdp5_interface->input_event_handler = mdss_mdp_input_event_handler;
+ /*
+ * Register footswitch control only for primary fb pm
+ * suspend/resume calls.
+ */
+ if (mfd->panel_info->is_prim_panel)
+ mdp5_interface->footswitch_ctrl =
+ mdss_mdp_footswitch_ctrl_handler;
+
if (mfd->panel_info->type == WRITEBACK_PANEL) {
mdp5_interface->atomic_validate =
mdss_mdp_layer_atomic_validate_wfd;
diff --git a/include/linux/io-pgtable-fast.h b/include/linux/io-pgtable-fast.h
index 029e11f9919b..6a56f0039f15 100644
--- a/include/linux/io-pgtable-fast.h
+++ b/include/linux/io-pgtable-fast.h
@@ -37,7 +37,7 @@ void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size);
#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa
void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, u64 base,
- u64 end, bool skip_sync);
+ u64 start, u64 end, bool skip_sync);
void av8l_register_notify(struct notifier_block *nb);
#else /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
@@ -46,6 +46,7 @@ void av8l_register_notify(struct notifier_block *nb);
static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds,
u64 base,
+ u64 start,
u64 end,
bool skip_sync)
{
diff --git a/include/linux/msm_smd_pkt.h b/include/linux/msm_smd_pkt.h
new file mode 100644
index 000000000000..c79933d27d4a
--- /dev/null
+++ b/include/linux/msm_smd_pkt.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2010,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_MSM_SMD_PKT_H
+#define __LINUX_MSM_SMD_PKT_H
+
+#include <linux/ioctl.h>
+
+#define SMD_PKT_IOCTL_MAGIC (0xC2)
+
+#define SMD_PKT_IOCTL_BLOCKING_WRITE \
+ _IOR(SMD_PKT_IOCTL_MAGIC, 0, unsigned int)
+
+#endif /* __LINUX_MSM_SMD_PKT_H */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 654bb97a3188..5bc4836af286 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -77,6 +77,12 @@ struct pmu_hw_events {
struct arm_pmu *percpu_pmu;
};
+enum armpmu_pmu_states {
+ ARM_PMU_STATE_OFF,
+ ARM_PMU_STATE_RUNNING,
+ ARM_PMU_STATE_GOING_DOWN,
+};
+
struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
@@ -101,6 +107,8 @@ struct arm_pmu {
void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
int num_events;
+ int pmu_state;
+ int percpu_irq;
atomic_t active_events;
struct mutex reserve_mutex;
u64 max_period;
diff --git a/include/linux/qpnp/qpnp-revid.h b/include/linux/qpnp/qpnp-revid.h
index 7fca674b6230..b025df568259 100644
--- a/include/linux/qpnp/qpnp-revid.h
+++ b/include/linux/qpnp/qpnp-revid.h
@@ -214,6 +214,11 @@
#define PM660L_V1P1_REV3 0x01
#define PM660L_V1P1_REV4 0x01
+#define PM660L_V2P0_REV1 0x00
+#define PM660L_V2P0_REV2 0x00
+#define PM660L_V2P0_REV3 0x00
+#define PM660L_V2P0_REV4 0x02
+
/* PMI8998 FAB_ID */
#define PMI8998_FAB_ID_SMIC 0x11
#define PMI8998_FAB_ID_GF 0x30
diff --git a/include/linux/regulator/qpnp-labibb-regulator.h b/include/linux/regulator/qpnp-labibb-regulator.h
index 247069507fd9..33985afeb6e9 100644
--- a/include/linux/regulator/qpnp-labibb-regulator.h
+++ b/include/linux/regulator/qpnp-labibb-regulator.h
@@ -15,6 +15,7 @@
enum labibb_notify_event {
LAB_VREG_OK = 1,
+ LAB_VREG_NOT_OK,
};
int qpnp_labibb_notifier_register(struct notifier_block *nb);
diff --git a/include/soc/qcom/minidump.h b/include/soc/qcom/minidump.h
new file mode 100644
index 000000000000..9d993a17fb89
--- /dev/null
+++ b/include/soc/qcom/minidump.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MINIDUMP_H
+#define __MINIDUMP_H
+
+#define MAX_NAME_LENGTH 16
+/* md_region - Minidump table entry
+ * @name: Entry name, Minidump will dump binary with this name.
+ * @id: Entry ID, used only for SDI dumps.
+ * @virt_addr: Virtual address of the entry.
+ * @phys_addr: Physical address of the entry to dump.
+ * @size: Number of bytes to dump from the @address location;
+ * it should be 4-byte aligned.
+ */
+struct md_region {
+ char name[MAX_NAME_LENGTH];
+ u32 id;
+ u64 virt_addr;
+ u64 phys_addr;
+ u64 size;
+};
+
+/* Register an entry in the Minidump table
+ * Returns:
+ * Zero on successful addition
+ * Negative error number on failure
+ */
+#ifdef CONFIG_QCOM_MINIDUMP
+extern int msm_minidump_add_region(const struct md_region *entry);
+extern bool msm_minidump_enabled(void);
+#else
+static inline int msm_minidump_add_region(const struct md_region *entry)
+{
+ return -ENODEV;
+}
+static inline bool msm_minidump_enabled(void) { return false; }
+#endif
+#endif
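The msm_rtb.c hunk below shows the in-tree caller of this API. As a rough illustration only, a client driver that owns a physically contiguous buffer would register it roughly as follows; the function name, region name, and buffer parameters here are hypothetical and not part of this patch:

	#include <linux/string.h>
	#include <linux/types.h>
	#include <soc/qcom/minidump.h>

	/* Hypothetical example: register a driver-owned buffer with Minidump. */
	static int example_register_log_buf(void *vaddr, phys_addr_t paddr, size_t size)
	{
		/* Zero-init so the optional @id field (used only for SDI dumps) is 0. */
		struct md_region md_entry = {0};

		strlcpy(md_entry.name, "EXLOG_BUF", sizeof(md_entry.name));
		md_entry.virt_addr = (uintptr_t)vaddr;
		md_entry.phys_addr = paddr;
		md_entry.size = size;	/* expected to be 4-byte aligned */

		/* Returns 0 on success, negative error number on failure. */
		return msm_minidump_add_region(&md_entry);
	}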
diff --git a/kernel/trace/msm_rtb.c b/kernel/trace/msm_rtb.c
index 80058b544cb5..587082117842 100644
--- a/kernel/trace/msm_rtb.c
+++ b/kernel/trace/msm_rtb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@
#include <asm-generic/sizes.h>
#include <linux/msm_rtb.h>
#include <asm/timex.h>
+#include <soc/qcom/minidump.h>
#define SENTINEL_BYTE_1 0xFF
#define SENTINEL_BYTE_2 0xAA
@@ -243,6 +244,7 @@ EXPORT_SYMBOL(uncached_logk);
static int msm_rtb_probe(struct platform_device *pdev)
{
struct msm_rtb_platform_data *d = pdev->dev.platform_data;
+ struct md_region md_entry;
#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
unsigned int cpu;
#endif
@@ -294,6 +296,12 @@ static int msm_rtb_probe(struct platform_device *pdev)
memset(msm_rtb.rtb, 0, msm_rtb.size);
+ strlcpy(md_entry.name, "KRTB_BUF", sizeof(md_entry.name));
+ md_entry.virt_addr = (uintptr_t)msm_rtb.rtb;
+ md_entry.phys_addr = msm_rtb.phys;
+ md_entry.size = msm_rtb.size;
+ if (msm_minidump_add_region(&md_entry))
+ pr_info("Failed to add RTB in Minidump\n");
#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
for_each_possible_cpu(cpu) {
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index e10a04c9cdc7..cf336d670f8b 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -135,6 +135,18 @@ config IP6_NF_IPTABLES
if IP6_NF_IPTABLES
+config IP6_NF_IPTABLES_128
+ tristate "128 bit arithmetic for iptables matching"
+ depends on IP6_NF_IPTABLES
+ help
+ This enables 128 bit arithmetic in ip6tables address matching to
+ optimize the case where no address match is required. ip6tables
+ matching for IPv6 always applies a mask when an address is
+ specified, so checking for an all-zero mask first avoids the
+ masked comparison and improves performance.
+
+ Note that this feature depends on the architecture. If unsure, say N.
+
# The simple matches.
config IP6_NF_MATCH_AH
tristate '"ah" match support'
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 22f39e00bef3..6fd784643d6e 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -94,22 +94,26 @@ ip6_packet_match(const struct sk_buff *skb,
{
unsigned long ret;
const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES_128)
+ const __uint128_t *ulm1 = (const __uint128_t *)&ip6info->smsk;
+ const __uint128_t *ulm2 = (const __uint128_t *)&ip6info->dmsk;
+#endif
#define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
- if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
- &ip6info->src), IP6T_INV_SRCIP) ||
- FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
- &ip6info->dst), IP6T_INV_DSTIP)) {
- dprintf("Source or dest mismatch.\n");
-/*
- dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
- ipinfo->smsk.s_addr, ipinfo->src.s_addr,
- ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
- dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
- ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
- ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
- return false;
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES_128)
+ if (*ulm1 || *ulm2)
+#endif
+ {
+ if (FWINV(ipv6_masked_addr_cmp
+ (&ipv6->saddr, &ip6info->smsk, &ip6info->src),
+ IP6T_INV_SRCIP) ||
+ FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
+ &ip6info->dst),
+ IP6T_INV_DSTIP)) {
+ dprintf("Source or dest mismatch.\n");
+ return false;
+ }
}
ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);