 Documentation/devicetree/bindings/cnss/cnss-sdio-wlan.txt | 3
 Documentation/devicetree/bindings/iio/adc/qcom-tadc.txt | 141
 Documentation/devicetree/bindings/media/video/msm-cci.txt | 3
 arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi | 62
 arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi | 18
 arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi | 110
 arch/arm/boot/dts/qcom/msmcobalt.dtsi | 2
 arch/arm64/configs/msmcortex-perf_defconfig | 3
 arch/arm64/configs/msmcortex_defconfig | 2
 drivers/Makefile | 2
 drivers/char/diag/diagfwd_mhi.c | 5
 drivers/gpu/msm/adreno_debugfs.c | 2
 drivers/gpu/msm/kgsl.c | 5
 drivers/gpu/msm/kgsl_cffdump.c | 7
 drivers/gpu/msm/kgsl_cmdbatch.c | 2
 drivers/gpu/msm/kgsl_snapshot.c | 3
 drivers/hwtracing/coresight/coresight-stm.c | 23
 drivers/iio/adc/Kconfig | 12
 drivers/iio/adc/Makefile | 1
 drivers/iio/adc/qcom-tadc.c | 742
 drivers/media/platform/msm/camera_v2/Kconfig | 48
 drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c | 163
 drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h | 16
 drivers/media/platform/msm/camera_v2/isp/msm_isp.c | 5
 drivers/media/platform/msm/camera_v2/isp/msm_isp.h | 79
 drivers/media/platform/msm/camera_v2/isp/msm_isp32.c | 71
 drivers/media/platform/msm/camera_v2/isp/msm_isp40.c | 198
 drivers/media/platform/msm/camera_v2/isp/msm_isp44.c | 203
 drivers/media/platform/msm/camera_v2/isp/msm_isp46.c | 196
 drivers/media/platform/msm/camera_v2/isp/msm_isp47.c | 193
 drivers/media/platform/msm/camera_v2/isp/msm_isp47.h | 6
 drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 3086
 drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h | 101
 drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c | 1153
 drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h | 54
 drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c | 82
 drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h | 2
 drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c | 115
 drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h | 6
 drivers/media/platform/msm/camera_v2/sensor/io/Makefile | 3
 drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h | 56
 drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c | 1093
 drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c | 46
 drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h | 3
 drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c | 12
 drivers/media/platform/msm/vidc/msm_vdec.c | 2
 drivers/media/platform/msm/vidc/msm_venc.c | 4
 drivers/media/platform/msm/vidc/msm_vidc.c | 22
 drivers/media/platform/msm/vidc/msm_vidc_common.c | 33
 drivers/media/platform/msm/vidc/msm_vidc_dcvs.c | 247
 drivers/media/platform/msm/vidc/msm_vidc_dcvs.h | 5
 drivers/media/platform/msm/vidc/msm_vidc_internal.h | 3
 drivers/mfd/qcom-i2c-pmic.c | 3
 drivers/mmc/host/sdhci-msm.c | 14
 drivers/net/wireless/cnss/cnss_pci.c | 45
 drivers/net/wireless/cnss/cnss_sdio.c | 212
 drivers/pci/host/pci-msm.c | 121
 drivers/phy/phy-qcom-ufs-qmp-v3.c | 20
 drivers/phy/phy-qcom-ufs-qmp-v3.h | 103
 drivers/platform/msm/ipa/ipa_v2/ipa.c | 235
 drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 14
 drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 426
 drivers/platform/msm/ipa/ipa_v3/ipa.c | 257
 drivers/scsi/ufs/ufs-qcom.c | 25
 drivers/soc/qcom/glink.c | 205
 drivers/soc/qcom/icnss.c | 84
 drivers/usb/gadget/function/f_accessory.c | 15
 drivers/video/fbdev/msm/mdss_hdmi_tx.c | 22
 drivers/video/fbdev/msm/mdss_mdp_ctl.c | 6
 drivers/video/fbdev/msm/mdss_mdp_overlay.c | 8
 drivers/video/fbdev/msm/msm_ext_display.c | 6
 include/linux/msm_ext_display.h | 55
 include/linux/qpnp/qpnp-haptic.h | 5
 include/net/cfg80211.h | 7
 include/uapi/linux/nl80211.h | 4
 include/uapi/media/msm_cam_sensor.h | 12
 include/uapi/media/msmb_isp.h | 123
 kernel/locking/mutex.c | 12
 net/ipc_router/ipc_router_core.c | 17
 net/wireless/nl80211.c | 10
 net/wireless/scan.c | 2
 net/wireless/sme.c | 2
 net/wireless/trace.h | 6
 83 files changed, 7412 insertions(+), 3118 deletions(-)
diff --git a/Documentation/devicetree/bindings/cnss/cnss-sdio-wlan.txt b/Documentation/devicetree/bindings/cnss/cnss-sdio-wlan.txt
index 72bbb6180258..187c5604b521 100644
--- a/Documentation/devicetree/bindings/cnss/cnss-sdio-wlan.txt
+++ b/Documentation/devicetree/bindings/cnss/cnss-sdio-wlan.txt
@@ -29,7 +29,8 @@ Optional properties:
- qcom,msm-bus,num-cases: number of cases for bus scaling.
- qcom,msm-bus,num-paths: number of paths for bus scale vector.
- qcom,msm-bus,vectors-KBps: bus scale vector table.
-
+ - qcom,skip-wlan-en-toggle: Boolean property to be enabled for platforms where
+ wlan_en toggling is not supported.
Example:
qcom,cnss-sdio {
compatible = "qcom,cnss_sdio";
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom-tadc.txt b/Documentation/devicetree/bindings/iio/adc/qcom-tadc.txt
new file mode 100644
index 000000000000..6880d304367d
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/qcom-tadc.txt
@@ -0,0 +1,141 @@
+Qualcomm Technologies, Inc. TADC Specific Bindings
+
+TADC (Telemetry ADC) is a 10-bit ADC with 8 channels: battery
+temperature, skin temperature, die temperature, battery current, battery
+voltage, input current, input voltage, and OTG current.
+
+=======================
+Required Node Structure
+=======================
+
+A TADC must be described in two levels of device nodes.
+
+=======================
+First Level Node - TADC
+=======================
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Address and size of the TADC register block.
+
+TADC specific properties:
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: Must be "qcom,tadc".
+
+- interrupts
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Peripheral interrupt specifier.
+
+- interrupt-names
+ Usage: required
+ Value type: <stringlist>
+ Definition: Interrupt names. This list must match up 1-to-1 with the
+ interrupts specified in the 'interrupts' property.
+
+=============================================
+Second Level Nodes - TADC Thermistor Channels
+=============================================
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: The 0 based channel number.
+
+TADC thermistor channel specific properties:
+- qcom,rbias
+ Usage: required
+ Value type: <u32>
+ Definition: The bias resistor value.
+
+- qcom,rtherm-at-25degc
+ Usage: required
+ Value type: <u32>
+ Definition: The thermistor resistance at 25 DegC.
+
+- qcom,beta-coefficient
+ Usage: required
+ Value type: <u32>
+ Definition: The beta coefficient or B-parameter of the thermistor.
+
+===============================================
+Second Level Nodes - TADC Scale/Offset Channels
+===============================================
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: The 0 based channel number.
+
+TADC scale/offset channel specific properties:
+- qcom,scale
+ Usage: required
+ Value type: <s32>
+ Definition: The RAW scaling factor.
+
+- qcom,offset
+ Usage: optional
+ Value type: <s32>
+ Definition: The offset after scaling.
+
+=======
+Example
+=======
+
+smb138x_tadc: qcom,tadc@3600 {
+ compatible = "qcom,tadc";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+ interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "eoc";
+
+ batt_temp@0 {
+ reg = <0>;
+ qcom,rbias = <68100>;
+ qcom,rtherm-at-25degc = <68000>;
+ qcom,beta-coefficient = <3450>;
+ };
+
+ skin_temp@1 {
+ reg = <1>;
+ qcom,rbias = <33000>;
+ qcom,rtherm-at-25degc = <68000>;
+ qcom,beta-coefficient = <3450>;
+ };
+
+ die_temp@2 {
+ reg = <2>;
+ qcom,scale = <(-1032)>;
+ qcom,offset = <344125>;
+ };
+
+ batt_i@3 {
+ reg = <3>;
+ qcom,channel = <3>;
+ qcom,scale = <20000000>;
+ };
+
+ batt_v@4 {
+ reg = <4>;
+ qcom,scale = <5000000>;
+ };
+
+ input_i@5 {
+ reg = <5>;
+ qcom,scale = <14285714>;
+ };
+
+ input_v@6 {
+ reg = <6>;
+ qcom,scale = <25000000>;
+ };
+
+ otg_i@7 {
+ reg = <7>;
+ qcom,scale = <5714286>;
+ };
+};
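As a quick sanity check on the thermistor properties above: the qcom-tadc driver added later in this patch converts resistance to temperature with the B-parameter relation (see the comment above tadc_therm_3450b_68k[] in drivers/iio/adc/qcom-tadc.c). With the documented example values (beta = 3450, 68 kohm at 25 degC):

    (1 / T) = (1 / T0) + (1 / B) * ln(R / R0),   T0 = 298.15 K (25 degC)

    R = 27780 ohm:  1 / T = 1 / 298.15 + (1 / 3450) * ln(27780 / 68000)
                          = 3.354e-3 - 2.595e-4 = 3.0945e-3
                    =>  T = 323.1 K = 50.0 degC

which matches the { 27780, 50000 } (ohm, milli-degC) entry in the driver's lookup table.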
diff --git a/Documentation/devicetree/bindings/media/video/msm-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cci.txt
index 086af9b337f4..fb1ca0261f9c 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cci.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cci.txt
@@ -106,6 +106,8 @@ Optional properties:
- qcom,mount-angle : should contain the physical mount angle of the sensor on
the target
- 0, 90, 180, 360
+- qcom,secure : should be enabled to operate the camera in secure mode
+ - 0, 1
- qcom,mclk-23880000 : should be enabled if the supported mclk is 23.88Mhz and
not 24 Mhz.
- qcom,gpio-no-mux : should contain field to indicate whether gpio mux table is
@@ -273,6 +275,7 @@ Example:
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
qcom,mount-angle = <90>;
+ qcom,secure = <1>;
qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator0>;
qcom,eeprom-src = <&eeprom0>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index d1a8ae03cde2..d44002efea11 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -205,6 +205,68 @@
};
};
+&i2c_7 {
+ status = "okay";
+ qcom,smb138x@8 {
+ compatible = "qcom,i2c-pmic";
+ reg = <0x8>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt_names = "smb138x";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
+
+ smb138x_tadc: qcom,tadc@3600 {
+ compatible = "qcom,tadc";
+ reg = <0x3600 0x100>;
+
+ interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "eoc";
+
+ batt_therm {
+ qcom,rbias = <68100>;
+ qcom,rtherm-at-25degc = <68000>;
+ qcom,beta-coefficient = <3450>;
+ };
+
+ skin_temp {
+ qcom,rbias = <33000>;
+ qcom,rtherm-at-25degc = <68000>;
+ qcom,beta-coefficient = <3450>;
+ };
+
+ die_temp {
+ qcom,scale = <(-1032)>;
+ qcom,offset = <344125>;
+ };
+
+ batt_i {
+ qcom,channel = <3>;
+ qcom,scale = <20000000>;
+ };
+
+ batt_v {
+ qcom,scale = <5000000>;
+ };
+
+ input_i {
+ qcom,scale = <14285714>;
+ };
+
+ input_v {
+ qcom,scale = <25000000>;
+ };
+
+ otg_i {
+ qcom,scale = <5714286>;
+ };
+ };
+ };
+};
+
&mdss_mdp {
qcom,mdss-pref-prim-intf = "dsi";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
index b0e3751792dd..85d6b1d5fba3 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
@@ -448,6 +448,14 @@
qcom,mdss-fb-map = <&mdss_fb1>;
};
+ msm_ext_disp: qcom,msm_ext_disp {
+ compatible = "qcom,msm-ext-disp";
+
+ hdmi_audio: qcom,msm-hdmi-audio-rx {
+ compatible = "qcom,msm-hdmi-audio-codec-rx";
+ };
+ };
+
mdss_dp_ctrl: qcom,dp_ctrl@c990000 {
cell-index = <0>;
compatible = "qcom,mdss-dp";
@@ -483,6 +491,8 @@
qcom,dp-usbpd-detection = <&pmicobalt_pdphy>;
+ qcom,msm_ext_disp = <&msm_ext_disp>;
+
qcom,core-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
@@ -595,10 +605,6 @@
};
};
- msm_ext_disp: qcom,msm_ext_disp {
- compatible = "qcom,msm-ext-disp";
- };
-
mdss_hdmi_tx: qcom,hdmi_tx@c9a0000 {
cell-index = <0>;
compatible = "qcom,hdmi-tx";
@@ -631,10 +637,6 @@
qcom,mdss-fb-map = <&mdss_fb2>;
qcom,pluggable;
-
- hdmi_audio: qcom,msm-hdmi-audio-rx {
- compatible = "qcom,msm-hdmi-audio-codec-rx";
- };
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
index b84f63ecbd1e..1d64cefaeb4a 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
@@ -206,6 +206,89 @@
};
};
+&i2c_7 {
+ status = "okay";
+ qcom,smb138x@8 {
+ compatible = "qcom,i2c-pmic";
+ reg = <0x8>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-parent = <&spmi_bus>;
+ interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt_names = "smb138x";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
+
+ smb138x_revid: qcom,revid@100 {
+ compatible = "qcom,qpnp-revid";
+ reg = <0x100 0x100>;
+ };
+
+ smb138x_tadc: qcom,tadc@3600 {
+ compatible = "qcom,tadc";
+ reg = <0x3600 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ #io-channel-cells = <1>;
+ interrupts = <0x36 0x0 IRQ_TYPE_EDGE_BOTH>;
+ interrupt-names = "eoc";
+
+ batt_temp@0 {
+ reg = <0>;
+ qcom,rbias = <68100>;
+ qcom,rtherm-at-25degc = <68000>;
+ qcom,beta-coefficient = <3450>;
+ };
+
+ skin_temp@1 {
+ reg = <1>;
+ qcom,rbias = <33000>;
+ qcom,rtherm-at-25degc = <68000>;
+ qcom,beta-coefficient = <3450>;
+ };
+
+ die_temp@2 {
+ reg = <2>;
+ qcom,scale = <(-1032)>;
+ qcom,offset = <344125>;
+ };
+
+ batt_i@3 {
+ reg = <3>;
+ qcom,channel = <3>;
+ qcom,scale = <20000000>;
+ };
+
+ batt_v@4 {
+ reg = <4>;
+ qcom,scale = <5000000>;
+ };
+
+ input_i@5 {
+ reg = <5>;
+ qcom,scale = <14285714>;
+ };
+
+ input_v@6 {
+ reg = <6>;
+ qcom,scale = <25000000>;
+ };
+
+ otg_i@7 {
+ reg = <7>;
+ qcom,scale = <5714286>;
+ };
+ };
+
+ smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ compatible = "qcom,smb138x-parallel-slave";
+ qcom,pmic-revid = <&smb138x_revid>;
+ reg = <0x1000 0x700>;
+ };
+ };
+};
+
&mdss_hdmi_tx {
pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active",
"hdmi_active", "hdmi_sleep";
@@ -356,33 +439,6 @@
qcom,5v-boost-gpio = <&tlmm 51 0>;
};
-&i2c_7 {
- status = "okay";
- qcom,smb138x@8 {
- compatible = "qcom,i2c-pmic";
- reg = <0x8>;
- #address-cells = <2>;
- #size-cells = <0>;
- interrupt-parent = <&spmi_bus>;
- interrupts = <0x0 0xd1 0x0 IRQ_TYPE_LEVEL_LOW>;
- interrupt_names = "smb138x";
- interrupt-controller;
- #interrupt-cells = <3>;
- qcom,periph-map = <0x10 0x11 0x12 0x13 0x14 0x16 0x36>;
-
- smb138x_revid: qcom,revid@100 {
- compatible = "qcom,qpnp-revid";
- reg = <0x100 0x100>;
- };
-
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
- compatible = "qcom,smb138x-parallel-slave";
- qcom,pmic-revid = <&smb138x_revid>;
- reg = <0x1000 0x700>;
- };
- };
-};
-
&pmicobalt_haptics {
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index f830b2172050..a531226eba47 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -1518,7 +1518,7 @@
gdsc-vdd-supply = <&gdsc_pcie_0>;
vreg-1.8-supply = <&pmcobalt_l2>;
vreg-0.9-supply = <&pmcobalt_l1>;
- vreg-cx-supply = <&pmcobalt_s1_level_ao>;
+ vreg-cx-supply = <&pmcobalt_s1_level>;
qcom,vreg-1.8-voltage-level = <1200000 1200000 24000>;
qcom,vreg-0.9-voltage-level = <880000 880000 24000>;
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 2d3ddbaccb72..3adda1fc4109 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -483,6 +483,7 @@ CONFIG_ARM_SMMU=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_COMMON_LOG=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
CONFIG_MSM_SMD=y
@@ -541,6 +542,7 @@ CONFIG_DEVFREQ_SPDM=y
CONFIG_EXTCON=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_ARM_GIC_V3_ACL=y
@@ -573,6 +575,7 @@ CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_LINKS_AND_SINKS=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 3523cb67d49a..686e1c22c5ae 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -560,6 +560,7 @@ CONFIG_DEVFREQ_SPDM=y
CONFIG_EXTCON=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_ARM_GIC_V3_ACL=y
@@ -637,6 +638,7 @@ CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_FREE_PAGES_RDONLY=y
CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_SOURCE_ETM4X=y
CONFIG_CORESIGHT_REMOTE_ETM=y
diff --git a/drivers/Makefile b/drivers/Makefile
index 1761f8f2cda7..eb67aadf2ee0 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/
obj-$(CONFIG_NUBUS) += nubus/
obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
+obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SCSI) += scsi/
obj-y += nvme/
obj-$(CONFIG_ATA) += ata/
@@ -126,7 +127,6 @@ obj-$(CONFIG_SWITCH) += switch/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
-obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
obj-$(CONFIG_ARCH_SHMOBILE) += sh/
ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index b8ed216faaf6..f7b1e98f22b0 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -404,8 +404,11 @@ static void mhi_read_done_work_fn(struct work_struct *work)
* buffers here and do not forward them to the mux layer.
*/
if ((atomic_read(&(mhi_info->read_ch.opened)))) {
- diag_remote_dev_read_done(mhi_info->dev_id, buf,
+ err = diag_remote_dev_read_done(mhi_info->dev_id, buf,
result.bytes_xferd);
+ if (err)
+ mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH,
+ buf, result.bytes_xferd);
} else {
mhi_buf_tbl_remove(mhi_info, TYPE_MHI_READ_CH, buf,
result.bytes_xferd);
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 9cbcd06d7658..680827e5b848 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -138,7 +138,7 @@ static void sync_event_print(struct seq_file *s,
break;
}
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
- seq_printf(s, "sync: [%p] %s", sync_event->handle,
+ seq_printf(s, "sync: [%pK] %s", sync_event->handle,
(sync_event->handle && sync_event->handle->fence)
? sync_event->handle->fence->name : "NULL");
break;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 24005a1fda72..691f687cd839 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4457,9 +4457,8 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
disable_irq(device->pwrctrl.interrupt_num);
KGSL_DRV_INFO(device,
- "dev_id %d regs phys 0x%08lx size 0x%08x virt %p\n",
- device->id, device->reg_phys, device->reg_len,
- device->reg_virt);
+ "dev_id %d regs phys 0x%08lx size 0x%08x\n",
+ device->id, device->reg_phys, device->reg_len);
rwlock_init(&device->context_lock);
diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c
index 2e90f78a303c..8e783f8ce017 100644
--- a/drivers/gpu/msm/kgsl_cffdump.c
+++ b/drivers/gpu/msm/kgsl_cffdump.c
@@ -513,10 +513,6 @@ EXPORT_SYMBOL(kgsl_cffdump_waitirq);
static int subbuf_start_handler(struct rchan_buf *buf,
void *subbuf, void *prev_subbuf, size_t prev_padding)
{
- pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf"
- "=%p, prev_padding=%08zx)\n", subbuf, prev_subbuf,
- prev_padding);
-
if (relay_buf_full(buf)) {
if (!suspended) {
suspended = 1;
@@ -573,9 +569,6 @@ static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs)
{
struct rchan *chan;
- pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, "
- "n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir);
-
chan = relay_open("cpu", dir, subbuf_size,
n_subbufs, &relay_callbacks, NULL);
if (!chan) {
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.c b/drivers/gpu/msm/kgsl_cmdbatch.c
index ceca8b1e1522..6272410ce544 100644
--- a/drivers/gpu/msm/kgsl_cmdbatch.c
+++ b/drivers/gpu/msm/kgsl_cmdbatch.c
@@ -80,7 +80,7 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
}
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
if (event->handle)
- dev_err(device->dev, " fence: [%p] %s\n",
+ dev_err(device->dev, " fence: [%pK] %s\n",
event->handle->fence,
event->handle->name);
else
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index f9d3ede718ab..dd004f9588e9 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -1042,9 +1042,6 @@ void kgsl_snapshot_save_frozen_objs(struct work_struct *work)
goto done;
snapshot->mempool = vmalloc(size);
- if (snapshot->mempool != NULL)
- KGSL_CORE_ERR("snapshot: mempool address %p, size %zx\n",
- snapshot->mempool, size);
ptr = snapshot->mempool;
snapshot->mempool_size = 0;
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 26fbfba23c94..a234d61802ce 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -22,6 +22,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/bitmap.h>
#include <linux/of.h>
@@ -135,7 +136,6 @@ struct stm_drvdata {
struct device *dev;
struct coresight_device *csdev;
struct miscdevice miscdev;
- struct clk *clk;
spinlock_t spinlock;
struct channel_space chs;
bool enable;
@@ -270,8 +270,8 @@ static int stm_enable(struct coresight_device *csdev)
int ret;
unsigned long flags;
- ret = clk_prepare_enable(drvdata->clk);
- if (ret)
+ ret = pm_runtime_get_sync(drvdata->dev);
+ if (ret < 0)
return ret;
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -349,7 +349,7 @@ static void stm_disable(struct coresight_device *csdev)
/* Wait for 100ms so that pending data has been written to HW */
msleep(100);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
dev_info(drvdata->dev, "STM tracing disabled\n");
}
@@ -360,7 +360,7 @@ static int stm_trace_id(struct coresight_device *csdev)
unsigned long flags;
int trace_id = -1;
- if (clk_prepare_enable(drvdata->clk))
+ if (pm_runtime_get_sync(drvdata->dev) < 0)
goto out;
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -370,7 +370,7 @@ static int stm_trace_id(struct coresight_device *csdev)
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(drvdata->dev);
out:
return trace_id;
}
@@ -806,19 +806,14 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
- drvdata->clk = adev->pclk;
- ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(drvdata->clk);
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
if (ret)
return ret;
if (!coresight_authstatus_enabled(drvdata->base))
goto err1;
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
@@ -856,7 +851,7 @@ err:
coresight_unregister(drvdata->csdev);
return ret;
err1:
- clk_disable_unprepare(drvdata->clk);
+ pm_runtime_put(&adev->dev);
return -EPERM;
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 8a0e2c809fb2..90135f496aaf 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -318,6 +318,18 @@ config QCOM_RRADC
To compile this driver as a module, choose M here: the module will
be called qcom-rradc.
+config QCOM_TADC
+ tristate "Qualcomm Technologies Inc. TADC driver"
+ depends on MFD_I2C_PMIC
+ help
+ Say yes here to support the Qualcomm Technologies Inc. telemetry ADC.
+ The TADC provides battery temperature, skin temperature,
+ die temperature, battery voltage, battery current, input voltage,
+ input current, and OTG current.
+
+ The driver can also be built as a module. If so, the module will be
+ called qcom-tadc.
+
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 59cac58d769e..9124e49a5f65 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_QCOM_RRADC) += qcom-rradc.o
+obj-$(CONFIG_QCOM_TADC) += qcom-tadc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
diff --git a/drivers/iio/adc/qcom-tadc.c b/drivers/iio/adc/qcom-tadc.c
new file mode 100644
index 000000000000..3cc2694f9a03
--- /dev/null
+++ b/drivers/iio/adc/qcom-tadc.c
@@ -0,0 +1,742 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define TADC_REVISION1_REG 0x00
+#define TADC_REVISION2_REG 0x01
+#define TADC_REVISION3_REG 0x02
+#define TADC_REVISION4_REG 0x03
+#define TADC_PERPH_TYPE_REG 0x04
+#define TADC_PERPH_SUBTYPE_REG 0x05
+
+/* TADC register definitions */
+#define TADC_SW_CH_CONV_REG(chip) (chip->tadc_base + 0x06)
+#define TADC_MBG_ERR_REG(chip) (chip->tadc_base + 0x07)
+#define TADC_EN_CTL_REG(chip) (chip->tadc_base + 0x46)
+#define TADC_CONV_REQ_REG(chip) (chip->tadc_base + 0x51)
+#define TADC_HWTRIG_CONV_CH_EN_REG(chip) (chip->tadc_base + 0x52)
+#define TADC_HW_SETTLE_DELAY_REG(chip) (chip->tadc_base + 0x53)
+#define TADC_LONG_HW_SETTLE_DLY_EN_REG(chip) (chip->tadc_base + 0x54)
+#define TADC_LONG_HW_SETTLE_DLY_REG(chip) (chip->tadc_base + 0x55)
+#define TADC_ADC_BUF_CH_REG(chip) (chip->tadc_base + 0x56)
+#define TADC_ADC_AAF_CH_REG(chip) (chip->tadc_base + 0x57)
+#define TADC_ADC_DATA_RDBK_REG(chip) (chip->tadc_base + 0x58)
+#define TADC_CH1_ADC_LO_REG(chip) (chip->tadc_base + 0x60)
+#define TADC_CH1_ADC_HI_REG(chip) (chip->tadc_base + 0x61)
+#define TADC_CH2_ADC_LO_REG(chip) (chip->tadc_base + 0x62)
+#define TADC_CH2_ADC_HI_REG(chip) (chip->tadc_base + 0x63)
+#define TADC_CH3_ADC_LO_REG(chip) (chip->tadc_base + 0x64)
+#define TADC_CH3_ADC_HI_REG(chip) (chip->tadc_base + 0x65)
+#define TADC_CH4_ADC_LO_REG(chip) (chip->tadc_base + 0x66)
+#define TADC_CH4_ADC_HI_REG(chip) (chip->tadc_base + 0x67)
+#define TADC_CH5_ADC_LO_REG(chip) (chip->tadc_base + 0x68)
+#define TADC_CH5_ADC_HI_REG(chip) (chip->tadc_base + 0x69)
+#define TADC_CH6_ADC_LO_REG(chip) (chip->tadc_base + 0x70)
+#define TADC_CH6_ADC_HI_REG(chip) (chip->tadc_base + 0x71)
+#define TADC_CH7_ADC_LO_REG(chip) (chip->tadc_base + 0x72)
+#define TADC_CH7_ADC_HI_REG(chip) (chip->tadc_base + 0x73)
+#define TADC_CH8_ADC_LO_REG(chip) (chip->tadc_base + 0x74)
+#define TADC_CH8_ADC_HI_REG(chip) (chip->tadc_base + 0x75)
+
+/* TADC_CMP register definitions */
+#define TADC_CMP_THR1_CMP_REG(chip) (chip->tadc_cmp_base + 0x51)
+#define TADC_CMP_THR1_CH1_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x52)
+#define TADC_CMP_THR1_CH1_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x53)
+#define TADC_CMP_THR1_CH2_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x54)
+#define TADC_CMP_THR1_CH2_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x55)
+#define TADC_CMP_THR1_CH3_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x56)
+#define TADC_CMP_THR1_CH3_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x57)
+#define TADC_CMP_THR2_CMP_REG(chip) (chip->tadc_cmp_base + 0x67)
+#define TADC_CMP_THR2_CH1_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x68)
+#define TADC_CMP_THR2_CH1_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x69)
+#define TADC_CMP_THR2_CH2_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x6A)
+#define TADC_CMP_THR2_CH2_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x6B)
+#define TADC_CMP_THR2_CH3_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x6C)
+#define TADC_CMP_THR2_CH3_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x6D)
+#define TADC_CMP_THR3_CMP_REG(chip) (chip->tadc_cmp_base + 0x7D)
+#define TADC_CMP_THR3_CH1_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x7E)
+#define TADC_CMP_THR3_CH1_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x7F)
+#define TADC_CMP_THR3_CH2_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x80)
+#define TADC_CMP_THR3_CH2_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x81)
+#define TADC_CMP_THR3_CH3_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x82)
+#define TADC_CMP_THR3_CH3_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x83)
+#define TADC_CMP_THR4_CMP_REG(chip) (chip->tadc_cmp_base + 0x93)
+#define TADC_CMP_THR4_CH1_CMP_LO_REG(chip) (chip->tadc_cmp_base + 0x94)
+#define TADC_CMP_THR4_CH1_CMP_HI_REG(chip) (chip->tadc_cmp_base + 0x95)
+#define TADC_CMP_THR1_CH1_HYST_REG(chip) (chip->tadc_cmp_base + 0xB0)
+#define TADC_CMP_THR2_CH1_HYST_REG(chip) (chip->tadc_cmp_base + 0xB1)
+#define TADC_CMP_THR3_CH1_HYST_REG(chip) (chip->tadc_cmp_base + 0xB2)
+#define TADC_CMP_THR4_CH1_HYST_REG(chip) (chip->tadc_cmp_base + 0xB3)
+
+/* 10 bits of resolution */
+#define TADC_RESOLUTION 1024
+/* number of hardware channels */
+#define TADC_NUM_CH 8
+
+enum tadc_chan_id {
+ TADC_THERM1 = 0,
+ TADC_THERM2,
+ TADC_DIE_TEMP,
+ TADC_BATT_I,
+ TADC_BATT_V,
+ TADC_INPUT_I,
+ TADC_INPUT_V,
+ TADC_OTG_I,
+ /* virtual channels */
+ TADC_BATT_P,
+ TADC_INPUT_P,
+ TADC_THERM1_THR1,
+ TADC_THERM2_THR1,
+ TADC_DIE_TEMP_THR1,
+};
+
+#define TADC_CHAN(_name, _type, _channel, _info_mask) \
+{ \
+ .type = _type, \
+ .channel = _channel, \
+ .info_mask_separate = _info_mask, \
+ .extend_name = _name, \
+}
+
+#define TADC_THERM_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_TEMP, _channel, \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED))
+
+#define TADC_TEMP_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_TEMP, _channel, \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET))
+
+#define TADC_CURRENT_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_CURRENT, _channel, \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_SCALE))
+
+
+#define TADC_VOLTAGE_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_VOLTAGE, _channel, \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PROCESSED) | \
+ BIT(IIO_CHAN_INFO_SCALE))
+
+#define TADC_POWER_CHAN(_name, _channel) \
+TADC_CHAN(_name, IIO_POWER, _channel, \
+ BIT(IIO_CHAN_INFO_PROCESSED))
+
+static const struct iio_chan_spec tadc_iio_chans[] = {
+ [TADC_THERM1] = TADC_THERM_CHAN(
+ "batt", TADC_THERM1),
+ [TADC_THERM2] = TADC_THERM_CHAN(
+ "skin", TADC_THERM2),
+ [TADC_DIE_TEMP] = TADC_TEMP_CHAN(
+ "die", TADC_DIE_TEMP),
+ [TADC_BATT_I] = TADC_CURRENT_CHAN(
+ "batt", TADC_BATT_I),
+ [TADC_BATT_V] = TADC_VOLTAGE_CHAN(
+ "batt", TADC_BATT_V),
+ [TADC_INPUT_I] = TADC_CURRENT_CHAN(
+ "input", TADC_INPUT_I),
+ [TADC_INPUT_V] = TADC_VOLTAGE_CHAN(
+ "input", TADC_INPUT_V),
+ [TADC_OTG_I] = TADC_CURRENT_CHAN(
+ "otg", TADC_OTG_I),
+ [TADC_BATT_P] = TADC_POWER_CHAN(
+ "batt", TADC_BATT_P),
+ [TADC_INPUT_P] = TADC_POWER_CHAN(
+ "input", TADC_INPUT_P),
+ [TADC_THERM1_THR1] = TADC_THERM_CHAN(
+ "batt_hot", TADC_THERM1_THR1),
+ [TADC_THERM2_THR1] = TADC_THERM_CHAN(
+ "skin_hot", TADC_THERM2_THR1),
+ [TADC_DIE_TEMP_THR1] = TADC_THERM_CHAN(
+ "die_hot", TADC_DIE_TEMP_THR1),
+};
+
+struct tadc_chan_data {
+ s32 scale;
+ s32 offset;
+ u32 rbias;
+ const struct tadc_pt *table;
+ size_t tablesize;
+};
+
+struct tadc_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ u32 tadc_base;
+ u32 tadc_cmp_base;
+ struct tadc_chan_data chans[TADC_NUM_CH];
+ struct completion eoc_complete;
+};
+
+struct tadc_pt {
+ s32 x;
+ s32 y;
+};
+
+/*
+ * Thermistor tables are generated by the B-parameter equation which is a
+ * simplified version of the Steinhart-Hart equation.
+ *
+ * (1 / T) = (1 / T0) + (1 / B) * ln(R / R0)
+ *
+ * Where R0 is the resistance at temperature T0, and T0 is typically room
+ * temperature (25C).
+ */
+static const struct tadc_pt tadc_therm_3450b_68k[] = {
+ { 4151, 120000 },
+ { 4648, 115000 },
+ { 5220, 110000 },
+ { 5880, 105000 },
+ { 6644, 100000 },
+ { 7533, 95000 },
+ { 8571, 90000 },
+ { 9786, 85000 },
+ { 11216, 80000 },
+ { 12906, 75000 },
+ { 14910, 70000 },
+ { 17300, 65000 },
+ { 20163, 60000 },
+ { 23609, 55000 },
+ { 27780, 50000 },
+ { 32855, 45000 },
+ { 39065, 40000 },
+ { 46712, 35000 },
+ { 56185, 30000 },
+ { 68000, 25000 },
+ { 82837, 20000 },
+ { 101604, 15000 },
+ { 125525, 10000 },
+ { 156261, 5000 },
+ { 196090, 0 },
+ { 248163, -5000 },
+ { 316887, -10000 },
+ { 408493, -15000 },
+ { 531889, -20000 },
+ { 699966, -25000 },
+ { 931618, -30000 },
+ { 1254910, -35000 },
+ { 1712127, -40000 },
+};
+
+static int tadc_read(struct tadc_chip *chip, u16 reg, u8 *val,
+ size_t val_count)
+{
+ int rc = 0;
+
+ rc = regmap_bulk_read(chip->regmap, reg, val, val_count);
+ if (rc < 0)
+ pr_err("Couldn't read %04x rc=%d\n", reg, rc);
+
+ return rc;
+}
+
+static int tadc_write(struct tadc_chip *chip, u16 reg, u8 data)
+{
+ int rc = 0;
+
+ rc = regmap_write(chip->regmap, reg, data);
+ if (rc < 0)
+ pr_err("Couldn't write %02x to %04x rc=%d\n",
+ data, reg, rc);
+
+ return rc;
+}
+
+static int tadc_lerp(const struct tadc_pt *pts, size_t tablesize, s32 input,
+ s32 *output)
+{
+ int i;
+ s64 temp;
+
+ if (pts == NULL) {
+ pr_err("Table is NULL\n");
+ return -EINVAL;
+ }
+
+ if (tablesize < 1) {
+ pr_err("Table has no entries\n");
+ return -ENOENT;
+ }
+
+ if (tablesize == 1) {
+ *output = pts[0].y;
+ return 0;
+ }
+
+ if (pts[0].x > pts[1].x) {
+ pr_err("Table is not in ascending order\n");
+ return -EINVAL;
+ }
+
+ if (input <= pts[0].x) {
+ *output = pts[0].y;
+ return 0;
+ }
+
+ if (input >= pts[tablesize - 1].x) {
+ *output = pts[tablesize - 1].y;
+ return 0;
+ }
+
+ for (i = 1; i < tablesize; i++)
+ if (input <= pts[i].x)
+ break;
+
+ temp = (s64)(pts[i].y - pts[i - 1].y) * (s64)(input - pts[i - 1].x);
+ temp = div_s64(temp, pts[i].x - pts[i - 1].x);
+ *output = temp + pts[i - 1].y;
+ return 0;
+}
+
+/*
+ * Process the result of a thermistor reading.
+ *
+ * The voltage input to the ADC is a result of a voltage divider circuit.
+ * Vout = (Rtherm / (Rbias + Rtherm)) * Vbias
+ *
+ * The ADC value is based on the output voltage of the voltage divider, and the
+ * bias voltage.
+ * ADC = (Vin * 1024) / Vbias
+ *
+ * Combine these equations and solve for Rtherm
+ * Rtherm = (ADC * Rbias) / (1024 - ADC)
+ */
+static int tadc_process_therm(const struct tadc_chan_data *chan_data,
+ s16 adc, s32 *result)
+{
+ s64 rtherm;
+
+ rtherm = (s64)adc * (s64)chan_data->rbias;
+ rtherm = div_s64(rtherm, TADC_RESOLUTION - adc);
+ return tadc_lerp(chan_data->table, chan_data->tablesize, rtherm,
+ result);
+}
+
+static int tadc_read_channel(struct tadc_chip *chip, u16 address, int *adc)
+{
+ u8 val[2];
+ int rc;
+
+ rc = tadc_read(chip, address, val, ARRAY_SIZE(val));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read channel rc=%d\n", rc);
+ return rc;
+ }
+
+ *adc = (s16)(val[0] | val[1] << BITS_PER_BYTE);
+ return 0;
+}
+
+#define CONVERSION_TIMEOUT_MS 100
+static int tadc_do_conversion(struct tadc_chip *chip, u8 channels, s16 *adc)
+{
+ unsigned long timeout, timeleft;
+ u8 val[TADC_NUM_CH * 2];
+ int rc, i;
+
+ rc = tadc_read(chip, TADC_MBG_ERR_REG(chip), val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read mbg error status rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (val[0] != 0) {
+ tadc_write(chip, TADC_EN_CTL_REG(chip), 0);
+ tadc_write(chip, TADC_EN_CTL_REG(chip), 0x80);
+ }
+
+ rc = tadc_write(chip, TADC_CONV_REQ_REG(chip), channels);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't write conversion request rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
+ timeleft = wait_for_completion_timeout(&chip->eoc_complete, timeout);
+
+ if (timeleft == 0) {
+ rc = tadc_read(chip, TADC_SW_CH_CONV_REG(chip), val, 1);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read conversion status rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (val[0] != channels) {
+ dev_err(chip->dev, "Conversion timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ rc = tadc_read(chip, TADC_CH1_ADC_LO_REG(chip), val, ARRAY_SIZE(val));
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read adc channels rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < TADC_NUM_CH; i++)
+ adc[i] = val[i * 2] | val[i * 2 + 1] << BITS_PER_BYTE;
+
+ return jiffies_to_msecs(timeout - timeleft);
+}
+
+static int tadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ struct tadc_chip *chip = iio_priv(indio_dev);
+ const struct tadc_chan_data *chan_data = &chip->chans[chan->channel];
+ int rc = 0, offset = 0, scale, scale2, scale_type;
+ s16 adc[TADC_NUM_CH];
+
+ switch (chan->channel) {
+ case TADC_THERM1_THR1:
+ chan_data = &chip->chans[TADC_THERM1];
+ break;
+ case TADC_THERM2_THR1:
+ chan_data = &chip->chans[TADC_THERM2];
+ break;
+ case TADC_DIE_TEMP_THR1:
+ chan_data = &chip->chans[TADC_DIE_TEMP];
+ break;
+ default:
+ break;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->channel) {
+ case TADC_THERM1_THR1:
+ rc = tadc_read_channel(chip,
+ TADC_CMP_THR1_CH1_CMP_LO_REG(chip), val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read THERM1 threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ case TADC_THERM2_THR1:
+ rc = tadc_read_channel(chip,
+ TADC_CMP_THR1_CH2_CMP_LO_REG(chip), val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read THERM2 threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ case TADC_DIE_TEMP_THR1:
+ rc = tadc_read_channel(chip,
+ TADC_CMP_THR1_CH3_CMP_LO_REG(chip), val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read DIE_TEMP threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ default:
+ rc = tadc_do_conversion(chip, BIT(chan->channel), adc);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read channel %d\n",
+ chan->channel);
+ return rc;
+ }
+ *val = adc[chan->channel];
+ break;
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ switch (chan->channel) {
+ case TADC_THERM1:
+ case TADC_THERM2:
+ case TADC_THERM1_THR1:
+ case TADC_THERM2_THR1:
+ rc = tadc_read_raw(indio_dev, chan, val, NULL,
+ IIO_CHAN_INFO_RAW);
+ if (rc < 0)
+ return rc;
+
+ rc = tadc_process_therm(chan_data, *val, val);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't process 0x%04x from channel %d rc=%d\n",
+ *val, chan->channel, rc);
+ return rc;
+ }
+ break;
+ case TADC_BATT_P:
+ rc = tadc_do_conversion(chip,
+ BIT(TADC_BATT_I) | BIT(TADC_BATT_V), adc);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read battery current and voltage channels\n");
+ return rc;
+ }
+
+ *val = adc[TADC_BATT_I] * adc[TADC_BATT_V];
+ break;
+ case TADC_INPUT_P:
+ rc = tadc_do_conversion(chip,
+ BIT(TADC_INPUT_I) | BIT(TADC_INPUT_V), adc);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read input current and voltage channels\n");
+ return rc;
+ }
+
+ *val = adc[TADC_INPUT_I] * adc[TADC_INPUT_V];
+ break;
+ default:
+ rc = tadc_read_raw(indio_dev, chan, val, NULL,
+ IIO_CHAN_INFO_RAW);
+ if (rc < 0)
+ return rc;
+
+ /* offset is optional */
+ rc = tadc_read_raw(indio_dev, chan, &offset, NULL,
+ IIO_CHAN_INFO_OFFSET);
+ if (rc < 0)
+ return rc;
+
+ scale_type = tadc_read_raw(indio_dev, chan,
+ &scale, &scale2, IIO_CHAN_INFO_SCALE);
+ switch (scale_type) {
+ case IIO_VAL_INT:
+ *val = *val * scale + offset;
+ break;
+ case IIO_VAL_FRACTIONAL:
+ *val = div_s64((s64)*val * scale + offset,
+ scale2);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->channel) {
+ case TADC_DIE_TEMP:
+ case TADC_DIE_TEMP_THR1:
+ *val = chan_data->scale;
+ return IIO_VAL_INT;
+ case TADC_BATT_I:
+ case TADC_BATT_V:
+ case TADC_INPUT_I:
+ case TADC_INPUT_V:
+ case TADC_OTG_I:
+ *val = chan_data->scale;
+ *val2 = TADC_RESOLUTION;
+ return IIO_VAL_FRACTIONAL;
+ }
+ return -EINVAL;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = chan_data->offset;
+ return IIO_VAL_INT;
+ }
+ return -EINVAL;
+}
+
+static irqreturn_t handle_eoc(int irq, void *dev_id)
+{
+ struct tadc_chip *chip = dev_id;
+
+ complete(&chip->eoc_complete);
+ return IRQ_HANDLED;
+}
+
+static int tadc_set_therm_table(struct tadc_chan_data *chan_data, u32 beta,
+ u32 rtherm)
+{
+ if (beta == 3450 && rtherm == 68000) {
+ chan_data->table = tadc_therm_3450b_68k;
+ chan_data->tablesize = ARRAY_SIZE(tadc_therm_3450b_68k);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int tadc_parse_dt(struct tadc_chip *chip)
+{
+ struct device_node *child, *node;
+ struct tadc_chan_data *chan_data;
+ u32 chan_id, rtherm, beta;
+ int rc = 0;
+
+ node = chip->dev->of_node;
+ for_each_available_child_of_node(node, child) {
+ rc = of_property_read_u32(child, "reg", &chan_id);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't find channel for %s rc=%d",
+ child->name, rc);
+ return rc;
+ }
+
+ if (chan_id > TADC_NUM_CH - 1) {
+ dev_err(chip->dev, "Channel %d is out of range [0, %d]\n",
+ chan_id, TADC_NUM_CH - 1);
+ return -EINVAL;
+ }
+
+ chan_data = &chip->chans[chan_id];
+ switch (chan_id) {
+ case TADC_THERM1:
+ case TADC_THERM2:
+ rc = of_property_read_u32(child,
+ "qcom,rbias", &chan_data->rbias);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read qcom,rbias rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(child,
+ "qcom,beta-coefficient", &beta);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read qcom,beta-coefficient rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32(child,
+ "qcom,rtherm-at-25degc", &rtherm);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read qcom,rtherm-at-25degc rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = tadc_set_therm_table(chan_data, beta, rtherm);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't set therm table rc=%d\n",
+ rc);
+ return rc;
+ }
+ break;
+ default:
+ rc = of_property_read_s32(child, "qcom,scale",
+ &chan_data->scale);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read scale rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ of_property_read_s32(child, "qcom,offset",
+ &chan_data->offset);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static const struct iio_info tadc_info = {
+ .read_raw = &tadc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int tadc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct iio_dev *indio_dev;
+ struct tadc_chip *chip;
+ int rc = 0, irq;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ chip = iio_priv(indio_dev);
+ chip->dev = &pdev->dev;
+ init_completion(&chip->eoc_complete);
+
+ rc = of_property_read_u32(node, "reg", &chip->tadc_base);
+ if (rc < 0) {
+ dev_err(chip->dev, "Couldn't read base address rc=%d\n", rc);
+ return rc;
+ }
+ chip->tadc_cmp_base = chip->tadc_base + 0x100;
+
+ chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
+ if (!chip->regmap) {
+ pr_err("Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+ rc = tadc_parse_dt(chip);
+ if (rc < 0) {
+ pr_err("Couldn't parse device tree rc=%d\n", rc);
+ return rc;
+ }
+
+ irq = of_irq_get_byname(node, "eoc");
+ if (irq < 0) {
+ pr_err("Couldn't get eoc irq rc=%d\n", irq);
+ return irq;
+ }
+
+ rc = devm_request_threaded_irq(chip->dev, irq, NULL, handle_eoc,
+ IRQF_ONESHOT, "eoc", chip);
+ if (rc < 0) {
+ pr_err("Couldn't request irq %d rc=%d\n", irq, rc);
+ return rc;
+ }
+
+ indio_dev->dev.parent = chip->dev;
+ indio_dev->name = pdev->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &tadc_info;
+ indio_dev->channels = tadc_iio_chans;
+ indio_dev->num_channels = ARRAY_SIZE(tadc_iio_chans);
+
+ rc = devm_iio_device_register(chip->dev, indio_dev);
+ if (rc < 0)
+ dev_err(chip->dev, "Couldn't register IIO device rc=%d\n", rc);
+
+ return rc;
+}
+
+static int tadc_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id tadc_match_table[] = {
+ { .compatible = "qcom,tadc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tadc_match_table);
+
+static struct platform_driver tadc_driver = {
+ .driver = {
+ .name = "qcom-tadc",
+ .of_match_table = tadc_match_table,
+ },
+ .probe = tadc_probe,
+ .remove = tadc_remove,
+};
+module_platform_driver(tadc_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. TADC driver");
+MODULE_LICENSE("GPL v2");
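A minimal host-side sketch of the conversion chain described in the comments above (recover the thermistor resistance from the voltage divider, then interpolate over the resistance/temperature table), mirroring tadc_process_therm() and tadc_lerp() with a small subset of the 3450B/68k table. The ADC code and bias value are illustrative only; this is not part of the patch:

#include <stdio.h>

struct pt { long r_ohm; long temp_mdegc; };

/* a few points from tadc_therm_3450b_68k[], resistance ascending */
static const struct pt tbl[] = {
	{ 27780, 50000 },
	{ 32855, 45000 },
	{ 39065, 40000 },
	{ 46712, 35000 },
	{ 56185, 30000 },
	{ 68000, 25000 },
};

/* Rtherm = (ADC * Rbias) / (1024 - ADC), per the driver comment */
static long adc_to_rtherm(long adc, long rbias)
{
	return adc * rbias / (1024 - adc);
}

/* linear interpolation between table points, clamped at the ends */
static long rtherm_to_mdegc(long r)
{
	unsigned int i;

	if (r <= tbl[0].r_ohm)
		return tbl[0].temp_mdegc;
	for (i = 1; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (r <= tbl[i].r_ohm)
			return tbl[i - 1].temp_mdegc +
			       (tbl[i].temp_mdegc - tbl[i - 1].temp_mdegc) *
			       (r - tbl[i - 1].r_ohm) /
			       (tbl[i].r_ohm - tbl[i - 1].r_ohm);
	return tbl[sizeof(tbl) / sizeof(tbl[0]) - 1].temp_mdegc;
}

int main(void)
{
	long adc = 400;		/* hypothetical raw reading, 0..1023 */
	long rbias = 68100;	/* qcom,rbias from the example bindings */
	long r = adc_to_rtherm(adc, rbias);

	/* 400/1024 of full scale across 68.1k -> ~43.6 kohm -> ~37 degC */
	printf("adc=%ld rtherm=%ld ohm temp=%ld mdegC\n",
	       adc, r, rtherm_to_mdegc(r));
	return 0;
}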
diff --git a/drivers/media/platform/msm/camera_v2/Kconfig b/drivers/media/platform/msm/camera_v2/Kconfig
index 7f7cc8ee9be3..568f817e8614 100644
--- a/drivers/media/platform/msm/camera_v2/Kconfig
+++ b/drivers/media/platform/msm/camera_v2/Kconfig
@@ -1,5 +1,5 @@
config MSM_CAMERA_SENSOR
- bool "Qualcomm MSM camera sensor support"
+ bool "QTI MSM camera sensor support"
depends on MSMB_CAMERA
select NEW_LEDS
select LEDS_CLASS
@@ -10,7 +10,7 @@ config MSM_CAMERA_SENSOR
subdev APIs.
config MSM_CPP
- bool "Qualcomm MSM Camera Post Processing Engine support"
+ bool "QTI MSM Camera Post Processing Engine support"
depends on MSMB_CAMERA
---help---
Enable support for Camera Post-processing Engine
@@ -19,7 +19,7 @@ config MSM_CPP
APIs.
config MSM_CCI
- bool "Qualcomm MSM Camera Control Interface support"
+ bool "QTI MSM Camera Control Interface support"
depends on MSMB_CAMERA
---help---
Enable support for Camera Control Interface driver only
@@ -29,7 +29,7 @@ config MSM_CCI
GPIO and data frames.
config MSM_CSI20_HEADER
- bool "Qualcomm MSM CSI 2.0 Header"
+ bool "QTI MSM CSI 2.0 Header"
depends on MSMB_CAMERA
---help---
Enable support for CSI drivers to include 2.0
@@ -39,7 +39,7 @@ config MSM_CSI20_HEADER
8930 and 8064 platforms.
config MSM_CSI22_HEADER
- bool "Qualcomm MSM CSI 2.2 Header"
+ bool "QTI MSM CSI 2.2 Header"
depends on MSMB_CAMERA
---help---
Enable support for CSI drivers to include 2.2
@@ -49,7 +49,7 @@ config MSM_CSI22_HEADER
platform.
config MSM_CSI30_HEADER
- bool "Qualcomm MSM CSI 3.0 Header"
+ bool "QTI MSM CSI 3.0 Header"
depends on MSMB_CAMERA
---help---
Enable support for CSI drivers to include 3.0
@@ -59,7 +59,7 @@ config MSM_CSI30_HEADER
8064 platforms.
config MSM_CSI31_HEADER
- bool "Qualcomm MSM CSI 3.1 Header"
+ bool "QTI MSM CSI 3.1 Header"
depends on MSMB_CAMERA
---help---
Enable support for CSI drivers to include 3.0
@@ -69,7 +69,7 @@ config MSM_CSI31_HEADER
APQ8084 platform.
config MSM_CSIPHY
- bool "Qualcomm MSM Camera Serial Interface Physical receiver support"
+ bool "QTI MSM Camera Serial Interface Physical receiver support"
depends on MSMB_CAMERA
---help---
Enable support for Camera Serial Interface
@@ -78,7 +78,7 @@ config MSM_CSIPHY
signalling.
config MSM_CSID
- bool "Qualcomm MSM Camera Serial Interface decoder support"
+ bool "QTI MSM Camera Serial Interface decoder support"
depends on MSMB_CAMERA
---help---
Enable support for Camera Serial Interface decoder.
@@ -87,7 +87,7 @@ config MSM_CSID
and datatype.
config MSM_EEPROM
- bool "Qualcomm MSM Camera ROM Interface for Calibration support"
+ bool "QTI MSM Camera ROM Interface for Calibration support"
depends on MSMB_CAMERA
---help---
Enable support for ROM Interface for Calibration
@@ -96,7 +96,7 @@ config MSM_EEPROM
Currently supports I2C, CCI and SPI protocol
config MSM_ISPIF
- bool "Qualcomm MSM Image Signal Processing interface support"
+ bool "QTI MSM Image Signal Processing interface support"
depends on MSMB_CAMERA
---help---
Enable support for Image Signal Processing interface module.
@@ -105,7 +105,7 @@ config MSM_ISPIF
data interface in VFE.
config MSM_ISPIF_V1
- bool "Qualcomm MSM Image Signal Processing interface support"
+ bool "QTI MSM Image Signal Processing interface support"
depends on MSMB_CAMERA
---help---
Enable support for Image Signal Processing interface module.
@@ -114,7 +114,7 @@ config MSM_ISPIF_V1
or raw data interface in VFE.
config MSM_ISPIF_V2
- bool "Qualcomm MSM Image Signal Processing interface support"
+ bool "QTI MSM Image Signal Processing interface support"
depends on MSMB_CAMERA
---help---
Enable support for Image Signal Processing interface module.
@@ -204,7 +204,7 @@ config OV12830
2 lanes max fps is 18, 4 lanes max fps is 24.
config MSM_V4L2_VIDEO_OVERLAY_DEVICE
- tristate "Qualcomm MSM V4l2 video overlay device"
+ tristate "QTI MSM V4l2 video overlay device"
---help---
Enables support for the MSM V4L2 video
overlay driver. This allows video rendering
@@ -212,7 +212,7 @@ config MSM_V4L2_VIDEO_OVERLAY_DEVICE
APIs, by using /dev/videoX device
config MSMB_JPEG
- tristate "Qualcomm MSM Jpeg Encoder Engine support"
+ tristate "QTI MSM Jpeg Encoder Engine support"
depends on MSMB_CAMERA && (ARCH_MSM8974 || ARCH_MSM8226 || ARCH_APQ8084 || ARCH_MSM8916 || ARCH_QCOM)
---help---
Enable support for Jpeg Encoder/Decoder
@@ -221,7 +221,7 @@ config MSMB_JPEG
for the JPEG 1.0 encoder and decoder.
config MSM_GEMINI
- tristate "Qualcomm MSM Gemini JPEG engine support"
+ tristate "QTI MSM Gemini JPEG engine support"
depends on MSMB_CAMERA && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960)
---help---
Enables support for the Gemini JPEG encoder
@@ -230,14 +230,26 @@ config MSM_GEMINI
for JPEG encoding functionality.
config MSM_FD
- tristate "Qualcomm MSM FD face detection engine support"
+ tristate "QTI MSM FD face detection engine support"
depends on MSMB_CAMERA
---help---
Enables support for the MSM FD face detection engine.
config MSM_JPEGDMA
- tristate "Qualcomm Technologies Inc. MSM Jpeg dma"
+ tristate "QTI MSM Jpeg dma"
depends on MSMB_CAMERA
select V4L2_MEM2MEM_DEV
---help---
Enable support for Jpeg dma engine.
+
+config MSM_SEC_CCI_TA_NAME
+ string "Name of TA to handle Secure CCI transactions"
+ depends on MSM_CCI
+ default "seccamdemo64"
+
+config MSM_SEC_CCI_DEBUG
+ bool "QTI MSM Secure CCI Relay Debug"
+ depends on MSM_CCI
+ ---help---
+ Enables simulation of secure camera for Secure CCI Relay
+ debugging.
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index d3f6fa3fa52d..4200215705d0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -463,7 +463,8 @@ static int msm_isp_buf_unprepare(struct msm_isp_buf_mgr *buf_mgr,
static int msm_isp_get_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
- uint32_t bufq_handle, struct msm_isp_buffer **buf_info)
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info)
{
int rc = -1;
unsigned long flags;
@@ -513,8 +514,12 @@ static int msm_isp_get_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
}
break;
case MSM_ISP_BUFFER_SRC_HAL:
- vb2_v4l2_buf = buf_mgr->vb2_ops->get_buf(
- bufq->session_id, bufq->stream_id);
+ if (buf_index == MSM_ISP_INVALID_BUF_INDEX)
+ vb2_v4l2_buf = buf_mgr->vb2_ops->get_buf(
+ bufq->session_id, bufq->stream_id);
+ else
+ vb2_v4l2_buf = buf_mgr->vb2_ops->get_buf_by_idx(
+ bufq->session_id, bufq->stream_id, buf_index);
if (vb2_v4l2_buf) {
if (vb2_v4l2_buf->vb2_buf.index < bufq->num_bufs) {
*buf_info = &bufq->bufs[vb2_v4l2_buf
@@ -649,95 +654,35 @@ static int msm_isp_put_buf(struct msm_isp_buf_mgr *buf_mgr,
return rc;
}
-static int msm_isp_update_put_buf_cnt_unsafe(
- struct msm_isp_buf_mgr *buf_mgr,
- uint32_t id, uint32_t bufq_handle, int32_t buf_index,
- struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit)
+static int msm_isp_buf_divert(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id)
{
- int rc = -1;
+ unsigned long flags;
struct msm_isp_bufq *bufq = NULL;
struct msm_isp_buffer *buf_info = NULL;
- uint8_t *put_buf_mask = NULL;
bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
if (!bufq) {
pr_err("Invalid bufq\n");
- return rc;
- }
-
- put_buf_mask = &bufq->put_buf_mask[pingpong_bit];
-
- if (buf_index >= 0) {
- buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
- if (!buf_info) {
- pr_err("%s: buf not found\n", __func__);
- return -EFAULT;
- }
- if (buf_info->state != MSM_ISP_BUFFER_STATE_DEQUEUED) {
- pr_err(
- "%s: Invalid state, bufq_handle %x stream id %x, state %d\n",
- __func__, bufq_handle,
- bufq->stream_id, buf_info->state);
- return -EFAULT;
- }
- if (buf_info->pingpong_bit != pingpong_bit) {
- pr_err("%s: Pingpong bit mismatch\n", __func__);
- return -EFAULT;
- }
- }
-
- if (bufq->buf_type != ISP_SHARE_BUF ||
- (*put_buf_mask == 0)) {
- if (buf_info)
- buf_info->frame_id = frame_id;
- }
-
- if (bufq->buf_type == ISP_SHARE_BUF &&
- ((*put_buf_mask & (1 << id)) == 0)) {
- *put_buf_mask |= (1 << id);
- if (*put_buf_mask != ISP_SHARE_BUF_MASK) {
- rc = *put_buf_mask;
- return 1;
- }
- *put_buf_mask = 0;
- rc = 0;
- } else if (bufq->buf_type == ISP_SHARE_BUF &&
- (*put_buf_mask & (1 << id)) != 0) {
- return -ENOTEMPTY;
+ return -EINVAL;
}
- if (buf_info &&
- MSM_ISP_BUFFER_SRC_NATIVE == BUF_SRC(bufq->stream_id)) {
- buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
- buf_info->tv = tv;
- }
- return 0;
-}
-
-static int msm_isp_update_put_buf_cnt(struct msm_isp_buf_mgr *buf_mgr,
- uint32_t id, uint32_t bufq_handle, int32_t buf_index,
- struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit)
-{
- int rc = -1;
- struct msm_isp_bufq *bufq = NULL;
- unsigned long flags;
-
- bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
- if (!bufq) {
- pr_err("Invalid bufq\n");
- return rc;
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return -EINVAL;
}
spin_lock_irqsave(&bufq->bufq_lock, flags);
- rc = msm_isp_update_put_buf_cnt_unsafe(buf_mgr, id, bufq_handle,
- buf_index, tv, frame_id, pingpong_bit);
- if (-ENOTEMPTY == rc) {
- pr_err("%s: Error! Uncleared put_buf_mask for pingpong(%d) from vfe %d bufq 0x%x buf_idx %d\n",
- __func__, pingpong_bit, id, bufq_handle, buf_index);
- rc = -EFAULT;
+
+ buf_info->frame_id = frame_id;
+ if (BUF_SRC(bufq->stream_id) == MSM_ISP_BUFFER_SRC_NATIVE) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
+ buf_info->tv = tv;
}
spin_unlock_irqrestore(&bufq->bufq_lock, flags);
- return rc;
+ return 0;
}
static int msm_isp_buf_done(struct msm_isp_buf_mgr *buf_mgr,
@@ -795,11 +740,11 @@ done:
return rc;
}
-static int msm_isp_flush_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
+static int msm_isp_flush_buf(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, enum msm_isp_buffer_flush_t flush_type,
struct timeval *tv, uint32_t frame_id)
{
- int rc = 0, i;
+ int i;
struct msm_isp_bufq *bufq = NULL;
struct msm_isp_buffer *buf_info = NULL;
unsigned long flags;
@@ -817,43 +762,27 @@ static int msm_isp_flush_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
pr_err("%s: buf not found\n", __func__);
continue;
}
- if (flush_type == MSM_ISP_BUFFER_FLUSH_DIVERTED &&
- buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ switch (flush_type) {
+ case MSM_ISP_BUFFER_FLUSH_DIVERTED:
+ if (buf_info->state !=
+ MSM_ISP_BUFFER_STATE_DIVERTED)
+ continue;
buf_info->state = MSM_ISP_BUFFER_STATE_PREPARED;
msm_isp_put_buf_unsafe(buf_mgr,
- bufq_handle, buf_info->buf_idx);
- } else if (flush_type == MSM_ISP_BUFFER_FLUSH_ALL) {
- if (buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
- CDBG("%s: no need to queue Diverted buffer\n",
- __func__);
- } else if (buf_info->state ==
- MSM_ISP_BUFFER_STATE_DEQUEUED) {
- rc = msm_isp_update_put_buf_cnt_unsafe(buf_mgr,
- id, bufq_handle, buf_info->buf_idx, tv,
- frame_id, buf_info->pingpong_bit);
- if (-ENOTEMPTY == rc) {
- rc = 0;
- continue;
- }
-
- if (rc == 0) {
- buf_info->buf_debug.put_state[
- buf_info->buf_debug.
- put_state_last]
- = MSM_ISP_BUFFER_STATE_FLUSH;
- buf_info->buf_debug.put_state_last ^= 1;
- buf_info->state =
- MSM_ISP_BUFFER_STATE_PREPARED;
- rc = msm_isp_put_buf_unsafe(buf_mgr,
- bufq_handle, buf_info->buf_idx);
- if (rc == -EFAULT) {
- spin_unlock_irqrestore(
- &bufq->bufq_lock,
- flags);
- return rc;
- }
- }
- }
+ bufq_handle, buf_info->buf_idx);
+ break;
+ case MSM_ISP_BUFFER_FLUSH_ALL:
+ if (buf_info->state ==
+ MSM_ISP_BUFFER_STATE_DIVERTED)
+ continue;
+ if (buf_info->state !=
+ MSM_ISP_BUFFER_STATE_DEQUEUED)
+ continue;
+ msm_isp_put_buf_unsafe(buf_mgr,
+ bufq_handle, buf_info->buf_idx);
+ break;
+ default:
+ WARN(1, "Invalid flush type %d\n", flush_type);
}
}
@@ -1031,8 +960,6 @@ static int msm_isp_request_bufq(struct msm_isp_buf_mgr *buf_mgr,
bufq->stream_id = buf_request->stream_id;
bufq->num_bufs = buf_request->num_buf;
bufq->buf_type = buf_request->buf_type;
- for (i = 0; i < ISP_NUM_BUF_MASK; i++)
- bufq->put_buf_mask[i] = 0;
INIT_LIST_HEAD(&bufq->head);
for (i = 0; i < buf_request->num_buf; i++) {
@@ -1448,7 +1375,7 @@ static struct msm_isp_buf_ops isp_buf_ops = {
.buf_mgr_deinit = msm_isp_deinit_isp_buf_mgr,
.buf_mgr_debug = msm_isp_buf_mgr_debug,
.get_bufq = msm_isp_get_bufq,
- .update_put_buf_cnt = msm_isp_update_put_buf_cnt,
+ .buf_divert = msm_isp_buf_divert,
};
int msm_isp_create_isp_buf_mgr(
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
index b22fb6a43145..43519ee74062 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,8 @@
#define BUF_MGR_NUM_BUF_Q 28
#define MAX_IOMMU_CTX 2
+#define MSM_ISP_INVALID_BUF_INDEX 0xFFFFFFFF
+
struct msm_isp_buf_mgr;
enum msm_isp_buffer_src_t {
@@ -115,7 +117,6 @@ struct msm_isp_bufq {
enum msm_isp_buf_type buf_type;
struct msm_isp_buffer *bufs;
spinlock_t bufq_lock;
- uint8_t put_buf_mask[ISP_NUM_BUF_MASK];
/*Native buffer queue*/
struct list_head head;
};
@@ -140,7 +141,8 @@ struct msm_isp_buf_ops {
uint32_t bufq_handle, uint32_t *buf_src);
int (*get_buf)(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
- uint32_t bufq_handle, struct msm_isp_buffer **buf_info);
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info);
int (*get_buf_by_index)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, uint32_t buf_index,
@@ -154,7 +156,7 @@ struct msm_isp_buf_ops {
int (*put_buf)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, uint32_t buf_index);
- int (*flush_buf)(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
+ int (*flush_buf)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle, enum msm_isp_buffer_flush_t flush_type,
struct timeval *tv, uint32_t frame_id);
@@ -171,9 +173,9 @@ struct msm_isp_buf_ops {
unsigned long fault_addr);
struct msm_isp_bufq * (*get_bufq)(struct msm_isp_buf_mgr *buf_mgr,
uint32_t bufq_handle);
- int (*update_put_buf_cnt)(struct msm_isp_buf_mgr *buf_mgr,
- uint32_t id, uint32_t bufq_handle, int32_t buf_index,
- struct timeval *tv, uint32_t frame_id, uint32_t pingpong_bit);
+ int (*buf_divert)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id);
};
struct msm_isp_buf_mgr {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index d3c2d77b0107..094996b2d60b 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -498,7 +498,12 @@ static int vfe_probe(struct platform_device *pdev)
vfe_parent_dev->common_sd->common_data = &vfe_common_data;
memset(&vfe_common_data, 0, sizeof(vfe_common_data));
+ mutex_init(&vfe_common_data.vfe_common_mutex);
spin_lock_init(&vfe_common_data.common_dev_data_lock);
+ for (i = 0; i < (VFE_AXI_SRC_MAX * MAX_VFE); i++)
+ spin_lock_init(&(vfe_common_data.streams[i].lock));
+ for (i = 0; i < (MSM_ISP_STATS_MAX * MAX_VFE); i++)
+ spin_lock_init(&(vfe_common_data.stats_streams[i].lock));
of_property_read_u32(pdev->dev.of_node,
"num_child", &vfe_parent_dev->num_hw_sd);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 763b6a575326..3b6a2eecb4b6 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -169,7 +169,7 @@ struct msm_vfe_axi_ops {
int32_t (*cfg_io_format)(struct vfe_device *vfe_dev,
enum msm_vfe_axi_stream_src stream_src,
uint32_t io_format);
- void (*cfg_framedrop)(void __iomem *vfe_base,
+ void (*cfg_framedrop)(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info,
uint32_t framedrop_pattern, uint32_t framedrop_period);
void (*clear_framedrop)(struct vfe_device *vfe_dev,
@@ -207,7 +207,7 @@ struct msm_vfe_axi_ops {
uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
int (*halt)(struct vfe_device *vfe_dev, uint32_t blocking);
- int (*restart)(struct vfe_device *vfe_dev, uint32_t blocking,
+ void (*restart)(struct vfe_device *vfe_dev, uint32_t blocking,
uint32_t enable_camif);
void (*update_cgc_override)(struct vfe_device *vfe_dev,
uint8_t wm_idx, uint8_t cgc_override);
@@ -270,7 +270,7 @@ struct msm_vfe_stats_ops {
void (*enable_module)(struct vfe_device *vfe_dev,
uint32_t stats_mask, uint8_t enable);
- void (*update_ping_pong_addr)(void __iomem *vfe_base,
+ void (*update_ping_pong_addr)(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr);
@@ -373,12 +373,6 @@ enum msm_vfe_axi_state {
UPDATING,
};
-enum msm_vfe_axi_cfg_update_state {
- NO_AXI_CFG_UPDATE,
- APPLYING_UPDATE_RESUME,
- UPDATE_REQUESTED,
-};
-
#define VFE_NO_DROP 0xFFFFFFFF
#define VFE_DROP_EVERY_2FRAME 0x55555555
#define VFE_DROP_EVERY_4FRAME 0x11111111
@@ -394,9 +388,18 @@ enum msm_vfe_axi_stream_type {
struct msm_vfe_frame_request_queue {
struct list_head list;
enum msm_vfe_buff_queue_id buff_queue_id;
+ uint32_t buf_index;
uint8_t cmd_used;
};
+enum msm_isp_comp_irq_types {
+ MSM_ISP_COMP_IRQ_REG_UPD = 0,
+ MSM_ISP_COMP_IRQ_EPOCH = 1,
+ MSM_ISP_COMP_IRQ_PING_BUFDONE = 2,
+ MSM_ISP_COMP_IRQ_PONG_BUFDONE = 3,
+ MSM_ISP_COMP_IRQ_MAX = 4
+};
+
#define MSM_VFE_REQUESTQ_SIZE 8
struct msm_vfe_axi_stream {
@@ -404,10 +407,10 @@ struct msm_vfe_axi_stream {
enum msm_vfe_axi_state state;
enum msm_vfe_axi_stream_src stream_src;
uint8_t num_planes;
- uint8_t wm[MAX_PLANES_PER_STREAM];
+ uint8_t wm[MAX_VFE][MAX_PLANES_PER_STREAM];
uint32_t output_format;/*Planar/RAW/Misc*/
- struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
- uint8_t comp_mask_index;
+ struct msm_vfe_axi_plane_cfg plane_cfg[MAX_VFE][MAX_PLANES_PER_STREAM];
+ uint8_t comp_mask_index[MAX_VFE];
struct msm_isp_buffer *buf[2];
uint32_t session_id;
uint32_t stream_id;
@@ -419,7 +422,7 @@ struct msm_vfe_axi_stream {
struct list_head request_q;
struct msm_vfe_frame_request_queue
request_queue_cmd[MSM_VFE_REQUESTQ_SIZE];
- uint32_t stream_handle;
+ uint32_t stream_handle[MAX_VFE];
uint8_t buf_divert;
enum msm_vfe_axi_stream_type stream_type;
uint32_t frame_based;
@@ -432,16 +435,28 @@ struct msm_vfe_axi_stream {
spinlock_t lock;
/*Bandwidth calculation info*/
- uint32_t max_width;
+ uint32_t max_width[MAX_VFE];
/*Based on format plane size in Q2. e.g NV12 = 1.5*/
uint32_t format_factor;
- uint32_t bandwidth;
+ uint32_t bandwidth[MAX_VFE];
uint32_t runtime_num_burst_capture;
uint32_t runtime_output_format;
enum msm_stream_memory_input_t memory_input;
struct msm_isp_sw_framskip sw_skip;
uint8_t sw_ping_pong_bit;
+
+ struct vfe_device *vfe_dev[MAX_VFE];
+ int num_isp;
+ struct completion active_comp;
+ struct completion inactive_comp;
+ uint32_t update_vfe_mask;
+ /*
+ * bits in this mask are set that correspond to vfe_id of
+ * the vfe on which this stream operates
+ */
+ uint32_t vfe_mask;
+ uint32_t composite_irq[MSM_ISP_COMP_IRQ_MAX];
};
struct msm_vfe_axi_composite_info {
@@ -450,17 +465,15 @@ struct msm_vfe_axi_composite_info {
};
enum msm_vfe_camif_state {
- CAMIF_STOPPED,
CAMIF_ENABLE,
CAMIF_DISABLE,
- CAMIF_STOPPING,
};
struct msm_vfe_src_info {
uint32_t frame_id;
uint32_t reg_update_frame_id;
uint8_t active;
- uint8_t pix_stream_count;
+ uint8_t stream_count;
uint8_t raw_stream_count;
enum msm_vfe_inputmux input_mux;
uint32_t width;
@@ -491,7 +504,6 @@ enum msm_wm_ub_cfg_type {
struct msm_vfe_axi_shared_data {
struct msm_vfe_axi_hardware_info *hw_info;
- struct msm_vfe_axi_stream stream_info[VFE_AXI_SRC_MAX];
uint32_t free_wm[MAX_NUM_WM];
uint32_t wm_image_size[MAX_NUM_WM];
enum msm_wm_ub_cfg_type wm_ub_cfg_policy;
@@ -503,14 +515,11 @@ struct msm_vfe_axi_shared_data {
struct msm_vfe_axi_composite_info
composite_info[MAX_NUM_COMPOSITE_MASK];
uint8_t num_used_composite_mask;
- uint32_t stream_update[VFE_SRC_MAX];
atomic_t axi_cfg_update[VFE_SRC_MAX];
- enum msm_isp_camif_update_state pipeline_update;
struct msm_vfe_src_info src_info[VFE_SRC_MAX];
uint16_t stream_handle_cnt;
uint32_t event_mask;
uint8_t enable_frameid_recovery;
- enum msm_vfe_camif_state camif_state;
};
struct msm_vfe_stats_hardware_info {
@@ -522,7 +531,7 @@ struct msm_vfe_stats_hardware_info {
};
enum msm_vfe_stats_state {
- STATS_AVALIABLE,
+ STATS_AVAILABLE,
STATS_INACTIVE,
STATS_ACTIVE,
STATS_START_PENDING,
@@ -534,7 +543,7 @@ enum msm_vfe_stats_state {
struct msm_vfe_stats_stream {
uint32_t session_id;
uint32_t stream_id;
- uint32_t stream_handle;
+ uint32_t stream_handle[MAX_VFE];
uint32_t composite_flag;
enum msm_isp_stats_type stats_type;
enum msm_vfe_stats_state state;
@@ -544,17 +553,27 @@ struct msm_vfe_stats_stream {
uint32_t init_stats_frame_drop;
struct msm_isp_sw_framskip sw_skip;
- uint32_t buffer_offset;
+ uint32_t buffer_offset[MAX_VFE];
struct msm_isp_buffer *buf[2];
uint32_t bufq_handle;
+
+ spinlock_t lock;
+ struct vfe_device *vfe_dev[MAX_VFE];
+ int num_isp;
+ struct completion active_comp;
+ struct completion inactive_comp;
+ /*
+ * bits in this mask are set that correspond to vfe_id of
+ * the vfe on which this stream operates
+ */
+ uint32_t vfe_mask;
+ uint32_t composite_irq[MSM_ISP_COMP_IRQ_MAX];
};
struct msm_vfe_stats_shared_data {
- struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
uint8_t num_active_stream;
atomic_t stats_comp_mask[MAX_NUM_STATS_COMP_MASK];
uint16_t stream_handle_cnt;
- atomic_t stats_update;
};
struct msm_vfe_tasklet_queue_cmd {
@@ -653,7 +672,6 @@ struct dual_vfe_resource {
struct msm_vfe_stats_shared_data *stats_data[MAX_VFE];
struct msm_vfe_axi_shared_data *axi_data[MAX_VFE];
uint32_t wm_reload_mask[MAX_VFE];
- uint32_t epoch_sync_mask;
};
struct master_slave_resource_info {
@@ -671,6 +689,9 @@ struct msm_vfe_common_dev_data {
spinlock_t common_dev_data_lock;
struct dual_vfe_resource *dual_vfe_res;
struct master_slave_resource_info ms_resource;
+ struct msm_vfe_axi_stream streams[VFE_AXI_SRC_MAX * MAX_VFE];
+ struct msm_vfe_stats_stream stats_streams[MSM_ISP_STATS_MAX * MAX_VFE];
+ struct mutex vfe_common_mutex;
};
struct msm_vfe_common_subdev {
@@ -713,8 +734,6 @@ struct vfe_device {
/* Sync variables*/
struct completion reset_complete;
struct completion halt_complete;
- struct completion stream_config_complete;
- struct completion stats_config_complete;
struct mutex realtime_mutex;
struct mutex core_mutex;
spinlock_t shared_data_lock;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
index 9481bede6417..8b5a3d8d508d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -412,10 +412,9 @@ static void msm_vfe32_process_camif_irq(struct vfe_device *vfe_dev,
ISP_DBG("%s: SOF IRQ\n", __func__);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
+ stream_count == 0) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0, ts);
msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
}
}
@@ -608,15 +607,14 @@ static void msm_vfe32_process_reg_update(struct vfe_device *vfe_dev,
if ((rdi_status & BIT(7)) && (!(irq_status0 & 0x20)))
return;
}
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
}
for (i = VFE_RAW_0; i <= VFE_RAW_2; i++) {
if (irq_status1 & BIT(26 + (i - VFE_RAW_0))) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
+ msm_isp_axi_stream_update(vfe_dev, i, ts);
msm_isp_update_framedrop_reg(vfe_dev, i);
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
@@ -693,8 +691,9 @@ static void msm_vfe32_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ stream_info->comp_mask_index[vfe_idx];
uint32_t irq_mask;
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -711,7 +710,9 @@ static void msm_vfe32_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe32_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index =
+ stream_info->comp_mask_index[vfe_idx];
uint32_t irq_mask;
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
@@ -727,8 +728,10 @@ static void msm_vfe32_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t irq_mask;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- irq_mask |= BIT(stream_info->wm[0] + 6);
+ irq_mask |= BIT(stream_info->wm[vfe_idx][0] + 6);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
}
@@ -736,15 +739,19 @@ static void msm_vfe32_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t irq_mask;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- irq_mask &= ~BIT(stream_info->wm[0] + 6);
+ irq_mask &= ~BIT(stream_info->wm[vfe_idx][0] + 6);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
}
-static void msm_vfe32_cfg_framedrop(void __iomem *vfe_base,
+static void msm_vfe32_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+
if (stream_info->stream_src == PIX_ENCODER) {
msm_camera_io_w(framedrop_period - 1, vfe_base + 0x504);
msm_camera_io_w(framedrop_period - 1, vfe_base + 0x508);
@@ -929,7 +936,7 @@ static void msm_vfe32_update_camif_state(
VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.src_info[
- VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ VFE_PIX_0].stream_count > 0) ? 1 : 0);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
msm_camera_io_w(val, vfe_dev->vfe_base + 0x1E4);
@@ -971,16 +978,17 @@ static void msm_vfe32_axi_cfg_wm_reg(
uint8_t plane_idx)
{
uint32_t val;
- uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
if (!stream_info->frame_based) {
/*WR_IMAGE_SIZE*/
val =
((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+1)/2 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
@@ -988,9 +996,9 @@ static void msm_vfe32_axi_cfg_wm_reg(
val =
msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_stride) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1) << 4 | VFE32_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
} else {
@@ -998,9 +1006,9 @@ static void msm_vfe32_axi_cfg_wm_reg(
val =
msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1) << 4 | VFE32_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
}
@@ -1012,7 +1020,8 @@ static void msm_vfe32_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/*WR_IMAGE_SIZE*/
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
/*WR_BUFFER_CFG*/
@@ -1024,9 +1033,10 @@ static void msm_vfe32_axi_cfg_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ &stream_info->plane_cfg[vfe_idx][plane_idx];
+ uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
@@ -1080,7 +1090,8 @@ static void msm_vfe32_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm = stream_info->wm[vfe_idx][plane_idx];
uint32_t xbar_reg_cfg = 0;
xbar_reg_cfg = msm_camera_io_r(vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
@@ -1098,6 +1109,7 @@ static void msm_vfe32_cfg_axi_ub_equal_default(struct vfe_device *vfe_dev)
uint32_t prop_size = 0;
uint32_t wm_ub_size;
uint64_t delta;
+
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (axi_data->free_wm[i] > 0) {
num_used_wms++;
@@ -1243,9 +1255,11 @@ static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe32_stats_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t irq_mask;
irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
- irq_mask |= BIT(STATS_IDX(stream_info->stream_handle) + 13);
+ irq_mask |= BIT(STATS_IDX(stream_info->stream_handle[vfe_idx]) + 13);
msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
return;
}
@@ -1342,12 +1356,15 @@ static void msm_vfe32_stats_enable_module(struct vfe_device *vfe_dev,
msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x10);
}
-static void msm_vfe32_stats_update_ping_pong_addr(void __iomem *vfe_base,
+static void msm_vfe32_stats_update_ping_pong_addr(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE32_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index d42ada769380..a2aa2983b056 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -599,7 +599,6 @@ static void msm_vfe40_process_reg_update(struct vfe_device *vfe_dev,
return;
/* Shift status bits so that PIX REG UPDATE is 1st bit */
shift_irq = ((irq_status0 & 0xF0) >> 4);
-
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
if (shift_irq & BIT(i)) {
reg_updated |= BIT(i);
@@ -607,15 +606,17 @@ static void msm_vfe40_process_reg_update(struct vfe_device *vfe_dev,
(uint32_t)BIT(i));
switch (i) {
case VFE_PIX_0:
- msm_isp_save_framedrop_values(vfe_dev,
- VFE_PIX_0);
msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
VFE_PIX_0, ts);
- if (atomic_read(
- &vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- if (vfe_dev->axi_data.camif_state ==
- CAMIF_STOPPING)
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
+ if (vfe_dev->axi_data.src_info[i].stream_count
+ == 0 &&
+ vfe_dev->axi_data.src_info[i].
+ raw_stream_count == 0 &&
+ vfe_dev->axi_data.src_info[i].active)
vfe_dev->hw_info->vfe_ops.core_ops.
reg_update(vfe_dev, i);
break;
@@ -624,29 +625,22 @@ static void msm_vfe40_process_reg_update(struct vfe_device *vfe_dev,
case VFE_RAW_2:
msm_isp_increment_frame_id(vfe_dev, i, ts);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- msm_isp_update_framedrop_reg(vfe_dev, i);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
/*
* Reg Update is pseudo SOF for RDI,
* so request every frame
*/
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, i);
+ /* reg upd is also epoch for RDI */
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
break;
default:
pr_err("%s: Error case\n", __func__);
return;
}
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(
- &vfe_dev->axi_data.axi_cfg_update[i]) ==
- 0)
- msm_isp_notify(vfe_dev,
- ISP_EVENT_STREAM_UPDATE_DONE,
- i, ts);
- }
}
}
@@ -695,7 +689,9 @@ static void msm_vfe40_reg_update(struct vfe_device *vfe_dev,
vfe_dev->vfe_base + 0x378);
} else if (!vfe_dev->is_split ||
((frame_src == VFE_PIX_0) &&
- (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count == 0)) ||
(frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
msm_camera_io_w_mb(update_mask,
vfe_dev->vfe_base + 0x378);
@@ -713,16 +709,18 @@ static void msm_vfe40_process_epoch_irq(struct vfe_device *vfe_dev,
if (irq_status0 & BIT(2)) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
- msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
- msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_EPOCH);
msm_isp_update_error_frame_count(vfe_dev);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
+ stream_count == 0) {
ISP_DBG("%s: SOF IRQ\n", __func__);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, VFE_PIX_0);
}
@@ -791,8 +789,10 @@ static void msm_vfe40_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -807,8 +807,11 @@ static void msm_vfe40_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
- vfe_dev->irq0_mask &= ~BIT(27);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
+ vfe_dev->irq0_mask &= ~BIT(27);
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -821,32 +824,38 @@ static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe40_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe40_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe40_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe40_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
- MSM_ISP_IRQ_DISABLE);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[vfe_idx][0] + 8));
+ msm_vfe40_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
+ 0, MSM_ISP_IRQ_DISABLE);
}
-static void msm_vfe40_cfg_framedrop(void __iomem *vfe_base,
+static void msm_vfe40_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
uint32_t i, temp;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
msm_camera_io_w(framedrop_pattern, vfe_base +
- VFE40_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
temp = msm_camera_io_r(vfe_base +
- VFE40_WM_BASE(stream_info->wm[i]) + 0xC);
+ VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
temp &= 0xFFFFFF83;
msm_camera_io_w(temp | (framedrop_period - 1) << 2,
- vfe_base + VFE40_WM_BASE(stream_info->wm[i]) + 0xC);
+ vfe_base + VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
}
msm_camera_io_w_mb(0x1, vfe_base + 0x378);
@@ -856,9 +865,11 @@ static void msm_vfe40_clear_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++)
msm_camera_io_w(0, vfe_dev->vfe_base +
- VFE40_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE40_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
}
static int32_t msm_vfe40_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
@@ -1374,7 +1385,7 @@ static void msm_vfe40_update_camif_state(struct vfe_device *vfe_dev,
src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.
- src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
@@ -1443,7 +1454,10 @@ static void msm_vfe40_axi_cfg_wm_reg(
{
uint32_t val;
uint32_t burst_len, wm_bit_shift = VFE40_WM_BIT_SHIFT_8976_VERSION;
- uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE40_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
vfe_dev->vfe_hw_version == VFE40_8939_VERSION) {
@@ -1468,18 +1482,18 @@ static void msm_vfe40_axi_cfg_wm_reg(
val =
((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+1)/2 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
/*WR_BUFFER_CFG*/
val =
msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[
+ stream_info->plane_cfg[vfe_idx][
plane_idx].output_stride) << 16 |
- (stream_info->plane_cfg[
+ (stream_info->plane_cfg[vfe_idx][
plane_idx].output_height - 1) << wm_bit_shift |
burst_len;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
@@ -1487,9 +1501,9 @@ static void msm_vfe40_axi_cfg_wm_reg(
msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
val =
msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[
+ stream_info->plane_cfg[vfe_idx][
plane_idx].output_width) << 16 |
- (stream_info->plane_cfg[
+ (stream_info->plane_cfg[vfe_idx][
plane_idx].output_height - 1) << 4 |
burst_len;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
@@ -1507,7 +1521,10 @@ static void msm_vfe40_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE40_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/*WR_ADDR_CFG*/
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
/*WR_IMAGE_SIZE*/
@@ -1524,12 +1541,15 @@ static void msm_vfe40_axi_cfg_wm_xbar_reg(
struct msm_vfe_axi_stream *stream_info,
uint8_t plane_idx)
{
- struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ struct msm_vfe_axi_plane_cfg *plane_cfg;
+ uint8_t wm;
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
+ plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
switch (stream_info->stream_src) {
case PIX_ENCODER:
case PIX_VIEWFINDER: {
@@ -1584,9 +1604,12 @@ static void msm_vfe40_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm;
uint32_t xbar_reg_cfg = 0;
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
xbar_reg_cfg =
msm_camera_io_r(vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
xbar_reg_cfg &= ~(0xFFFF << VFE40_XBAR_SHIFT(wm));
@@ -1714,6 +1737,7 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
{
int rc = 0;
enum msm_vfe_input_src i;
+ struct msm_isp_timestamp ts;
/* Keep only halt and restart mask */
msm_vfe40_config_irq(vfe_dev, (1 << 31), (1 << 8),
@@ -1722,30 +1746,16 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
+
+ msm_isp_get_timestamp(&ts);
/* if any stream is waiting for update, signal complete */
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
- /* if any stream is waiting for update, signal complete */
- if (vfe_dev->axi_data.stream_update[i]) {
- ISP_DBG("%s: complete stream update\n", __func__);
- msm_isp_axi_stream_update(vfe_dev, i);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- }
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- ISP_DBG("%s: complete on axi config update\n",
- __func__);
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
- msm_isp_axi_cfg_update(vfe_dev, i);
- }
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
}
- if (atomic_read(&vfe_dev->stats_data.stats_update)) {
- ISP_DBG("%s: complete on stats update\n", __func__);
- msm_isp_stats_stream_update(vfe_dev);
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- }
+ msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_stats_stream_update(vfe_dev);
if (blocking) {
init_completion(&vfe_dev->halt_complete);
@@ -1764,7 +1774,7 @@ static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
return rc;
}
-static int msm_vfe40_axi_restart(struct vfe_device *vfe_dev,
+static void msm_vfe40_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif)
{
msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
@@ -1786,8 +1796,6 @@ static int msm_vfe40_axi_restart(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.core_ops.
update_camif_state(vfe_dev, ENABLE_CAMIF);
}
-
- return 0;
}
static uint32_t msm_vfe40_get_wm_mask(
@@ -1903,27 +1911,37 @@ static void msm_vfe40_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe40_config_irq(vfe_dev,
- 1 << (STATS_IDX(stream_info->stream_handle) + 16), 0,
- MSM_ISP_IRQ_ENABLE);
+ 1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 16), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe40_config_irq(vfe_dev,
- (1 << (STATS_IDX(stream_info->stream_handle) + 16)), 0,
- MSM_ISP_IRQ_DISABLE);
+ (1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 16)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe40_stats_cfg_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE40_STATS_BASE(stats_idx);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+ int stats_idx;
+ uint32_t stats_base;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE40_STATS_BASE(stats_idx);
/*WR_ADDR_CFG*/
msm_camera_io_w(stream_info->framedrop_period << 2,
vfe_dev->vfe_base + stats_base + 0x8);
@@ -1939,9 +1957,14 @@ static void msm_vfe40_stats_clear_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t val = 0;
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE40_STATS_BASE(stats_idx);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE40_STATS_BASE(stats_idx);
/*WR_ADDR_CFG*/
msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x8);
@@ -2095,11 +2118,16 @@ static void msm_vfe40_stats_enable_module(struct vfe_device *vfe_dev,
}
static void msm_vfe40_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE40_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 388656b9ca30..c77eff66ccca 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -437,15 +437,17 @@ static void msm_vfe44_process_reg_update(struct vfe_device *vfe_dev,
(uint32_t)BIT(i));
switch (i) {
case VFE_PIX_0:
- msm_isp_save_framedrop_values(vfe_dev,
- VFE_PIX_0);
msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
VFE_PIX_0, ts);
- if (atomic_read(
- &vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- if (vfe_dev->axi_data.camif_state ==
- CAMIF_STOPPING)
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
+ if (vfe_dev->axi_data.src_info[i].stream_count
+ == 0 &&
+ vfe_dev->axi_data.src_info[i].
+ raw_stream_count == 0 &&
+ vfe_dev->axi_data.src_info[i].active)
vfe_dev->hw_info->vfe_ops.core_ops.
reg_update(vfe_dev, i);
break;
@@ -454,29 +456,22 @@ static void msm_vfe44_process_reg_update(struct vfe_device *vfe_dev,
case VFE_RAW_2:
msm_isp_increment_frame_id(vfe_dev, i, ts);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- msm_isp_update_framedrop_reg(vfe_dev, i);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
/*
* Reg Update is pseudo SOF for RDI,
* so request every frame
*/
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, i);
+ /* reg upd is epoch for rdi */
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
break;
default:
pr_err("%s: Error case\n", __func__);
return;
}
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(
- &vfe_dev->axi_data.axi_cfg_update[i]) ==
- 0)
- msm_isp_notify(vfe_dev,
- ISP_EVENT_STREAM_UPDATE_DONE,
- i, ts);
- }
}
}
@@ -498,17 +493,19 @@ static void msm_vfe44_process_epoch_irq(struct vfe_device *vfe_dev,
if (irq_status0 & BIT(2)) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
- msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
- msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_EPOCH);
msm_isp_update_error_frame_count(vfe_dev);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
+ stream_count == 0) {
ISP_DBG("%s: SOF IRQ\n", __func__);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, VFE_PIX_0);
}
}
@@ -550,7 +547,9 @@ static void msm_vfe44_reg_update(struct vfe_device *vfe_dev,
vfe_dev->vfe_base + 0x378);
} else if (!vfe_dev->is_split ||
((frame_src == VFE_PIX_0) &&
- (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count == 0)) ||
(frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
msm_camera_io_w_mb(update_mask,
vfe_dev->vfe_base + 0x378);
@@ -628,8 +627,10 @@ static void msm_vfe44_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -644,7 +645,10 @@ static void msm_vfe44_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe44_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -657,31 +661,38 @@ static void msm_vfe44_axi_clear_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe44_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe44_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe44_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe44_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
- MSM_ISP_IRQ_DISABLE);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe44_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
+ 0, MSM_ISP_IRQ_DISABLE);
}
-static void msm_vfe44_cfg_framedrop(void __iomem *vfe_base,
+static void msm_vfe44_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
uint32_t i, temp;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
msm_camera_io_w(framedrop_pattern, vfe_base +
- VFE44_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE44_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
temp = msm_camera_io_r(vfe_base +
- VFE44_WM_BASE(stream_info->wm[i]) + 0xC);
+ VFE44_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
temp &= 0xFFFFFF83;
msm_camera_io_w(temp | (framedrop_period - 1) << 2,
- vfe_base + VFE44_WM_BASE(stream_info->wm[i]) + 0xC);
+ vfe_base +
+ VFE44_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
}
}
@@ -689,9 +700,11 @@ static void msm_vfe44_clear_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++)
msm_camera_io_w(0, vfe_dev->vfe_base +
- VFE44_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE44_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
}
static int32_t msm_vfe44_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
@@ -1039,7 +1052,7 @@ static void msm_vfe44_update_camif_state(struct vfe_device *vfe_dev,
src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.
- src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
@@ -1101,7 +1114,10 @@ static void msm_vfe44_axi_cfg_wm_reg(
uint8_t plane_idx)
{
uint32_t val;
- uint32_t wm_base = VFE44_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE44_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
if (!stream_info->frame_based) {
msm_camera_io_w(0x0, vfe_dev->vfe_base + wm_base);
@@ -1109,28 +1125,30 @@ static void msm_vfe44_axi_cfg_wm_reg(
val =
((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+1)/2 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
/*WR_BUFFER_CFG*/
- val = (stream_info->plane_cfg[plane_idx].output_height - 1);
+ val = (stream_info->plane_cfg[vfe_idx][plane_idx].
+ output_height - 1);
val = (((val & 0xfff) << 2) | ((val >> 12) & 0x3));
val = val << 2 |
msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[
+ stream_info->plane_cfg[vfe_idx][
plane_idx].output_stride) << 16 |
VFE44_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
} else {
msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
- val = (stream_info->plane_cfg[plane_idx].output_height - 1);
+ val = (stream_info->plane_cfg[vfe_idx][plane_idx].
+ output_height - 1);
val = (((val & 0xfff) << 2) | ((val >> 12) & 0x3));
val = val << 2 |
msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[
+ stream_info->plane_cfg[vfe_idx][
plane_idx].output_width) << 16 |
VFE44_BURST_LEN;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
@@ -1147,8 +1165,10 @@ static void msm_vfe44_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE44_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+ wm_base = VFE44_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/*WR_ADDR_CFG*/
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
/*WR_IMAGE_SIZE*/
@@ -1164,12 +1184,15 @@ static void msm_vfe44_axi_cfg_wm_xbar_reg(
struct msm_vfe_axi_stream *stream_info,
uint8_t plane_idx)
{
- struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ struct msm_vfe_axi_plane_cfg *plane_cfg;
+ uint8_t wm;
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
+ plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
switch (stream_info->stream_src) {
case PIX_ENCODER:
case PIX_VIEWFINDER: {
@@ -1223,9 +1246,12 @@ static void msm_vfe44_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm;
uint32_t xbar_reg_cfg = 0;
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
xbar_reg_cfg =
msm_camera_io_r(vfe_dev->vfe_base + VFE44_XBAR_BASE(wm));
xbar_reg_cfg &= ~(0xFFFF << VFE44_XBAR_SHIFT(wm));
@@ -1245,6 +1271,7 @@ static void msm_vfe44_cfg_axi_ub_equal_default(
uint32_t prop_size = 0;
uint32_t wm_ub_size;
uint64_t delta;
+
for (i = 0; i < axi_data->hw_info->num_wm; i++) {
if (axi_data->free_wm[i] > 0) {
num_used_wms++;
@@ -1316,6 +1343,7 @@ static int msm_vfe44_axi_halt(struct vfe_device *vfe_dev,
{
int rc = 0;
enum msm_vfe_input_src i;
+ struct msm_isp_timestamp ts;
/* Keep only halt and restart mask */
msm_vfe44_config_irq(vfe_dev, (1 << 31), (1 << 8),
@@ -1349,34 +1377,20 @@ static int msm_vfe44_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
}
+ msm_isp_get_timestamp(&ts);
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
/* if any stream is waiting for update, signal complete */
- if (vfe_dev->axi_data.stream_update[i]) {
- ISP_DBG("%s: complete stream update\n", __func__);
- msm_isp_axi_stream_update(vfe_dev, i);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- }
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- ISP_DBG("%s: complete on axi config update\n",
- __func__);
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
- msm_isp_axi_cfg_update(vfe_dev, i);
- }
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
}
- if (atomic_read(&vfe_dev->stats_data.stats_update)) {
- ISP_DBG("%s: complete on stats update\n", __func__);
- msm_isp_stats_stream_update(vfe_dev);
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- }
+ msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_stats_stream_update(vfe_dev);
return rc;
}
-static int msm_vfe44_axi_restart(struct vfe_device *vfe_dev,
+static void msm_vfe44_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif)
{
msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
@@ -1397,8 +1411,6 @@ static int msm_vfe44_axi_restart(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.core_ops.
update_camif_state(vfe_dev, ENABLE_CAMIF);
}
-
- return 0;
}
static uint32_t msm_vfe44_get_wm_mask(
@@ -1450,15 +1462,15 @@ static int msm_vfe44_stats_check_streams(
struct msm_vfe_stats_stream *stream_info)
{
if (stream_info[STATS_IDX_BF].state ==
- STATS_AVALIABLE &&
+ STATS_AVAILABLE &&
stream_info[STATS_IDX_BF_SCALE].state !=
- STATS_AVALIABLE) {
+ STATS_AVAILABLE) {
pr_err("%s: does not support BF_SCALE while BF is disabled\n",
__func__);
return -EINVAL;
}
- if (stream_info[STATS_IDX_BF].state != STATS_AVALIABLE &&
- stream_info[STATS_IDX_BF_SCALE].state != STATS_AVALIABLE &&
+ if (stream_info[STATS_IDX_BF].state != STATS_AVAILABLE &&
+ stream_info[STATS_IDX_BF_SCALE].state != STATS_AVAILABLE &&
stream_info[STATS_IDX_BF].composite_flag !=
stream_info[STATS_IDX_BF_SCALE].composite_flag) {
pr_err("%s: Different composite flag for BF and BF_SCALE\n",
@@ -1541,27 +1553,37 @@ static void msm_vfe44_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe44_config_irq(vfe_dev,
- 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
- MSM_ISP_IRQ_ENABLE);
+ 1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 15), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe44_config_irq(vfe_dev,
- (1 << (STATS_IDX(stream_info->stream_handle) + 15)), 0,
- MSM_ISP_IRQ_DISABLE);
+ (1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 15)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe44_stats_cfg_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE44_STATS_BASE(stats_idx);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+ int stats_idx;
+ uint32_t stats_base;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE44_STATS_BASE(stats_idx);
/* BF_SCALE does not have its own WR_ADDR_CFG,
* IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
* it's using the same from BF */
@@ -1582,9 +1604,14 @@ static void msm_vfe44_stats_clear_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t val = 0;
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE44_STATS_BASE(stats_idx);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE44_STATS_BASE(stats_idx);
/* BF_SCALE does not have its own WR_ADDR_CFG,
* IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
* it's using the same from BF */
@@ -1742,12 +1769,16 @@ static void msm_vfe44_stats_update_cgc_override(struct vfe_device *vfe_dev,
}
static void msm_vfe44_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE44_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index 40bb044fde47..6336892b1b4e 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -376,46 +376,40 @@ static void msm_vfe46_process_reg_update(struct vfe_device *vfe_dev,
switch (i) {
case VFE_PIX_0:
- msm_isp_save_framedrop_values(vfe_dev,
- VFE_PIX_0);
msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
VFE_PIX_0, ts);
- if (atomic_read(
- &vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- if (vfe_dev->axi_data.camif_state ==
- CAMIF_STOPPING)
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
+ msm_isp_stats_stream_update(vfe_dev);
+ if (vfe_dev->axi_data.src_info[i].stream_count
+ == 0 &&
+ vfe_dev->axi_data.src_info[i].active)
vfe_dev->hw_info->vfe_ops.core_ops.
- reg_update(vfe_dev, i);
+ reg_update(vfe_dev, i);
break;
case VFE_RAW_0:
case VFE_RAW_1:
case VFE_RAW_2:
msm_isp_increment_frame_id(vfe_dev, i, ts);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- msm_isp_update_framedrop_reg(vfe_dev, i);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
/*
* Reg Update is pseudo SOF for RDI,
* so request every frame
*/
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, i);
+ /* reg upd is also epoch for rdi */
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
break;
default:
pr_err("%s: Error case\n", __func__);
return;
}
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(
- &vfe_dev->axi_data.axi_cfg_update[i]) ==
- 0)
- msm_isp_notify(vfe_dev,
- ISP_EVENT_STREAM_UPDATE_DONE,
- i, ts);
- }
}
}
@@ -437,14 +431,16 @@ static void msm_vfe46_process_epoch_irq(struct vfe_device *vfe_dev,
if (irq_status0 & BIT(2)) {
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
- msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
- msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_EPOCH);
msm_isp_update_error_frame_count(vfe_dev);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ stream_count == 0) {
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, VFE_PIX_0);
}
@@ -488,7 +484,9 @@ static void msm_vfe46_reg_update(struct vfe_device *vfe_dev,
vfe_dev->vfe_base + 0x3D8);
} else if (!vfe_dev->is_split ||
((frame_src == VFE_PIX_0) &&
- (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count == 0)) ||
(frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
msm_camera_io_w_mb(update_mask,
vfe_dev->vfe_base + 0x3D8);
@@ -567,8 +565,10 @@ static void msm_vfe46_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -583,7 +583,10 @@ static void msm_vfe46_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe46_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
@@ -596,31 +599,37 @@ static void msm_vfe46_axi_clear_comp_mask(struct vfe_device *vfe_dev,
static void msm_vfe46_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe46_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe46_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe46_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
- MSM_ISP_IRQ_DISABLE);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe46_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
+ 0, MSM_ISP_IRQ_DISABLE);
}
-static void msm_vfe46_cfg_framedrop(void __iomem *vfe_base,
+static void msm_vfe46_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
uint32_t i, temp;
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
msm_camera_io_w(framedrop_pattern, vfe_base +
- VFE46_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE46_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
temp = msm_camera_io_r(vfe_base +
- VFE46_WM_BASE(stream_info->wm[i]) + 0xC);
+ VFE46_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
temp &= 0xFFFFFF83;
msm_camera_io_w(temp | (framedrop_period - 1) << 2,
- vfe_base + VFE46_WM_BASE(stream_info->wm[i]) + 0xC);
+ vfe_base + VFE46_WM_BASE(stream_info->wm[vfe_idx][i]) + 0xC);
}
}
@@ -628,10 +637,11 @@ static void msm_vfe46_clear_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++)
msm_camera_io_w(0, vfe_dev->vfe_base +
- VFE46_WM_BASE(stream_info->wm[i]) + 0x1C);
+ VFE46_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x1C);
}
static int32_t msm_vfe46_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
@@ -1114,7 +1124,7 @@ static void msm_vfe46_update_camif_state(struct vfe_device *vfe_dev,
src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.
- src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x3AC);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
@@ -1178,7 +1188,10 @@ static void msm_vfe46_axi_cfg_wm_reg(
uint8_t plane_idx)
{
uint32_t val;
- uint32_t wm_base = VFE46_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE46_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
val = msm_camera_io_r(vfe_dev->vfe_base + wm_base + 0xC);
val &= ~0x2;
@@ -1190,17 +1203,18 @@ static void msm_vfe46_axi_cfg_wm_reg(
val =
((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+3)/4 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
/* WR_BUFFER_CFG */
val = VFE46_BURST_LEN |
- (stream_info->plane_cfg[plane_idx].output_height - 1) <<
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
+ output_height - 1) <<
2 |
((msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_stride)+1)/2) << 16;
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
}
@@ -1215,7 +1229,10 @@ static void msm_vfe46_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE46_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+
+ wm_base = VFE46_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/* WR_ADDR_CFG */
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
@@ -1232,12 +1249,15 @@ static void msm_vfe46_axi_cfg_wm_xbar_reg(
struct msm_vfe_axi_stream *stream_info,
uint8_t plane_idx)
{
- struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ struct msm_vfe_axi_plane_cfg *plane_cfg;
+ uint8_t wm;
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
+ plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
switch (stream_info->stream_src) {
case PIX_VIDEO:
case PIX_ENCODER:
@@ -1295,9 +1315,12 @@ static void msm_vfe46_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm;
uint32_t xbar_reg_cfg = 0;
+ wm = stream_info->wm[vfe_idx][plane_idx];
+
xbar_reg_cfg =
msm_camera_io_r(vfe_dev->vfe_base + VFE46_XBAR_BASE(wm));
xbar_reg_cfg &= ~(0xFFFF << VFE46_XBAR_SHIFT(wm));
@@ -1407,6 +1430,7 @@ static int msm_vfe46_axi_halt(struct vfe_device *vfe_dev,
{
int rc = 0;
enum msm_vfe_input_src i;
+ struct msm_isp_timestamp ts;
/* Keep only halt and restart mask */
msm_vfe46_config_irq(vfe_dev, (1 << 31), (1 << 8),
@@ -1440,34 +1464,19 @@ static int msm_vfe46_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x374);
}
+ msm_isp_get_timestamp(&ts);
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
- /* if any stream is waiting for update, signal complete */
- if (vfe_dev->axi_data.stream_update[i]) {
- ISP_DBG("%s: complete stream update\n", __func__);
- msm_isp_axi_stream_update(vfe_dev, i);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- }
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- ISP_DBG("%s: complete on axi config update\n",
- __func__);
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
- msm_isp_axi_cfg_update(vfe_dev, i);
- }
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
}
- if (atomic_read(&vfe_dev->stats_data.stats_update)) {
- ISP_DBG("%s: complete on stats update\n", __func__);
- msm_isp_stats_stream_update(vfe_dev);
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- }
+ msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_stats_stream_update(vfe_dev);
return rc;
}
-static int msm_vfe46_axi_restart(struct vfe_device *vfe_dev,
+static void msm_vfe46_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif)
{
msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
@@ -1488,8 +1497,6 @@ static int msm_vfe46_axi_restart(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.core_ops.
update_camif_state(vfe_dev, ENABLE_CAMIF);
}
-
- return 0;
}
static uint32_t msm_vfe46_get_wm_mask(
@@ -1541,15 +1548,15 @@ static int msm_vfe46_stats_check_streams(
struct msm_vfe_stats_stream *stream_info)
{
if (stream_info[STATS_IDX_BF].state ==
- STATS_AVALIABLE &&
+ STATS_AVAILABLE &&
stream_info[STATS_IDX_BF_SCALE].state !=
- STATS_AVALIABLE) {
+ STATS_AVAILABLE) {
pr_err("%s: does not support BF_SCALE while BF is disabled\n",
__func__);
return -EINVAL;
}
- if (stream_info[STATS_IDX_BF].state != STATS_AVALIABLE &&
- stream_info[STATS_IDX_BF_SCALE].state != STATS_AVALIABLE &&
+ if (stream_info[STATS_IDX_BF].state != STATS_AVAILABLE &&
+ stream_info[STATS_IDX_BF_SCALE].state != STATS_AVAILABLE &&
stream_info[STATS_IDX_BF].composite_flag !=
stream_info[STATS_IDX_BF_SCALE].composite_flag) {
pr_err("%s: Different composite flag for BF and BF_SCALE\n",
@@ -1632,26 +1639,37 @@ static void msm_vfe46_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe46_config_irq(vfe_dev,
- 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
- MSM_ISP_IRQ_ENABLE);
+ 1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 15), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
msm_vfe46_config_irq(vfe_dev,
- 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
- MSM_ISP_IRQ_DISABLE);
+ 1 << (STATS_IDX(stream_info->stream_handle[vfe_idx]) + 15), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe46_stats_cfg_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE46_STATS_BASE(stats_idx);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE46_STATS_BASE(stats_idx);
/*
* BF_SCALE does not have its own WR_ADDR_CFG,
@@ -1676,10 +1694,14 @@ static void msm_vfe46_stats_clear_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t val = 0;
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE46_STATS_BASE(stats_idx);
+ int stats_idx;
+ uint32_t stats_base;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE46_STATS_BASE(stats_idx);
/*
* BF_SCALE does not have its own WR_ADDR_CFG,
* IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
@@ -1845,12 +1867,16 @@ static void msm_vfe46_stats_enable_module(struct vfe_device *vfe_dev,
}
static void msm_vfe46_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx;
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE46_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
}
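A recurring pattern in the msm_isp46.c hunks above: the per-stream bookkeeping (wm[], plane_cfg[], comp_mask_index[], stream_handle[]) gains a leading per-VFE dimension, and every hardware op first resolves its own slot through msm_isp_get_vfe_idx_for_stream()/msm_isp_get_vfe_idx_for_stats_stream(). The helpers themselves are not part of this diff; the standalone sketch below shows one plausible way such a lookup could work, using simplified placeholder types (MAX_VFE, vfe_device and axi_stream here are illustrative stand-ins, not the kernel definitions).

/* Illustrative sketch only: per-VFE slot lookup with placeholder types. */
#include <stdio.h>

#define MAX_VFE 2

struct vfe_device { int id; };

struct axi_stream {
    int num_isp;                        /* number of VFEs sharing the stream */
    struct vfe_device *vfe_dev[MAX_VFE];
    int wm[MAX_VFE][4];                 /* per-VFE write-master assignment */
};

/* Return the slot of vfe in the stream's per-VFE arrays, or -1 if absent. */
static int get_vfe_idx_for_stream(struct vfe_device *vfe,
                                  struct axi_stream *stream)
{
    int i;

    for (i = 0; i < stream->num_isp; i++)
        if (stream->vfe_dev[i] == vfe)
            return i;
    return -1;
}

int main(void)
{
    struct vfe_device vfe0 = { 0 }, vfe1 = { 1 };
    struct axi_stream s = { .num_isp = 2, .vfe_dev = { &vfe0, &vfe1 } };

    s.wm[0][0] = 2;     /* VFE0 writes plane 0 through write master 2 */
    s.wm[1][0] = 5;     /* VFE1 writes plane 0 through write master 5 */
    printf("vfe1 plane0 wm = %d\n",
           s.wm[get_vfe_idx_for_stream(&vfe1, &s)][0]);
    return 0;
}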
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 290f100ffeba..b434161f5599 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -562,19 +562,20 @@ void msm_vfe47_process_reg_update(struct vfe_device *vfe_dev,
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
if (shift_irq & BIT(i)) {
reg_updated |= BIT(i);
- ISP_DBG("%s REG_UPDATE IRQ %x\n", __func__,
- (uint32_t)BIT(i));
+ ISP_DBG("%s REG_UPDATE IRQ %x vfe %d\n", __func__,
+ (uint32_t)BIT(i), vfe_dev->pdev->id);
switch (i) {
case VFE_PIX_0:
- msm_isp_save_framedrop_values(vfe_dev,
- VFE_PIX_0);
msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
VFE_PIX_0, ts);
- if (atomic_read(
- &vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- if (vfe_dev->axi_data.camif_state ==
- CAMIF_STOPPING)
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_REG_UPD);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
+ /* if 0 streams then force reg update */
+ if (vfe_dev->axi_data.src_info
+ [i].stream_count == 0 &&
+ vfe_dev->axi_data.src_info[i].active)
vfe_dev->hw_info->vfe_ops.core_ops.
reg_update(vfe_dev, i);
break;
@@ -582,31 +583,23 @@ void msm_vfe47_process_reg_update(struct vfe_device *vfe_dev,
case VFE_RAW_1:
case VFE_RAW_2:
msm_isp_increment_frame_id(vfe_dev, i, ts);
- msm_isp_save_framedrop_values(vfe_dev, i);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
- msm_isp_update_framedrop_reg(vfe_dev, i);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
/*
* Reg Update is pseudo SOF for RDI,
* so request every frame
*/
vfe_dev->hw_info->vfe_ops.core_ops.
reg_update(vfe_dev, i);
+ /* reg upd is also epoch for RDI */
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, i,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
break;
default:
pr_err("%s: Error case\n", __func__);
return;
}
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(
- &vfe_dev->axi_data.axi_cfg_update[i]) ==
- 0)
- msm_isp_notify(vfe_dev,
- ISP_EVENT_STREAM_UPDATE_DONE,
- i, ts);
- }
}
}
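The msm_vfe47_process_reg_update hunk above treats the per-interface reg-update bits differently: for VFE_PIX_0 the IRQ completes pending stream and stats updates, while for the RDI interfaces it is a pseudo SOF that also serves as the epoch, so a fresh reg-update is requested every frame. A minimal standalone sketch of that decode, with simplified stand-in enums rather than the kernel definitions:

/* Illustrative sketch only: decoding per-interface reg-update bits. */
#include <stdio.h>

enum input_src { SRC_PIX_0, SRC_RAW_0, SRC_RAW_1, SRC_RAW_2 };

static void handle_reg_update(unsigned int shift_irq)
{
    int i;

    for (i = SRC_PIX_0; i <= SRC_RAW_2; i++) {
        if (!(shift_irq & (1u << i)))
            continue;
        if (i == SRC_PIX_0) {
            /* PIX: complete pending stream/stats updates */
            printf("PIX_0 reg update\n");
        } else {
            /* RDI: reg update is a pseudo SOF and also the epoch,
             * so a new reg update is requested for every frame */
            printf("RAW_%d reg update (pseudo SOF + epoch)\n",
                   i - SRC_RAW_0);
        }
    }
}

int main(void)
{
    handle_reg_update((1u << SRC_PIX_0) | (1u << SRC_RAW_1));
    return 0;
}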
@@ -627,15 +620,17 @@ void msm_vfe47_process_epoch_irq(struct vfe_device *vfe_dev,
if (irq_status0 & BIT(2)) {
ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
- msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
- msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_EPOCH, ts);
+ msm_isp_process_stats_reg_upd_epoch_irq(vfe_dev,
+ MSM_ISP_COMP_IRQ_EPOCH);
msm_isp_update_error_frame_count(vfe_dev);
msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
- if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
- msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ stream_count == 0) {
+ msm_isp_process_reg_upd_epoch_irq(vfe_dev, VFE_PIX_0,
+ MSM_ISP_COMP_IRQ_REG_UPD, ts);
vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
vfe_dev, VFE_PIX_0);
}
@@ -679,7 +674,9 @@ void msm_vfe47_reg_update(struct vfe_device *vfe_dev,
vfe_dev->vfe_base + 0x4AC);
} else if (!vfe_dev->is_split ||
((frame_src == VFE_PIX_0) &&
- (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].stream_count == 0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count == 0)) ||
(frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
msm_camera_io_w_mb(update_mask,
vfe_dev->vfe_base + 0x4AC);
@@ -768,9 +765,10 @@ void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t comp_mask, comp_mask_index =
- stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
comp_mask |= (axi_data->composite_info[comp_mask_index].
@@ -784,8 +782,10 @@ void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t comp_mask, comp_mask_index;
+ comp_mask_index = stream_info->comp_mask_index[vfe_idx];
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
@@ -797,31 +797,37 @@ void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe47_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe47_config_irq(vfe_dev, 1 << (stream_info->wm[vfe_idx][0] + 8), 0,
MSM_ISP_IRQ_ENABLE);
}
void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- msm_vfe47_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
- MSM_ISP_IRQ_DISABLE);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ msm_vfe47_config_irq(vfe_dev, (1 << (stream_info->wm[vfe_idx][0] + 8)),
+ 0, MSM_ISP_IRQ_DISABLE);
}
-void msm_vfe47_cfg_framedrop(void __iomem *vfe_base,
+void msm_vfe47_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
uint32_t i, temp;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
msm_camera_io_w(framedrop_pattern, vfe_base +
- VFE47_WM_BASE(stream_info->wm[i]) + 0x24);
+ VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x24);
temp = msm_camera_io_r(vfe_base +
- VFE47_WM_BASE(stream_info->wm[i]) + 0x14);
+ VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x14);
temp &= 0xFFFFFF83;
msm_camera_io_w(temp | (framedrop_period - 1) << 2,
- vfe_base + VFE47_WM_BASE(stream_info->wm[i]) + 0x14);
+ vfe_base + VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x14);
}
}
@@ -829,10 +835,11 @@ void msm_vfe47_clear_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
uint32_t i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++)
msm_camera_io_w(0, vfe_dev->vfe_base +
- VFE47_WM_BASE(stream_info->wm[i]) + 0x24);
+ VFE47_WM_BASE(stream_info->wm[vfe_idx][i]) + 0x24);
}
static int32_t msm_vfe47_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
@@ -1395,7 +1402,7 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
vfe_en =
((vfe_dev->axi_data.
- src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ src_info[VFE_PIX_0].stream_count > 0) ? 1 : 0);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
val &= 0xFFFFFF3F;
val = val | bus_en << 7 | vfe_en << 6;
@@ -1404,7 +1411,6 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x478);
/* configure EPOCH0 for 20 lines */
msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x4A0);
- vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
/* testgen GO*/
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
msm_camera_io_w(1, vfe_dev->vfe_base + 0xC58);
@@ -1427,7 +1433,6 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
poll_val, poll_val & 0x80000000, 1000, 2000000))
pr_err("%s: camif disable failed %x\n",
__func__, poll_val);
- vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
/* testgen OFF*/
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0xC58);
@@ -1469,8 +1474,10 @@ void msm_vfe47_axi_cfg_wm_reg(
uint8_t plane_idx)
{
uint32_t val;
- uint32_t wm_base = VFE47_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+ wm_base = VFE47_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
val = msm_camera_io_r(vfe_dev->vfe_base + wm_base + 0x14);
val &= ~0x2;
if (stream_info->frame_based)
@@ -1480,17 +1487,18 @@ void msm_vfe47_axi_cfg_wm_reg(
/* WR_IMAGE_SIZE */
val = ((msm_isp_cal_word_per_line(
stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_width)+3)/4 - 1) << 16 |
- (stream_info->plane_cfg[plane_idx].
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
output_height - 1);
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x1C);
/* WR_BUFFER_CFG */
val = VFE47_BURST_LEN |
- (stream_info->plane_cfg[plane_idx].output_height - 1) <<
+ (stream_info->plane_cfg[vfe_idx][plane_idx].
+ output_height - 1) <<
2 |
((msm_isp_cal_word_per_line(stream_info->output_format,
- stream_info->plane_cfg[plane_idx].
+ stream_info->plane_cfg[vfe_idx][plane_idx].
output_stride)+1)/2) << 16;
}
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
@@ -1504,8 +1512,10 @@ void msm_vfe47_axi_clear_wm_reg(
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
uint32_t val = 0;
- uint32_t wm_base = VFE47_WM_BASE(stream_info->wm[plane_idx]);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint32_t wm_base;
+ wm_base = VFE47_WM_BASE(stream_info->wm[vfe_idx][plane_idx]);
/* WR_ADDR_CFG */
msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
/* WR_IMAGE_SIZE */
@@ -1521,12 +1531,14 @@ void msm_vfe47_axi_cfg_wm_xbar_reg(
struct msm_vfe_axi_stream *stream_info,
uint8_t plane_idx)
{
- struct msm_vfe_axi_plane_cfg *plane_cfg =
- &stream_info->plane_cfg[plane_idx];
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ struct msm_vfe_axi_plane_cfg *plane_cfg;
+ uint8_t wm;
uint32_t xbar_cfg = 0;
uint32_t xbar_reg_cfg = 0;
+ plane_cfg = &stream_info->plane_cfg[vfe_idx][plane_idx];
+ wm = stream_info->wm[vfe_idx][plane_idx];
switch (stream_info->stream_src) {
case PIX_VIDEO:
case PIX_ENCODER:
@@ -1585,9 +1597,11 @@ void msm_vfe47_axi_clear_wm_xbar_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
{
- uint8_t wm = stream_info->wm[plane_idx];
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ uint8_t wm;
uint32_t xbar_reg_cfg = 0;
+ wm = stream_info->wm[vfe_idx][plane_idx];
xbar_reg_cfg =
msm_camera_io_r(vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
xbar_reg_cfg &= ~(0xFFFF << VFE47_XBAR_SHIFT(wm));
@@ -1707,6 +1721,7 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
int rc = 0;
enum msm_vfe_input_src i;
uint32_t val = 0;
+ struct msm_isp_timestamp ts;
val = msm_camera_io_r(vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
val |= 0x1;
@@ -1746,34 +1761,20 @@ int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400);
}
+ msm_isp_get_timestamp(&ts);
for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
- /* if any stream is waiting for update, signal complete */
- if (vfe_dev->axi_data.stream_update[i]) {
- ISP_DBG("%s: complete stream update\n", __func__);
- msm_isp_axi_stream_update(vfe_dev, i);
- if (vfe_dev->axi_data.stream_update[i])
- msm_isp_axi_stream_update(vfe_dev, i);
- }
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
- ISP_DBG("%s: complete on axi config update\n",
- __func__);
- msm_isp_axi_cfg_update(vfe_dev, i);
- if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
- msm_isp_axi_cfg_update(vfe_dev, i);
- }
+ /* if any stream is waiting for an update, signal fake completions */
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
+ msm_isp_axi_stream_update(vfe_dev, i, &ts);
}
- if (atomic_read(&vfe_dev->stats_data.stats_update)) {
- ISP_DBG("%s: complete on stats update\n", __func__);
- msm_isp_stats_stream_update(vfe_dev);
- if (atomic_read(&vfe_dev->stats_data.stats_update))
- msm_isp_stats_stream_update(vfe_dev);
- }
+ msm_isp_stats_stream_update(vfe_dev);
+ msm_isp_stats_stream_update(vfe_dev);
return rc;
}
-int msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
+void msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif)
{
msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
@@ -1793,8 +1794,6 @@ int msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
vfe_dev->hw_info->vfe_ops.core_ops.
update_camif_state(vfe_dev, ENABLE_CAMIF);
}
-
- return 0;
}
uint32_t msm_vfe47_get_wm_mask(
@@ -1912,7 +1911,10 @@ void msm_vfe47_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- switch (STATS_IDX(stream_info->stream_handle)) {
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+
+ switch (STATS_IDX(stream_info->stream_handle[vfe_idx])) {
case STATS_COMP_IDX_AEC_BG:
msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_ENABLE);
break;
@@ -1943,7 +1945,7 @@ void msm_vfe47_stats_cfg_wm_irq_mask(
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
- STATS_IDX(stream_info->stream_handle));
+ STATS_IDX(stream_info->stream_handle[vfe_idx]));
}
}
@@ -1951,12 +1953,10 @@ void msm_vfe47_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- uint32_t irq_mask, irq_mask_1;
-
- irq_mask = vfe_dev->irq0_mask;
- irq_mask_1 = vfe_dev->irq1_mask;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
- switch (STATS_IDX(stream_info->stream_handle)) {
+ switch (STATS_IDX(stream_info->stream_handle[vfe_idx])) {
case STATS_COMP_IDX_AEC_BG:
msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_DISABLE);
break;
@@ -1987,7 +1987,7 @@ void msm_vfe47_stats_clear_wm_irq_mask(
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
- STATS_IDX(stream_info->stream_handle));
+ STATS_IDX(stream_info->stream_handle[vfe_idx]));
}
}
@@ -1995,8 +1995,13 @@ void msm_vfe47_stats_cfg_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE47_STATS_BASE(stats_idx);
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE47_STATS_BASE(stats_idx);
/* WR_ADDR_CFG */
msm_camera_io_w(stream_info->framedrop_period << 2,
@@ -2013,9 +2018,14 @@ void msm_vfe47_stats_clear_wm_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t val = 0;
- int stats_idx = STATS_IDX(stream_info->stream_handle);
- uint32_t stats_base = VFE47_STATS_BASE(stats_idx);
+ int stats_idx;
+ uint32_t stats_base;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
+ stats_base = VFE47_STATS_BASE(stats_idx);
/* WR_ADDR_CFG */
msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x10);
@@ -2171,11 +2181,16 @@ void msm_vfe47_stats_enable_module(struct vfe_device *vfe_dev,
}
void msm_vfe47_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr)
{
+ void __iomem *vfe_base = vfe_dev->vfe_base;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev,
+ stream_info);
uint32_t paddr32 = (paddr & 0xFFFFFFFF);
- int stats_idx = STATS_IDX(stream_info->stream_handle);
+ int stats_idx;
+
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
msm_camera_io_w(paddr32, vfe_base +
VFE47_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
index 737f845c7272..8581373b3b71 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
@@ -56,7 +56,7 @@ void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
-void msm_vfe47_cfg_framedrop(void __iomem *vfe_base,
+void msm_vfe47_cfg_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
uint32_t framedrop_period);
void msm_vfe47_clear_framedrop(struct vfe_device *vfe_dev,
@@ -107,7 +107,7 @@ void msm_vfe47_update_ping_pong_addr(
int32_t buf_size);
int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
uint32_t blocking);
-int msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
+void msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
uint32_t blocking, uint32_t enable_camif);
uint32_t msm_vfe47_get_wm_mask(
uint32_t irq_status0, uint32_t irq_status1);
@@ -141,7 +141,7 @@ bool msm_vfe47_is_module_cfg_lock_needed(
void msm_vfe47_stats_enable_module(struct vfe_device *vfe_dev,
uint32_t stats_mask, uint8_t enable);
void msm_vfe47_stats_update_ping_pong_addr(
- void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ struct vfe_device *vfe_dev, struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status, dma_addr_t paddr);
uint32_t msm_vfe47_stats_get_wm_mask(
uint32_t irq_status0, uint32_t irq_status1);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index ac2d508269a4..572e3c637c7b 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -22,9 +22,12 @@ static int msm_isp_update_dual_HW_ms_info_at_start(
struct vfe_device *vfe_dev,
enum msm_vfe_input_src stream_src);
-static int msm_isp_update_dual_HW_axi(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info);
+static void msm_isp_reload_ping_pong_offset(
+ struct msm_vfe_axi_stream *stream_info);
+
+static void __msm_isp_axi_stream_update(
+ struct msm_vfe_axi_stream *stream_info,
+ struct msm_isp_timestamp *ts);
#define DUAL_VFE_AND_VFE1(s, v) ((s->stream_src < RDI_INTF_0) && \
v->is_split && vfe_dev->pdev->id == ISP_VFE1)
@@ -33,105 +36,151 @@ static int msm_isp_update_dual_HW_axi(
((s->stream_src >= RDI_INTF_0) && \
(stream_info->stream_src <= RDI_INTF_2)))
-static inline struct msm_vfe_axi_stream *msm_isp_vfe_get_stream(
- struct dual_vfe_resource *dual_vfe_res,
- int vfe_id, uint32_t index)
-{
- struct msm_vfe_axi_shared_data *axi_data =
- dual_vfe_res->axi_data[vfe_id];
- return &axi_data->stream_info[index];
-}
-
-static inline struct msm_vfe_axi_stream *msm_isp_get_controllable_stream(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info)
-{
- if (vfe_dev->is_split && stream_info->stream_src < RDI_INTF_0 &&
- stream_info->controllable_output)
- return msm_isp_vfe_get_stream(
- vfe_dev->common_data->dual_vfe_res,
- ISP_VFE1,
- HANDLE_TO_IDX(
- stream_info->stream_handle));
- return stream_info;
-}
-
-int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
+static int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd,
+ struct msm_vfe_axi_stream *stream_info)
{
- uint32_t i = stream_cfg_cmd->stream_src;
-
- if (i >= VFE_AXI_SRC_MAX) {
- pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
- stream_cfg_cmd->stream_src);
- return -EINVAL;
- }
+ uint32_t i;
+ int rc = 0;
- if (axi_data->stream_info[i].state != AVAILABLE) {
+ if (stream_info->state != AVAILABLE) {
pr_err("%s:%d invalid state %d expected %d for src %d\n",
- __func__, __LINE__, axi_data->stream_info[i].state,
+ __func__, __LINE__, stream_info->state,
AVAILABLE, i);
return -EINVAL;
}
+ if (stream_info->num_isp == 0) {
+ stream_info->session_id = stream_cfg_cmd->session_id;
+ stream_info->stream_id = stream_cfg_cmd->stream_id;
+ stream_info->buf_divert = stream_cfg_cmd->buf_divert;
+ stream_info->stream_src = stream_cfg_cmd->stream_src;
+ stream_info->controllable_output =
+ stream_cfg_cmd->controllable_output;
+ stream_info->activated_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ if (stream_cfg_cmd->controllable_output)
+ stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
+ INIT_LIST_HEAD(&stream_info->request_q);
+ } else {
+ /* check if the stream has been added for the vfe-device */
+ if (stream_info->vfe_mask & (1 << vfe_dev->pdev->id)) {
+ pr_err("%s: stream %p/%x is already added for vfe dev %d vfe_mask %x\n",
+ __func__, stream_info, stream_info->stream_id,
+ vfe_dev->pdev->id, stream_info->vfe_mask);
+ return -EINVAL;
+ }
+ if (stream_info->session_id != stream_cfg_cmd->session_id) {
+ pr_err("%s: dual stream session id mismatch %d/%d\n",
+ __func__, stream_info->session_id,
+ stream_cfg_cmd->session_id);
+ rc = -EINVAL;
+ }
+ if (stream_info->stream_id != stream_cfg_cmd->stream_id) {
+ pr_err("%s: dual stream stream id mismatch %d/%d\n",
+ __func__, stream_info->stream_id,
+ stream_cfg_cmd->stream_id);
+ rc = -EINVAL;
+ }
+ if (stream_info->controllable_output !=
+ stream_cfg_cmd->controllable_output) {
+ pr_err("%s: dual stream controllable_op mismatch %d/%d\n",
+ __func__, stream_info->controllable_output,
+ stream_cfg_cmd->controllable_output);
+ rc = -EINVAL;
+ }
+ if (stream_info->buf_divert != stream_cfg_cmd->buf_divert) {
+ pr_err("%s: dual stream buf_divert mismatch %d/%d\n",
+ __func__, stream_info->buf_divert,
+ stream_cfg_cmd->buf_divert);
+ rc = -EINVAL;
+ }
+ if (rc)
+ return rc;
+ }
+ stream_info->vfe_dev[stream_info->num_isp] = vfe_dev;
+ stream_info->num_isp++;
+
if ((axi_data->stream_handle_cnt << 8) == 0)
axi_data->stream_handle_cnt++;
stream_cfg_cmd->axi_stream_handle =
- (++axi_data->stream_handle_cnt) << 8 | i;
+ (++axi_data->stream_handle_cnt) << 8 | stream_info->stream_src;
ISP_DBG("%s: vfe %d handle %x\n", __func__, vfe_dev->pdev->id,
stream_cfg_cmd->axi_stream_handle);
- memset(&axi_data->stream_info[i], 0,
- sizeof(struct msm_vfe_axi_stream));
- spin_lock_init(&axi_data->stream_info[i].lock);
- axi_data->stream_info[i].session_id = stream_cfg_cmd->session_id;
- axi_data->stream_info[i].stream_id = stream_cfg_cmd->stream_id;
- axi_data->stream_info[i].buf_divert = stream_cfg_cmd->buf_divert;
- axi_data->stream_info[i].state = INACTIVE;
- axi_data->stream_info[i].stream_handle =
+ stream_info->stream_handle[stream_info->num_isp - 1] =
stream_cfg_cmd->axi_stream_handle;
- axi_data->stream_info[i].controllable_output =
- stream_cfg_cmd->controllable_output;
- axi_data->stream_info[i].activated_framedrop_period =
- MSM_VFE_STREAM_STOP_PERIOD;
- if (stream_cfg_cmd->controllable_output)
- stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
- INIT_LIST_HEAD(&axi_data->stream_info[i].request_q);
+ stream_info->vfe_mask |= (1 << vfe_dev->pdev->id);
+
+ if (!vfe_dev->is_split || stream_cfg_cmd->stream_src >= RDI_INTF_0 ||
+ stream_info->num_isp == MAX_VFE) {
+ stream_info->state = INACTIVE;
+
+ for (i = 0; i < MSM_ISP_COMP_IRQ_MAX; i++)
+ stream_info->composite_irq[i] = 0;
+ }
return 0;
}
-void msm_isp_axi_destroy_stream(
- struct msm_vfe_axi_shared_data *axi_data, int stream_idx)
+static void msm_isp_axi_destroy_stream(
+ struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
{
- if (axi_data->stream_info[stream_idx].state != AVAILABLE) {
- axi_data->stream_info[stream_idx].state = AVAILABLE;
- axi_data->stream_info[stream_idx].stream_handle = 0;
- } else {
- pr_err("%s: stream does not exist\n", __func__);
+ int k;
+ int j;
+ int i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ /*
+ * For the index being removed, shift everything to its right by 1
+ * so that the index being removed becomes the last index
+ */
+ for (i = vfe_idx, k = vfe_idx + 1; k < stream_info->num_isp; k++, i++) {
+ stream_info->vfe_dev[i] = stream_info->vfe_dev[k];
+ stream_info->stream_handle[i] = stream_info->stream_handle[k];
+ stream_info->bandwidth[i] = stream_info->bandwidth[k];
+ stream_info->max_width[i] = stream_info->max_width[k];
+ stream_info->comp_mask_index[i] =
+ stream_info->comp_mask_index[k];
+ for (j = 0; j < stream_info->num_planes; j++) {
+ stream_info->plane_cfg[i][j] =
+ stream_info->plane_cfg[k][j];
+ stream_info->wm[i][j] = stream_info->wm[k][j];
+ }
+ }
+
+ stream_info->num_isp--;
+ stream_info->vfe_dev[stream_info->num_isp] = NULL;
+ stream_info->stream_handle[stream_info->num_isp] = 0;
+ stream_info->bandwidth[stream_info->num_isp] = 0;
+ stream_info->max_width[stream_info->num_isp] = 0;
+ stream_info->comp_mask_index[stream_info->num_isp] = -1;
+ stream_info->vfe_mask &= ~(1 << vfe_dev->pdev->id);
+ for (j = 0; j < stream_info->num_planes; j++) {
+ stream_info->wm[stream_info->num_isp][j] = -1;
+ memset(&stream_info->plane_cfg[stream_info->num_isp][j],
+ 0, sizeof(
+ stream_info->plane_cfg[stream_info->num_isp][j]));
+ }
+
+ if (stream_info->num_isp == 0) {
+ /* release the bufq */
+ for (k = 0; k < VFE_BUF_QUEUE_MAX; k++)
+ stream_info->bufq_handle[k] = 0;
+ stream_info->vfe_mask = 0;
+ stream_info->state = AVAILABLE;
}
}
-int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
+static int msm_isp_validate_axi_request(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
{
int rc = -1, i;
- struct msm_vfe_axi_stream *stream_info = NULL;
- if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)
- < VFE_AXI_SRC_MAX) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
- } else {
- pr_err("%s: Invalid axi_stream_handle\n", __func__);
- return rc;
- }
-
- if (!stream_info) {
- pr_err("%s: Stream info is NULL\n", __func__);
- return -EINVAL;
- }
+ int vfe_idx;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
switch (stream_cfg_cmd->output_format) {
case V4L2_PIX_FMT_YUYV:
@@ -236,9 +285,13 @@ int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
return rc;
}
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++) {
- stream_info->plane_cfg[i] = stream_cfg_cmd->plane_cfg[i];
- stream_info->max_width = max(stream_info->max_width,
+ stream_info->plane_cfg[vfe_idx][i] =
+ stream_cfg_cmd->plane_cfg[i];
+ stream_info->max_width[vfe_idx] =
+ max(stream_info->max_width[vfe_idx],
stream_cfg_cmd->plane_cfg[i].output_width);
}
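The msm_isp_axi_destroy_stream hunk above compacts the per-VFE arrays when one VFE detaches from a shared stream: every entry to the right of the removed slot is shifted left by one and the freed last slot is cleared; the stream only returns to AVAILABLE once num_isp reaches zero. A standalone sketch of that compaction, with placeholder fields (the real structure also carries bandwidth, wm, plane_cfg and the bufq handles):

/* Illustrative sketch only: per-VFE slot removal with placeholder types. */
#include <stdio.h>

#define MAX_VFE 2

struct stream {
    int num_isp;
    int handle[MAX_VFE];
    int comp_mask_index[MAX_VFE];
};

static void remove_slot(struct stream *s, int idx)
{
    int i;

    /* shift everything to the right of idx left by one */
    for (i = idx; i + 1 < s->num_isp; i++) {
        s->handle[i] = s->handle[i + 1];
        s->comp_mask_index[i] = s->comp_mask_index[i + 1];
    }
    s->num_isp--;
    /* clear the freed last slot */
    s->handle[s->num_isp] = 0;
    s->comp_mask_index[s->num_isp] = -1;
}

int main(void)
{
    struct stream s = { 2, { 0x101, 0x201 }, { 0, 1 } };

    remove_slot(&s, 0);     /* VFE0 releases its half of the stream */
    printf("num_isp=%d handle[0]=0x%x\n", s.num_isp, s.handle[0]);
    return 0;
}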
@@ -250,10 +303,11 @@ int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
}
static uint32_t msm_isp_axi_get_plane_size(
- struct msm_vfe_axi_stream *stream_info, int plane_idx)
+ struct msm_vfe_axi_stream *stream_info, int vfe_idx, int plane_idx)
{
uint32_t size = 0;
- struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
+ struct msm_vfe_axi_plane_cfg *plane_cfg =
+ stream_info->plane_cfg[vfe_idx];
switch (stream_info->output_format) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
@@ -355,37 +409,41 @@ static uint32_t msm_isp_axi_get_plane_size(
return size;
}
-void msm_isp_axi_reserve_wm(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_shared_data *axi_data,
+static void msm_isp_axi_reserve_wm(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
int i, j;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++) {
for (j = 0; j < axi_data->hw_info->num_wm; j++) {
if (!axi_data->free_wm[j]) {
axi_data->free_wm[j] =
- stream_info->stream_handle;
+ stream_info->stream_handle[vfe_idx];
axi_data->wm_image_size[j] =
msm_isp_axi_get_plane_size(
- stream_info, i);
+ stream_info, vfe_idx, i);
axi_data->num_used_wm++;
break;
}
}
ISP_DBG("%s vfe %d stream_handle %x wm %d\n", __func__,
vfe_dev->pdev->id,
- stream_info->stream_handle, j);
- stream_info->wm[i] = j;
+ stream_info->stream_handle[vfe_idx], j);
+ stream_info->wm[vfe_idx][i] = j;
}
}
-void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+void msm_isp_axi_free_wm(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
int i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
- axi_data->free_wm[stream_info->wm[i]] = 0;
+ axi_data->free_wm[stream_info->wm[vfe_idx][i]] = 0;
axi_data->num_used_wm--;
}
if (stream_info->stream_src <= IDEAL_RAW)
@@ -394,88 +452,47 @@ void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
axi_data->num_rdi_stream++;
}
-void msm_isp_axi_reserve_comp_mask(
- struct msm_vfe_axi_shared_data *axi_data,
+static void msm_isp_axi_reserve_comp_mask(
+ struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
int i;
uint8_t comp_mask = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++)
- comp_mask |= 1 << stream_info->wm[i];
+ comp_mask |= 1 << stream_info->wm[vfe_idx][i];
for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
if (!axi_data->composite_info[i].stream_handle) {
axi_data->composite_info[i].stream_handle =
- stream_info->stream_handle;
+ stream_info->stream_handle[vfe_idx];
axi_data->composite_info[i].
stream_composite_mask = comp_mask;
axi_data->num_used_composite_mask++;
break;
}
}
- stream_info->comp_mask_index = i;
+ stream_info->comp_mask_index[vfe_idx] = i;
return;
}
-void msm_isp_axi_free_comp_mask(struct msm_vfe_axi_shared_data *axi_data,
+static void msm_isp_axi_free_comp_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- axi_data->composite_info[stream_info->comp_mask_index].
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ axi_data->composite_info[stream_info->comp_mask_index[vfe_idx]].
stream_composite_mask = 0;
- axi_data->composite_info[stream_info->comp_mask_index].
+ axi_data->composite_info[stream_info->comp_mask_index[vfe_idx]].
stream_handle = 0;
axi_data->num_used_composite_mask--;
}
-int msm_isp_axi_check_stream_state(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
-{
- int rc = 0, i;
- unsigned long flags;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- struct msm_vfe_axi_stream *stream_info;
- enum msm_vfe_axi_state valid_state =
- (stream_cfg_cmd->cmd == START_STREAM) ? INACTIVE : ACTIVE;
-
- if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
- return -EINVAL;
-
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
- VFE_AXI_SRC_MAX) {
- return -EINVAL;
- }
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_info->state != valid_state) {
- if ((stream_info->state == PAUSING ||
- stream_info->state == PAUSED ||
- stream_info->state == RESUME_PENDING ||
- stream_info->state == RESUMING ||
- stream_info->state == UPDATING) &&
- (stream_cfg_cmd->cmd == STOP_STREAM ||
- stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
- stream_info->state = ACTIVE;
- } else {
- pr_err("%s: Invalid stream state: %d\n",
- __func__, stream_info->state);
- spin_unlock_irqrestore(
- &stream_info->lock, flags);
- if (stream_cfg_cmd->cmd == START_STREAM)
- rc = -EINVAL;
- break;
- }
- }
- spin_unlock_irqrestore(&stream_info->lock, flags);
- }
- return rc;
-}
-
/**
* msm_isp_cfg_framedrop_reg() - Program the period and pattern
- * @vfe_dev: The device for which the period and pattern is programmed
* @stream_info: The stream for which programming is done
*
* This function calculates the period and pattern to be configured
@@ -484,15 +501,15 @@ int msm_isp_axi_check_stream_state(
*
* Returns void.
*/
-static void msm_isp_cfg_framedrop_reg(struct vfe_device *vfe_dev,
+static void msm_isp_cfg_framedrop_reg(
struct msm_vfe_axi_stream *stream_info)
{
- struct msm_vfe_axi_stream *vfe0_stream_info = NULL;
+ struct vfe_device *vfe_dev = stream_info->vfe_dev[0];
uint32_t runtime_init_frame_drop;
-
uint32_t framedrop_pattern = 0;
uint32_t framedrop_period = MSM_VFE_STREAM_STOP_PERIOD;
enum msm_vfe_input_src frame_src = SRC_TO_INTF(stream_info->stream_src);
+ int i;
if (vfe_dev->axi_data.src_info[frame_src].frame_id >=
stream_info->init_frame_drop)
@@ -507,127 +524,45 @@ static void msm_isp_cfg_framedrop_reg(struct vfe_device *vfe_dev,
if (MSM_VFE_STREAM_STOP_PERIOD != framedrop_period)
framedrop_pattern = 0x1;
- ISP_DBG("%s: stream %x framedrop pattern %x period %u\n", __func__,
- stream_info->stream_handle, framedrop_pattern,
- framedrop_period);
-
BUG_ON(0 == framedrop_period);
- if (DUAL_VFE_AND_VFE1(stream_info, vfe_dev)) {
- vfe0_stream_info = msm_isp_vfe_get_stream(
- vfe_dev->common_data->dual_vfe_res,
- ISP_VFE0,
- HANDLE_TO_IDX(
- stream_info->stream_handle));
+ for (i = 0; i < stream_info->num_isp; i++) {
+ vfe_dev = stream_info->vfe_dev[i];
vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
- vfe_dev->common_data->dual_vfe_res->
- vfe_base[ISP_VFE0],
- vfe0_stream_info, framedrop_pattern,
- framedrop_period);
- vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
- vfe_dev->vfe_base, stream_info,
- framedrop_pattern,
- framedrop_period);
+ vfe_dev, stream_info, framedrop_pattern,
+ framedrop_period);
+ }
- stream_info->requested_framedrop_period =
- framedrop_period;
- vfe0_stream_info->requested_framedrop_period =
- framedrop_period;
+ ISP_DBG("%s: stream %x src %x framedrop pattern %x period %u\n",
+ __func__,
+ stream_info->stream_handle[0], stream_info->stream_src,
+ framedrop_pattern, framedrop_period);
- } else if (RDI_OR_NOT_DUAL_VFE(vfe_dev, stream_info)) {
- vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
- vfe_dev->vfe_base, stream_info, framedrop_pattern,
- framedrop_period);
- stream_info->requested_framedrop_period = framedrop_period;
- }
+ stream_info->requested_framedrop_period = framedrop_period;
}
-/**
- * msm_isp_check_epoch_status() - check the epock signal for framedrop
- *
- * @vfe_dev: The h/w on which the epoch signel is reveived
- * @frame_src: The source of the epoch signal for this frame
- *
- * For dual vfe case and pixel stream, if both vfe's epoch signal is
- * received, this function will return success.
- * It will also return the vfe1 for further process
- * For none dual VFE stream or none pixl source, this
- * funciton will just return success.
- *
- * Returns 1 - epoch received is complete.
- * 0 - epoch reveived is not complete.
- */
-static int msm_isp_check_epoch_status(struct vfe_device **vfe_dev,
- enum msm_vfe_input_src frame_src)
+static int msm_isp_composite_irq(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ enum msm_isp_comp_irq_types irq)
{
- struct vfe_device *vfe_dev_cur = *vfe_dev;
- struct vfe_device *vfe_dev_other = NULL;
- uint32_t vfe_id_other = 0;
- uint32_t vfe_id_cur = 0;
- uint32_t epoch_mask = 0;
- unsigned long flags;
- int completed = 0;
-
- spin_lock_irqsave(
- &vfe_dev_cur->common_data->common_dev_data_lock, flags);
-
- if (vfe_dev_cur->is_split &&
- frame_src == VFE_PIX_0) {
- if (vfe_dev_cur->pdev->id == ISP_VFE0) {
- vfe_id_cur = ISP_VFE0;
- vfe_id_other = ISP_VFE1;
- } else {
- vfe_id_cur = ISP_VFE1;
- vfe_id_other = ISP_VFE0;
- }
- vfe_dev_other = vfe_dev_cur->common_data->dual_vfe_res->
- vfe_dev[vfe_id_other];
-
- if (vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask & (1 << vfe_id_cur)) {
- /* serious scheduling delay */
- pr_err("Missing epoch: vfe %d, epoch mask 0x%x\n",
- vfe_dev_cur->pdev->id,
- vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask);
- goto fatal;
- }
-
- vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask |= (1 << vfe_id_cur);
-
- epoch_mask = (1 << vfe_id_cur) | (1 << vfe_id_other);
- if ((vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask & epoch_mask) == epoch_mask) {
-
- if (vfe_id_other == ISP_VFE0)
- *vfe_dev = vfe_dev_cur;
- else
- *vfe_dev = vfe_dev_other;
+ /* interrupt received again on this vfe before the other vfe reported it */
+ if (stream_info->composite_irq[irq] & (1 << vfe_dev->pdev->id)) {
+ pr_err("%s: irq %d out of sync for dual vfe on vfe %d\n",
+ __func__, irq, vfe_dev->pdev->id);
+ return -EINVAL;
+ }
- vfe_dev_cur->common_data->dual_vfe_res->
- epoch_sync_mask &= ~epoch_mask;
- completed = 1;
- }
- } else
- completed = 1;
+ stream_info->composite_irq[irq] |= (1 << vfe_dev->pdev->id);
+ if (stream_info->composite_irq[irq] != stream_info->vfe_mask)
+ return 1;
- spin_unlock_irqrestore(
- &vfe_dev_cur->common_data->common_dev_data_lock, flags);
+ stream_info->composite_irq[irq] = 0;
- return completed;
-fatal:
- spin_unlock_irqrestore(
- &vfe_dev_cur->common_data->common_dev_data_lock, flags);
- /* new error event code will be added later */
- msm_isp_halt_send_error(vfe_dev_cur, ISP_EVENT_PING_PONG_MISMATCH);
return 0;
}
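msm_isp_composite_irq above aggregates the same IRQ type across the VFEs sharing a stream: each VFE sets its bit, the call returns 1 while bits are still missing, 0 once every bit in vfe_mask is present (the mask is then cleared for the next frame), and an error if one VFE reports twice before the other catches up. The same logic as a standalone sketch with placeholder types:

/* Illustrative sketch only: dual-VFE IRQ aggregation with placeholder types. */
#include <stdio.h>

struct stream {
    unsigned int vfe_mask;       /* one bit per VFE attached to the stream */
    unsigned int composite_irq;  /* bits of the VFEs that reported this IRQ */
};

/* 0: all VFEs reported, 1: still waiting, -1: same VFE reported twice. */
static int composite_irq(struct stream *s, int vfe_id)
{
    if (s->composite_irq & (1u << vfe_id))
        return -1;               /* out of sync between the two VFEs */

    s->composite_irq |= (1u << vfe_id);
    if (s->composite_irq != s->vfe_mask)
        return 1;                /* wait for the other VFE */

    s->composite_irq = 0;        /* complete; reset for the next frame */
    return 0;
}

int main(void)
{
    struct stream s = { .vfe_mask = 0x3, .composite_irq = 0 };

    printf("%d\n", composite_irq(&s, 0));   /* 1: waiting for VFE1 */
    printf("%d\n", composite_irq(&s, 1));   /* 0: both reported */
    return 0;
}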
-
/**
* msm_isp_update_framedrop_reg() - Update frame period pattern on h/w
- * @vfe_dev: The h/w on which the perion pattern is updated.
- * @frame_src: Input source.
+ * @stream_info: Stream for which update is to be performed
*
* If the period and pattern needs to be updated for a stream then it is
* updated here. Updates happen if initial frame drop reaches 0 or burst
@@ -635,47 +570,75 @@ fatal:
*
* Returns void
*/
-void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src)
+static void msm_isp_update_framedrop_reg(struct msm_vfe_axi_stream *stream_info)
+{
+ if (stream_info->stream_type == BURST_STREAM) {
+ if (stream_info->runtime_num_burst_capture == 0 ||
+ (stream_info->runtime_num_burst_capture == 1 &&
+ stream_info->activated_framedrop_period == 1))
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ }
+
+ if (stream_info->undelivered_request_cnt > 0)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+
+ /*
+ * re-configure the period pattern, only if it's not already
+ * set to what we want
+ */
+ if (stream_info->current_framedrop_period !=
+ stream_info->requested_framedrop_period) {
+ msm_isp_cfg_framedrop_reg(stream_info);
+ }
+}
+
+void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src,
+ enum msm_isp_comp_irq_types irq,
+ struct msm_isp_timestamp *ts)
{
int i;
- struct msm_vfe_axi_shared_data *axi_data = NULL;
struct msm_vfe_axi_stream *stream_info;
unsigned long flags;
-
- if (msm_isp_check_epoch_status(&vfe_dev, frame_src) != 1)
- return;
-
- axi_data = &vfe_dev->axi_data;
+ int ret;
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
+ if (SRC_TO_INTF(stream_info->stream_src) !=
frame_src) {
continue;
}
- stream_info = &axi_data->stream_info[i];
- if (stream_info->state != ACTIVE)
+ if (stream_info->state == AVAILABLE ||
+ stream_info->state == INACTIVE)
continue;
spin_lock_irqsave(&stream_info->lock, flags);
- if (BURST_STREAM == stream_info->stream_type) {
- if (0 == stream_info->runtime_num_burst_capture)
- stream_info->current_framedrop_period =
- MSM_VFE_STREAM_STOP_PERIOD;
+ ret = msm_isp_composite_irq(vfe_dev, stream_info, irq);
+ if (ret) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (ret < 0) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return;
+ }
+ continue;
}
- if (stream_info->undelivered_request_cnt > 0)
- stream_info->current_framedrop_period =
- MSM_VFE_STREAM_STOP_PERIOD;
-
- /*
- * re-configure the period pattern, only if it's not already
- * set to what we want
- */
- if (stream_info->current_framedrop_period !=
- stream_info->requested_framedrop_period) {
- msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ switch (irq) {
+ case MSM_ISP_COMP_IRQ_REG_UPD:
+ stream_info->activated_framedrop_period =
+ stream_info->requested_framedrop_period;
+ __msm_isp_axi_stream_update(stream_info, ts);
+ break;
+ case MSM_ISP_COMP_IRQ_EPOCH:
+ if (stream_info->state == ACTIVE)
+ msm_isp_update_framedrop_reg(stream_info);
+ break;
+ default:
+ WARN(1, "Invalid irq %d\n", irq);
}
spin_unlock_irqrestore(&stream_info->lock, flags);
}
@@ -708,7 +671,7 @@ void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
stream_info->frame_skip_pattern);
}
- msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ msm_isp_cfg_framedrop_reg(stream_info);
ISP_DBG("%s: init frame drop: %d\n", __func__,
stream_info->init_frame_drop);
ISP_DBG("%s: num_burst_capture: %d\n", __func__,
@@ -741,10 +704,9 @@ void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
vfe_dev->reg_update_requested;
}
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- struct msm_vfe_axi_stream *temp_stream_info;
-
- stream_info = &axi_data->stream_info[i];
- stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ i);
+ stream_idx = HANDLE_TO_IDX(stream_info->stream_handle[0]);
/*
* Process drop only if controllable ACTIVE PIX stream &&
@@ -761,10 +723,8 @@ void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
if (stream_info->controllable_output &&
!vfe_dev->reg_updated) {
- temp_stream_info =
- msm_isp_get_controllable_stream(vfe_dev,
- stream_info);
- if (temp_stream_info->undelivered_request_cnt) {
+ if (stream_info->undelivered_request_cnt) {
+ pr_err("Drop frame no reg update\n");
if (msm_isp_drop_frame(vfe_dev, stream_info, ts,
sof_info)) {
pr_err("drop frame failed\n");
@@ -1010,7 +970,7 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
/**
* msm_isp_calculate_framedrop() - Setup frame period and pattern
- * @axi_data: Structure describing the h/w streams.
+ * @vfe_dev: vfe device.
* @stream_cfg_cmd: User space input parameter for perion/pattern.
*
* Initialize the h/w stream framedrop period and pattern sent
@@ -1018,16 +978,16 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
*
* Returns 0 on success else error code.
*/
-int msm_isp_calculate_framedrop(
- struct msm_vfe_axi_shared_data *axi_data,
+static int msm_isp_calculate_framedrop(
+ struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
{
uint32_t framedrop_period = 0;
struct msm_vfe_axi_stream *stream_info = NULL;
if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)
< VFE_AXI_SRC_MAX) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
} else {
pr_err("%s: Invalid stream handle", __func__);
return -EINVAL;
@@ -1059,26 +1019,36 @@ int msm_isp_calculate_framedrop(
return 0;
}
-void msm_isp_calculate_bandwidth(
- struct msm_vfe_axi_shared_data *axi_data,
+static void msm_isp_calculate_bandwidth(
struct msm_vfe_axi_stream *stream_info)
{
int bpp = 0;
+ struct msm_vfe_axi_shared_data *axi_data;
+ int i;
+
if (stream_info->stream_src < RDI_INTF_0) {
- stream_info->bandwidth =
- (axi_data->src_info[VFE_PIX_0].pixel_clock /
- axi_data->src_info[VFE_PIX_0].width) *
- stream_info->max_width;
- stream_info->bandwidth = (unsigned long)stream_info->bandwidth *
- stream_info->format_factor / ISP_Q2;
+ for (i = 0; i < stream_info->num_isp; i++) {
+ axi_data = &stream_info->vfe_dev[i]->axi_data;
+ stream_info->bandwidth[i] =
+ (axi_data->src_info[VFE_PIX_0].pixel_clock /
+ axi_data->src_info[VFE_PIX_0].width) *
+ stream_info->max_width[i];
+ stream_info->bandwidth[i] =
+ (unsigned long)stream_info->bandwidth[i] *
+ stream_info->format_factor / ISP_Q2;
+ }
} else {
int rdi = SRC_TO_INTF(stream_info->stream_src);
bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
- if (rdi < VFE_SRC_MAX)
- stream_info->bandwidth =
+ if (rdi < VFE_SRC_MAX) {
+ for (i = 0; i < stream_info->num_isp; i++) {
+ axi_data = &stream_info->vfe_dev[i]->axi_data;
+ stream_info->bandwidth[i] =
(axi_data->src_info[rdi].pixel_clock / 8) * bpp;
- else
+ }
+ } else {
pr_err("%s: Invalid rdi interface\n", __func__);
+ }
}
}
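msm_isp_calculate_bandwidth above now produces one figure per attached VFE: for PIX sources the pixel clock is scaled by the stream's output width over the camif width and by a Q2 fixed-point format factor, for RDI it is simply bytes per second from the pixel clock and the bits per pixel. A hedged arithmetic sketch; the ISP_Q2 value and the sample numbers are assumptions for illustration:

/* Illustrative sketch only: per-VFE bandwidth arithmetic. */
#include <stdio.h>

#define ISP_Q2 4UL   /* assumed Q2 fixed-point divisor */

static unsigned long pix_bandwidth(unsigned long pixel_clock,
                                   unsigned long camif_width,
                                   unsigned long output_width,
                                   unsigned long format_factor_q2)
{
    unsigned long bw = (pixel_clock / camif_width) * output_width;

    return bw * format_factor_q2 / ISP_Q2;  /* undo the Q2 scaling */
}

static unsigned long rdi_bandwidth(unsigned long pixel_clock, unsigned int bpp)
{
    return (pixel_clock / 8) * bpp;         /* bytes/s from bits per pixel */
}

int main(void)
{
    /* e.g. 320 MHz pixel clock, 4032-pixel camif and output width,
     * format factor 1.5 expressed in Q2 (6/4) */
    printf("pix: %lu\n", pix_bandwidth(320000000UL, 4032, 4032, 6));
    printf("rdi: %lu\n", rdi_bandwidth(320000000UL, 10));
    return 0;
}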
@@ -1133,37 +1103,40 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
uint32_t io_format = 0;
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
struct msm_vfe_axi_stream *stream_info;
+ unsigned long flags;
+
+ if (stream_cfg_cmd->stream_src >= VFE_AXI_SRC_MAX) {
+ pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
+ stream_cfg_cmd->stream_src);
+ return -EINVAL;
+ }
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ stream_cfg_cmd->stream_src);
+
+ spin_lock_irqsave(&stream_info->lock, flags);
rc = msm_isp_axi_create_stream(vfe_dev,
- &vfe_dev->axi_data, stream_cfg_cmd);
+ &vfe_dev->axi_data, stream_cfg_cmd, stream_info);
if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: create stream failed\n", __func__);
return rc;
}
rc = msm_isp_validate_axi_request(
- &vfe_dev->axi_data, stream_cfg_cmd);
+ vfe_dev, stream_info, stream_cfg_cmd);
if (rc) {
+ msm_isp_axi_destroy_stream(vfe_dev, stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: Request validation failed\n", __func__);
- if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) <
- VFE_AXI_SRC_MAX)
- msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
- HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
return rc;
}
- stream_info = &vfe_dev->axi_data.
- stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
- if (!stream_info) {
- pr_err("%s: can not find stream handle %x\n", __func__,
- stream_cfg_cmd->axi_stream_handle);
- return -EINVAL;
- }
stream_info->memory_input = stream_cfg_cmd->memory_input;
vfe_dev->reg_update_requested &=
~(BIT(SRC_TO_INTF(stream_info->stream_src)));
- msm_isp_axi_reserve_wm(vfe_dev, &vfe_dev->axi_data, stream_info);
+ msm_isp_axi_reserve_wm(vfe_dev, stream_info);
if (stream_info->stream_src < RDI_INTF_0) {
io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
@@ -1183,16 +1156,67 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
goto done;
}
}
- rc = msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
- if (rc)
- goto done;
+
+ if (!stream_info->controllable_output) {
+ /*
+ * check that the parameters passed from the second vfe are the
+ * same as the first vfe; do this only for non-controllable streams
+ * for now because the user driver has a bug where it sends
+ * mismatched info for controllable streams
+ */
+ if (stream_info->num_isp > 1) {
+ if (stream_cfg_cmd->init_frame_drop !=
+ stream_info->init_frame_drop) {
+ pr_err("%s: stream %d init drop mismatch %d/%d\n",
+ __func__, stream_info->stream_id,
+ stream_info->init_frame_drop,
+ stream_cfg_cmd->init_frame_drop);
+ rc = -EINVAL;
+ }
+ if (stream_cfg_cmd->frame_skip_pattern !=
+ stream_info->frame_skip_pattern) {
+ pr_err("%s: stream %d skip pattern mismatch %d/%d\n",
+ __func__, stream_info->stream_id,
+ stream_info->frame_skip_pattern,
+ stream_cfg_cmd->frame_skip_pattern);
+ rc = -EINVAL;
+ }
+ if (stream_info->stream_type == CONTINUOUS_STREAM &&
+ stream_cfg_cmd->burst_count > 0) {
+ pr_err("%s: stream %d stream type mismatch\n",
+ __func__, stream_info->stream_id);
+ rc = -EINVAL;
+ }
+ if (stream_info->stream_type == BURST_STREAM &&
+ stream_info->num_burst_capture !=
+ stream_cfg_cmd->burst_count) {
+ pr_err("%s: stream %d stream burst count mismatch %d/%d\n",
+ __func__, stream_info->stream_id,
+ stream_info->num_burst_capture,
+ stream_cfg_cmd->burst_count);
+ rc = -EINVAL;
+ }
+ } else {
+ rc = msm_isp_calculate_framedrop(vfe_dev,
+ stream_cfg_cmd);
+ }
+ if (rc)
+ goto done;
+ } else {
+ stream_info->stream_type = BURST_STREAM;
+ stream_info->num_burst_capture = 0;
+ stream_info->frame_skip_pattern = NO_SKIP;
+ stream_info->init_frame_drop = stream_cfg_cmd->init_frame_drop;
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ }
if (stream_cfg_cmd->vt_enable && !vfe_dev->vt_enable) {
vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
msm_isp_start_avtimer();
}
+
if (stream_info->num_planes > 1)
- msm_isp_axi_reserve_comp_mask(
- &vfe_dev->axi_data, stream_info);
+ msm_isp_axi_reserve_comp_mask(vfe_dev, stream_info);
for (i = 0; i < stream_info->num_planes; i++) {
vfe_dev->hw_info->vfe_ops.axi_ops.
@@ -1201,16 +1225,17 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
vfe_dev->hw_info->vfe_ops.axi_ops.
cfg_wm_xbar_reg(vfe_dev, stream_info, i);
}
- /* initialize the WM ping pong with scratch buffer */
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PING_FLAG);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PONG_FLAG);
-
+ if (stream_info->state == INACTIVE) {
+ /* initialize the WM ping pong with scratch buffer */
+ msm_isp_cfg_stream_scratch(stream_info, VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG);
+ }
done:
if (rc) {
- msm_isp_axi_free_wm(&vfe_dev->axi_data, stream_info);
- msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
- HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
+ msm_isp_axi_free_wm(vfe_dev, stream_info);
+ msm_isp_axi_destroy_stream(vfe_dev, stream_info);
}
+ spin_unlock_irqrestore(&stream_info->lock, flags);
return rc;
}
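The mismatch checks above protect dual-VFE (num_isp > 1) streams from having their two halves requested with different frame-drop and burst parameters. A reduced standalone sketch of that cross-check (hypothetical struct and names, covering only part of what the patch actually compares):

#include <stdio.h>

/* Hypothetical reduced view of the per-request parameters compared above. */
struct axi_request_params {
	unsigned int init_frame_drop;
	unsigned int frame_skip_pattern;
	unsigned int burst_count;
};

/* Return 0 if the second VFE's request matches the first, -1 on mismatch. */
static int check_dual_vfe_request(const struct axi_request_params *first,
				  const struct axi_request_params *second)
{
	if (first->init_frame_drop != second->init_frame_drop ||
	    first->frame_skip_pattern != second->frame_skip_pattern ||
	    first->burst_count != second->burst_count) {
		fprintf(stderr, "dual VFE stream request mismatch\n");
		return -1;
	}
	return 0;
}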
@@ -1218,26 +1243,42 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, i;
struct msm_vfe_axi_stream_release_cmd *stream_release_cmd = arg;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
-
+ int vfe_idx;
+ unsigned long flags;
if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
VFE_AXI_SRC_MAX) {
pr_err("%s: Invalid stream handle\n", __func__);
return -EINVAL;
}
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
- if (stream_info->state == AVAILABLE) {
- pr_err("%s: Stream already released\n", __func__);
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle));
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+
+ vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev, stream_info);
+ if (vfe_idx == -ENOTTY ||
+ stream_release_cmd->stream_handle !=
+ stream_info->stream_handle[vfe_idx]) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Invalid stream %p handle %x/%x vfe_idx %d vfe_dev %d num_isp %d\n",
+ __func__, stream_info,
+ stream_release_cmd->stream_handle,
+ vfe_idx != -ENOTTY ?
+ stream_info->stream_handle[vfe_idx] : 0, vfe_idx,
+ vfe_dev->pdev->id, stream_info->num_isp);
return -EINVAL;
- } else if (stream_info->state != INACTIVE) {
+ }
+
+ if (stream_info->state != INACTIVE && stream_info->state != AVAILABLE) {
stream_cfg.cmd = STOP_STREAM;
stream_cfg.num_streams = 1;
stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg);
+ spin_lock_irqsave(&stream_info->lock, flags);
}
for (i = 0; i < stream_info->num_planes; i++) {
@@ -1249,33 +1290,75 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
}
if (stream_info->num_planes > 1)
- msm_isp_axi_free_comp_mask(&vfe_dev->axi_data, stream_info);
+ msm_isp_axi_free_comp_mask(vfe_dev, stream_info);
vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
- msm_isp_axi_free_wm(axi_data, stream_info);
+ msm_isp_axi_free_wm(vfe_dev, stream_info);
- msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
- HANDLE_TO_IDX(stream_release_cmd->stream_handle));
+ msm_isp_axi_destroy_stream(vfe_dev, stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
return rc;
}
-static int msm_isp_axi_stream_enable_cfg(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info, int32_t dual_vfe_sync)
+void msm_isp_release_all_axi_stream(struct vfe_device *vfe_dev)
{
- int i, vfe_id = 0, enable_wm = 0;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
- struct dual_vfe_resource *dual_vfe_res = NULL;
+ struct msm_vfe_axi_stream_release_cmd
+ stream_release_cmd[VFE_AXI_SRC_MAX];
+ struct msm_vfe_axi_stream_cfg_cmd stream_cfg_cmd;
+ struct msm_vfe_axi_stream *stream_info;
+ int i;
+ int vfe_idx;
+ int num_stream = 0;
+ unsigned long flags;
- if (stream_idx >= VFE_AXI_SRC_MAX) {
- pr_err("%s: Invalid stream_idx", __func__);
- goto error;
+ stream_cfg_cmd.cmd = STOP_STREAM;
+ stream_cfg_cmd.num_streams = 0;
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ vfe_idx = msm_isp_get_vfe_idx_for_stream_user(
+ vfe_dev, stream_info);
+ if (-ENOTTY == vfe_idx) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_release_cmd[num_stream++].stream_handle =
+ stream_info->stream_handle[vfe_idx];
+ if (stream_info->state == INACTIVE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_cfg_cmd.stream_handle[
+ stream_cfg_cmd.num_streams] =
+ stream_info->stream_handle[vfe_idx];
+ stream_cfg_cmd.num_streams++;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
+ if (stream_cfg_cmd.num_streams)
+ msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg_cmd);
- if (stream_info->state == INACTIVE)
- goto error;
+ for (i = 0; i < num_stream; i++)
+ msm_isp_release_axi_stream(vfe_dev, &stream_release_cmd[i]);
+}
+
+static void msm_isp_axi_stream_enable_cfg(
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int enable_wm = 0;
+ struct vfe_device *vfe_dev;
+ struct msm_vfe_axi_shared_data *axi_data;
+ uint32_t stream_idx = stream_info->stream_src;
+ int k;
+ int i;
+
+ WARN_ON(stream_idx >= VFE_AXI_SRC_MAX);
+
+ WARN_ON(stream_info->state != START_PENDING &&
+ stream_info->state != RESUME_PENDING &&
+ stream_info->state != STOP_PENDING &&
+ stream_info->state != PAUSE_PENDING);
if (stream_info->state == START_PENDING ||
stream_info->state == RESUME_PENDING) {
@@ -1283,50 +1366,24 @@ static int msm_isp_axi_stream_enable_cfg(
} else {
enable_wm = 0;
}
- for (i = 0; i < stream_info->num_planes; i++) {
- /*
- * In case when sensor is streaming, use dual vfe sync mode
- * to enable wm together and avoid split.
- */
- if ((stream_info->stream_src < RDI_INTF_0) &&
- vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1 &&
- dual_vfe_sync) {
- dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
- if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
- !dual_vfe_res->axi_data[ISP_VFE0] ||
- !dual_vfe_res->vfe_base[ISP_VFE1] ||
- !dual_vfe_res->axi_data[ISP_VFE1]) {
- pr_err("%s:%d failed vfe0 %pK %pK vfe %pK %pK\n",
- __func__, __LINE__,
- dual_vfe_res->vfe_base[ISP_VFE0],
- dual_vfe_res->axi_data[ISP_VFE0],
- dual_vfe_res->vfe_base[ISP_VFE1],
- dual_vfe_res->axi_data[ISP_VFE1]);
- goto error;
- }
- for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- enable_wm(dual_vfe_res->vfe_base[vfe_id],
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].wm[i],
- enable_wm);
- }
- } else if (!vfe_dev->is_split ||
- (stream_info->stream_src >= RDI_INTF_0 &&
- stream_info->stream_src <= RDI_INTF_2) ||
- !dual_vfe_sync) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- enable_wm(vfe_dev->vfe_base, stream_info->wm[i],
- enable_wm);
- }
- if (!enable_wm) {
- /* Issue a reg update for Raw Snapshot Case
+
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ axi_data = &vfe_dev->axi_data;
+ for (i = 0; i < stream_info->num_planes; i++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
+ vfe_dev->vfe_base,
+ stream_info->wm[k][i], enable_wm);
+ if (enable_wm)
+ continue;
+ /*
+ * Issue a reg update for Raw Snapshot Case
* since we dont have reg update ack
- */
+ */
if (vfe_dev->axi_data.src_info[VFE_PIX_0].
raw_stream_count > 0
&& vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count == 0) {
+ stream_count == 0) {
if (stream_info->stream_src == CAMIF_RAW ||
stream_info->stream_src == IDEAL_RAW) {
vfe_dev->hw_info->vfe_ops.core_ops.
@@ -1335,70 +1392,103 @@ static int msm_isp_axi_stream_enable_cfg(
}
}
}
+ if (stream_info->state == START_PENDING)
+ axi_data->num_active_stream++;
+ else if (stream_info->state == STOP_PENDING)
+ axi_data->num_active_stream--;
+ }
+}
+
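The write-master programming above hinges on one decision: enable the WMs when a stream is about to start or resume, disable them when it is about to stop or pause, and adjust the per-VFE active-stream count on start/stop. A minimal standalone sketch of that decision (hypothetical enum mirroring the driver's state names):

/* Hypothetical reduced set of the pending states handled above. */
enum pending_state {
	START_PENDING,
	RESUME_PENDING,
	STOP_PENDING,
	PAUSE_PENDING,
};

/* 1 = enable the write masters, 0 = disable them. */
static int wm_enable_for_state(enum pending_state state)
{
	return state == START_PENDING || state == RESUME_PENDING;
}

/* Active stream accounting applied per attached VFE, as in the loop above. */
static void update_active_count(enum pending_state state, int *num_active)
{
	if (state == START_PENDING)
		(*num_active)++;
	else if (state == STOP_PENDING)
		(*num_active)--;
}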
+static void __msm_isp_axi_stream_update(
+ struct msm_vfe_axi_stream *stream_info,
+ struct msm_isp_timestamp *ts)
+{
+ int j;
+ int intf = SRC_TO_INTF(stream_info->stream_src);
+ struct vfe_device *vfe_dev;
+ int k;
+
+ switch (stream_info->state) {
+ case UPDATING:
+ stream_info->state = ACTIVE;
+ break;
+ case STOP_PENDING:
+ msm_isp_axi_stream_enable_cfg(stream_info);
+ stream_info->state = STOPPING;
+ break;
+ case START_PENDING:
+ msm_isp_axi_stream_enable_cfg(stream_info);
+ stream_info->state = STARTING;
+ break;
+ case STOPPING:
+ stream_info->state = INACTIVE;
+ for (k = 0; k < MSM_ISP_COMP_IRQ_MAX; k++)
+ stream_info->composite_irq[k] = 0;
+ complete_all(&stream_info->inactive_comp);
+ break;
+ case STARTING:
+ stream_info->state = ACTIVE;
+ complete_all(&stream_info->active_comp);
+ break;
+ case PAUSING:
+ stream_info->state = PAUSED;
+ msm_isp_reload_ping_pong_offset(stream_info);
+ for (j = 0; j < stream_info->num_planes; j++) {
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ }
+ }
+ stream_info->state = RESUME_PENDING;
+ msm_isp_axi_stream_enable_cfg(stream_info);
+ stream_info->state = RESUMING;
+ break;
+ case RESUMING:
+ stream_info->runtime_output_format = stream_info->output_format;
+ stream_info->state = ACTIVE;
+ complete_all(&stream_info->active_comp);
+ for (j = 0; j < stream_info->num_isp; j++) {
+ /* notify that all streams have been updated */
+ msm_isp_notify(stream_info->vfe_dev[j],
+ ISP_EVENT_STREAM_UPDATE_DONE, intf, ts);
+ atomic_set(&stream_info->vfe_dev[j]->
+ axi_data.axi_cfg_update[intf], 0);
+ }
+ stream_info->update_vfe_mask = 0;
+ break;
+ default:
+ break;
}
- if (stream_info->state == START_PENDING)
- axi_data->num_active_stream++;
- else if (stream_info->state == STOP_PENDING)
- axi_data->num_active_stream--;
- return 0;
-error:
- return -EINVAL;
}
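The switch above is the per-stream state machine advanced on each reg-update/SOF event. A compact standalone sketch of the transitions it encodes (hypothetical enum reusing the driver's state names; completion signalling and register writes omitted):

/* Hypothetical reduced copy of the stream states used above. */
enum axi_state {
	INACTIVE, ACTIVE, START_PENDING, STARTING, STOP_PENDING, STOPPING,
	PAUSING, PAUSED, RESUME_PENDING, RESUMING, UPDATING,
};

/* Next state applied on each update event, mirroring the switch above. */
static enum axi_state next_axi_state(enum axi_state s)
{
	switch (s) {
	case UPDATING:      return ACTIVE;
	case START_PENDING: return STARTING;  /* WMs get enabled here */
	case STARTING:      return ACTIVE;    /* completes active_comp */
	case STOP_PENDING:  return STOPPING;  /* WMs get disabled here */
	case STOPPING:      return INACTIVE;  /* completes inactive_comp */
	case PAUSING:       return RESUMING;  /* via PAUSED/RESUME_PENDING */
	case RESUMING:      return ACTIVE;    /* notifies STREAM_UPDATE_DONE */
	default:            return s;
	}
}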
void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src)
+ enum msm_vfe_input_src frame_src,
+ struct msm_isp_timestamp *ts)
{
int i;
unsigned long flags;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
+ if (SRC_TO_INTF(stream_info->stream_src) !=
frame_src) {
ISP_DBG("%s stream_src %d frame_src %d\n", __func__,
SRC_TO_INTF(
- axi_data->stream_info[i].stream_src),
+ stream_info->stream_src),
frame_src);
continue;
}
- if (axi_data->stream_info[i].state == UPDATING)
- axi_data->stream_info[i].state = ACTIVE;
- else if (axi_data->stream_info[i].state == START_PENDING ||
- axi_data->stream_info[i].state == STOP_PENDING) {
- msm_isp_axi_stream_enable_cfg(
- vfe_dev, &axi_data->stream_info[i],
- axi_data->stream_info[i].state ==
- START_PENDING ? 1 : 0);
- axi_data->stream_info[i].state =
- axi_data->stream_info[i].state ==
- START_PENDING ? STARTING : STOPPING;
- } else if (axi_data->stream_info[i].state == STARTING ||
- axi_data->stream_info[i].state == STOPPING) {
- axi_data->stream_info[i].state =
- axi_data->stream_info[i].state == STARTING ?
- ACTIVE : INACTIVE;
- }
- }
-
- spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
- if (vfe_dev->axi_data.stream_update[frame_src]) {
- vfe_dev->axi_data.stream_update[frame_src]--;
- }
- spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
-
- if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF ||
- (vfe_dev->axi_data.pipeline_update ==
- DISABLE_CAMIF_IMMEDIATELY)) {
- vfe_dev->hw_info->vfe_ops.stats_ops.
- enable_module(vfe_dev, 0xFF, 0);
- vfe_dev->axi_data.pipeline_update = NO_UPDATE;
+ if (stream_info->state == AVAILABLE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ __msm_isp_axi_stream_update(stream_info, ts);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
-
- if (vfe_dev->axi_data.stream_update[frame_src] == 0)
- complete(&vfe_dev->stream_config_complete);
}
-static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
+static void msm_isp_reload_ping_pong_offset(
struct msm_vfe_axi_stream *stream_info)
{
int i, j;
@@ -1406,120 +1496,70 @@ static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
struct msm_isp_buffer *buf;
int32_t buf_size_byte = 0;
int32_t word_per_line = 0;
+ int k;
+ struct vfe_device *vfe_dev;
+
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ for (i = 0; i < 2; i++) {
+ buf = stream_info->buf[i];
+ if (!buf)
+ continue;
- for (i = 0; i < 2; i++) {
- buf = stream_info->buf[i];
- if (!buf)
- continue;
-
- bit = i ? 0 : 1;
+ bit = i ? 0 : 1;
- for (j = 0; j < stream_info->num_planes; j++) {
- word_per_line = msm_isp_cal_word_per_line(
+ for (j = 0; j < stream_info->num_planes; j++) {
+ word_per_line = msm_isp_cal_word_per_line(
stream_info->output_format, stream_info->
- plane_cfg[j].output_stride);
- if (word_per_line < 0) {
- /* 0 means no prefetch*/
- word_per_line = 0;
- buf_size_byte = 0;
- } else {
- buf_size_byte = (word_per_line * 8 *
- stream_info->plane_cfg[j].
+ plane_cfg[k][j].output_stride);
+ if (word_per_line < 0) {
+				/* 0 means no prefetch */
+ word_per_line = 0;
+ buf_size_byte = 0;
+ } else {
+ buf_size_byte = (word_per_line * 8 *
+ stream_info->plane_cfg[k][j].
output_scan_lines) - stream_info->
- plane_cfg[j].plane_addr_offset;
- }
-
- vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
- vfe_dev->vfe_base, stream_info->wm[j], bit,
- buf->mapped_info[j].paddr +
- stream_info->plane_cfg[j].plane_addr_offset,
- buf_size_byte);
- }
- }
-}
-
-void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src)
-{
- int i, j;
- uint32_t update_state;
- unsigned long flags;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- struct msm_vfe_axi_stream *stream_info;
- int num_stream = 0;
+ plane_cfg[k][j].plane_addr_offset;
+ }
- spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
- for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
- frame_src) {
- continue;
- }
- num_stream++;
- stream_info = &axi_data->stream_info[i];
- if ((stream_info->stream_type == BURST_STREAM &&
- !stream_info->controllable_output) ||
- stream_info->state == AVAILABLE)
- continue;
- spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_info->state == PAUSING) {
- /*AXI Stopped, apply update*/
- stream_info->state = PAUSED;
- msm_isp_reload_ping_pong_offset(vfe_dev, stream_info);
- for (j = 0; j < stream_info->num_planes; j++)
vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_wm_reg(vfe_dev, stream_info, j);
- /*Resume AXI*/
- stream_info->state = RESUME_PENDING;
- if (vfe_dev->is_split) {
- msm_isp_update_dual_HW_axi(vfe_dev,
- stream_info);
- } else {
- msm_isp_axi_stream_enable_cfg(
- vfe_dev,
- &axi_data->stream_info[i], 1);
- stream_info->state = RESUMING;
+ update_ping_pong_addr(
+ vfe_dev->vfe_base,
+ stream_info->wm[k][j],
+ bit,
+ buf->mapped_info[j].paddr +
+ stream_info->plane_cfg[k][j].
+ plane_addr_offset,
+ buf_size_byte);
}
- } else if (stream_info->state == RESUMING) {
- stream_info->runtime_output_format =
- stream_info->output_format;
- stream_info->state = ACTIVE;
}
- spin_unlock_irqrestore(&stream_info->lock, flags);
}
- spin_unlock_irqrestore(&vfe_dev->common_data->common_dev_data_lock,
- flags);
- if (num_stream)
- update_state = atomic_dec_return(
- &axi_data->axi_cfg_update[frame_src]);
}
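Both the reload path above and the ping-pong configuration further down size the prefetch window the same way: a negative word count disables prefetch, otherwise the size is the per-line word count times eight bytes times the scan lines, minus the plane offset. A minimal standalone sketch of that formula (hypothetical names):

/* Hypothetical illustration of the prefetch size computed above. */
static int prefetch_size_bytes(int word_per_line, int output_scan_lines,
			       int plane_addr_offset)
{
	if (word_per_line < 0)
		return 0;	/* no prefetch */
	return word_per_line * 8 * output_scan_lines - plane_addr_offset;
}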
static int msm_isp_update_deliver_count(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_bit)
{
- struct msm_vfe_axi_stream *temp_stream_info;
int rc = 0;
if (!stream_info->controllable_output)
goto done;
- temp_stream_info =
- msm_isp_get_controllable_stream(vfe_dev, stream_info);
-
- if (!temp_stream_info->undelivered_request_cnt) {
+ if (!stream_info->undelivered_request_cnt) {
pr_err_ratelimited("%s:%d error undelivered_request_cnt 0\n",
__func__, __LINE__);
rc = -EINVAL;
goto done;
} else {
- temp_stream_info->undelivered_request_cnt--;
- if (pingpong_bit != temp_stream_info->sw_ping_pong_bit) {
+ stream_info->undelivered_request_cnt--;
+ if (pingpong_bit != stream_info->sw_ping_pong_bit) {
pr_err("%s:%d ping pong bit actual %d sw %d\n",
__func__, __LINE__, pingpong_bit,
- temp_stream_info->sw_ping_pong_bit);
+ stream_info->sw_ping_pong_bit);
rc = -EINVAL;
goto done;
}
- temp_stream_info->sw_ping_pong_bit ^= 1;
+ stream_info->sw_ping_pong_bit ^= 1;
}
done:
return rc;
@@ -1527,7 +1567,6 @@ done:
void msm_isp_halt_send_error(struct vfe_device *vfe_dev, uint32_t event)
{
- uint32_t i = 0;
struct msm_isp_event_data error_event;
struct msm_vfe_axi_halt_cmd halt_cmd;
@@ -1545,10 +1584,6 @@ void msm_isp_halt_send_error(struct vfe_device *vfe_dev, uint32_t event)
/* heavy spin lock in axi halt, avoid spin lock outside. */
msm_isp_axi_halt(vfe_dev, &halt_cmd);
- for (i = 0; i < VFE_AXI_SRC_MAX; i++)
- vfe_dev->axi_data.stream_info[i].state =
- INACTIVE;
-
error_event.frame_id =
vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
@@ -1562,31 +1597,49 @@ int msm_isp_print_ping_pong_address(struct vfe_device *vfe_dev,
struct msm_isp_buffer *buf = NULL;
uint32_t pingpong_bit;
struct msm_vfe_axi_stream *stream_info = NULL;
+ int k;
for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
- stream_info = &vfe_dev->axi_data.stream_info[j];
- if (stream_info->state == INACTIVE)
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, j);
+ if (stream_info->state == INACTIVE ||
+ stream_info->state == AVAILABLE)
continue;
for (pingpong_bit = 0; pingpong_bit < 2; pingpong_bit++) {
+ dma_addr_t temp;
+
+ buf = stream_info->buf[pingpong_bit];
+ if (buf == NULL) {
+ pr_err("%s: buf NULL for stream %x num_isp %d\n",
+ __func__,
+ stream_info->stream_src,
+ stream_info->num_isp);
+ continue;
+ }
+ temp = buf->mapped_info[0].paddr +
+ buf->mapped_info[0].len;
+ pr_err("%s: stream %x ping bit %d uses buffer %pa-%pa, num_isp %d\n",
+ __func__, stream_info->stream_src,
+ pingpong_bit,
+ &buf->mapped_info[0].paddr, &temp,
+ stream_info->num_isp);
+
for (i = 0; i < stream_info->num_planes; i++) {
- buf = stream_info->buf[pingpong_bit];
- if (buf == NULL) {
- pr_err("%s: buf NULL\n", __func__);
- continue;
- }
- pr_debug("%s: stream_id %x ping-pong %d plane %d start_addr %lu addr_offset %x len %zx stride %d scanline %d\n"
+ for (k = 0; k < stream_info->num_isp; k++) {
+ pr_debug(
+ "%s: stream_id %x ping-pong %d plane %d start_addr %lu addr_offset %x len %zx stride %d scanline %d\n"
, __func__, stream_info->stream_id,
pingpong_bit, i, (unsigned long)
buf->mapped_info[i].paddr,
stream_info->
- plane_cfg[i].plane_addr_offset,
+ plane_cfg[k][i].plane_addr_offset,
buf->mapped_info[i].len,
stream_info->
- plane_cfg[i].output_stride,
+ plane_cfg[k][i].output_stride,
stream_info->
- plane_cfg[i].output_scan_lines
+ plane_cfg[k][i].output_scan_lines
);
+ }
}
}
}
@@ -1601,36 +1654,35 @@ static struct msm_isp_buffer *msm_isp_get_stream_buffer(
int rc = 0;
uint32_t bufq_handle = 0;
struct msm_isp_buffer *buf = NULL;
- struct msm_vfe_axi_stream *temp_stream_info = NULL;
struct msm_vfe_frame_request_queue *queue_req;
+ uint32_t buf_index = MSM_ISP_INVALID_BUF_INDEX;
if (!stream_info->controllable_output) {
bufq_handle = stream_info->bufq_handle
[VFE_BUF_QUEUE_DEFAULT];
} else {
- temp_stream_info = msm_isp_get_controllable_stream(
- vfe_dev, stream_info);
queue_req = list_first_entry_or_null(
- &temp_stream_info->request_q,
+ &stream_info->request_q,
struct msm_vfe_frame_request_queue, list);
if (!queue_req)
return buf;
- bufq_handle = temp_stream_info->
+ bufq_handle = stream_info->
bufq_handle[queue_req->buff_queue_id];
if (!bufq_handle ||
- temp_stream_info->request_q_cnt <= 0) {
+ stream_info->request_q_cnt <= 0) {
pr_err_ratelimited("%s: Drop request. Shared stream is stopped.\n",
__func__);
return buf;
}
+ buf_index = queue_req->buf_index;
queue_req->cmd_used = 0;
list_del(&queue_req->list);
- temp_stream_info->request_q_cnt--;
+ stream_info->request_q_cnt--;
}
rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, bufq_handle, &buf);
+ vfe_dev->pdev->id, bufq_handle, buf_index, &buf);
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev,
@@ -1649,144 +1701,74 @@ static struct msm_isp_buffer *msm_isp_get_stream_buffer(
return buf;
}
-static int msm_isp_cfg_ping_pong_address(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
- int scratch)
+static int msm_isp_cfg_ping_pong_address(
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status)
{
int i;
- struct msm_isp_buffer *buf = NULL;
+ int j;
uint32_t pingpong_bit;
- uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ struct vfe_device *vfe_dev = stream_info->vfe_dev[0];
uint32_t buffer_size_byte = 0;
int32_t word_per_line = 0;
dma_addr_t paddr;
- struct dual_vfe_resource *dual_vfe_res = NULL;
- uint32_t vfe_id = 0;
- unsigned long flags;
+ struct msm_isp_buffer *buf = NULL;
- if (stream_idx >= VFE_AXI_SRC_MAX) {
- pr_err("%s: Invalid stream_idx", __func__);
- return -EINVAL;
- }
- /* make sure that streams are in right state */
- if ((stream_info->stream_src < RDI_INTF_0) &&
- vfe_dev->is_split) {
- dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
- if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
- !dual_vfe_res->axi_data[ISP_VFE0] ||
- !dual_vfe_res->vfe_base[ISP_VFE1] ||
- !dual_vfe_res->axi_data[ISP_VFE1]) {
- pr_err("%s:%d failed vfe0 %pK %pK vfe %pK %pK\n",
- __func__, __LINE__,
- dual_vfe_res->vfe_base[ISP_VFE0],
- dual_vfe_res->axi_data[ISP_VFE0],
- dual_vfe_res->vfe_base[ISP_VFE1],
- dual_vfe_res->axi_data[ISP_VFE1]);
- return -EINVAL;
- }
- } else if (!vfe_dev->is_split ||
- (stream_info->stream_src >= RDI_INTF_0 &&
- stream_info->stream_src <= RDI_INTF_2)) {
- dual_vfe_res = NULL;
- } else {
- pr_err("%s: Error! Should not reach this case is_split %d stream_src %d\n",
- __func__, vfe_dev->is_split, stream_info->stream_src);
- msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
+ /* Isolate pingpong_bit from pingpong_status */
+ pingpong_bit = ((pingpong_status >>
+ stream_info->wm[0][0]) & 0x1);
+
+ /* return if buffer already present */
+ if (stream_info->buf[!pingpong_bit]) {
+ pr_err("stream %x buffer already set for pingpong %d\n",
+ stream_info->stream_src, pingpong_bit);
return 0;
}
- if (!scratch)
- buf = msm_isp_get_stream_buffer(vfe_dev, stream_info);
+ buf = msm_isp_get_stream_buffer(vfe_dev, stream_info);
- /* Isolate pingpong_bit from pingpong_status */
- pingpong_bit = ((pingpong_status >>
- stream_info->wm[0]) & 0x1);
+ if (!buf) {
+ msm_isp_cfg_stream_scratch(stream_info, pingpong_status);
+ return 0;
+ }
for (i = 0; i < stream_info->num_planes; i++) {
- if (buf) {
- word_per_line = msm_isp_cal_word_per_line(
- stream_info->output_format, stream_info->
- plane_cfg[i].output_stride);
+ paddr = buf->mapped_info[i].paddr;
+ ISP_DBG(
+ "%s: vfe %d config buf %d to pingpong %d stream %x\n",
+ __func__, vfe_dev->pdev->id,
+ buf->buf_idx, !pingpong_bit,
+ stream_info->stream_id);
+ for (j = 0; j < stream_info->num_isp; j++) {
+ vfe_dev = stream_info->vfe_dev[j];
+ word_per_line =
+ msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[j][i].output_stride);
if (word_per_line < 0) {
/* 0 means no prefetch*/
word_per_line = 0;
buffer_size_byte = 0;
} else {
- buffer_size_byte = (word_per_line * 8 *
- stream_info->plane_cfg[i].
- output_scan_lines) - stream_info->
- plane_cfg[i].plane_addr_offset;
+ buffer_size_byte =
+ (word_per_line * 8 *
+ stream_info->plane_cfg[j][i].
+ output_scan_lines) -
+ stream_info->plane_cfg[j][i].
+ plane_addr_offset;
}
-
- paddr = buf->mapped_info[i].paddr;
- ISP_DBG(
- "%s: vfe %d config buf %d to pingpong %d stream %x\n",
- __func__, vfe_dev->pdev->id,
- buf->buf_idx, !pingpong_bit,
- stream_info->stream_id);
- }
-
- if (dual_vfe_res) {
- for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
- if (vfe_id != vfe_dev->pdev->id)
- spin_lock_irqsave(
- &dual_vfe_res->
- axi_data[vfe_id]->
- stream_info[stream_idx].
- lock, flags);
-
- if (buf)
- vfe_dev->hw_info->vfe_ops.axi_ops.
- update_ping_pong_addr(
- dual_vfe_res->vfe_base[vfe_id],
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].wm[i],
- pingpong_bit, paddr +
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].
- plane_cfg[i].plane_addr_offset,
- buffer_size_byte);
- else
- msm_isp_cfg_stream_scratch(
- dual_vfe_res->vfe_dev[vfe_id],
- &(dual_vfe_res->axi_data
- [vfe_id]->
- stream_info[stream_idx]),
- pingpong_status);
-
- if (i == 0) {
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].
- buf[!pingpong_bit] =
- buf;
- }
- if (vfe_id != vfe_dev->pdev->id)
- spin_unlock_irqrestore(
- &dual_vfe_res->
- axi_data[vfe_id]->
- stream_info[stream_idx].
- lock, flags);
- }
- } else {
- if (buf)
- vfe_dev->hw_info->vfe_ops.axi_ops.
+ vfe_dev->hw_info->vfe_ops.axi_ops.
update_ping_pong_addr(
- vfe_dev->vfe_base, stream_info->wm[i],
+ vfe_dev->vfe_base,
+ stream_info->wm[j][i],
pingpong_bit, paddr +
- stream_info->plane_cfg[i].
- plane_addr_offset,
+ stream_info->plane_cfg[j][i].
+ plane_addr_offset,
buffer_size_byte);
- else
- msm_isp_cfg_stream_scratch(vfe_dev,
- stream_info, pingpong_status);
- if (0 == i)
- stream_info->buf[!pingpong_bit] = buf;
}
- if (0 == i && buf)
- buf->pingpong_bit = !pingpong_bit;
}
-
+ stream_info->buf[!pingpong_bit] = buf;
+ buf->pingpong_bit = !pingpong_bit;
return 0;
}
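The address programming above keys everything off the ping/pong status bit of the stream's first write master: that bit selects which address register set is written, and the buffer is tracked in the complementary software slot. A minimal standalone sketch of that bit handling (hypothetical names):

#include <stdint.h>

/* Hypothetical illustration of the bit handling above. */
static unsigned int pingpong_bit_for_wm(uint32_t pingpong_status,
					unsigned int first_wm)
{
	return (pingpong_status >> first_wm) & 0x1;
}

/* The software bookkeeping slot is the complement of the extracted bit. */
static unsigned int sw_buf_slot(uint32_t pingpong_status,
				unsigned int first_wm)
{
	return !pingpong_bit_for_wm(pingpong_status, first_wm);
}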
@@ -1803,10 +1785,14 @@ static void msm_isp_handle_done_buf_frame_id_mismatch(
vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
error_event.u.error_info.err_type =
ISP_ERROR_FRAME_ID_MISMATCH;
- ret = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
- buf->bufq_handle, buf->buf_idx, time_stamp,
- frame_id,
- stream_info->runtime_output_format);
+ if (stream_info->buf_divert)
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ else
+ ret = vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx, time_stamp,
+ frame_id,
+ stream_info->runtime_output_format);
if (ret == -EFAULT) {
msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
return;
@@ -1825,7 +1811,7 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
int rc;
unsigned long flags;
struct msm_isp_event_data buf_event;
- uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ uint32_t stream_idx = stream_info->stream_src;
uint32_t buf_src;
uint8_t drop_frame = 0;
struct msm_isp_bufq *bufq = NULL;
@@ -1875,11 +1861,16 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
buf->buf_debug.put_state_last] =
MSM_ISP_BUFFER_STATE_DROP_SKIP;
buf->buf_debug.put_state_last ^= 1;
- rc = vfe_dev->buf_mgr->ops->buf_done(
- vfe_dev->buf_mgr,
- buf->bufq_handle, buf->buf_idx,
- time_stamp, frame_id,
- stream_info->runtime_output_format);
+ if (stream_info->buf_divert)
+ vfe_dev->buf_mgr->ops->put_buf(
+ vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ else
+ rc = vfe_dev->buf_mgr->ops->buf_done(
+ vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx,
+ time_stamp, frame_id,
+ stream_info->runtime_output_format);
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev,
@@ -1918,6 +1909,11 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
return -EINVAL;
}
+ /* divert native buffers */
+ vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx, time_stamp,
+ frame_id);
+
if ((bufq != NULL) && bufq->buf_type == ISP_SHARE_BUF)
msm_isp_send_event(vfe_dev->common_data->
dual_vfe_res->vfe_dev[ISP_VFE1],
@@ -1957,6 +1953,7 @@ int msm_isp_drop_frame(struct vfe_device *vfe_dev,
unsigned long flags;
struct msm_isp_bufq *bufq = NULL;
uint32_t pingpong_bit;
+ int vfe_idx;
if (!vfe_dev || !stream_info || !ts || !sof_info) {
pr_err("%s %d vfe_dev %pK stream_info %pK ts %pK op_info %pK\n",
@@ -1964,11 +1961,14 @@ int msm_isp_drop_frame(struct vfe_device *vfe_dev,
sof_info);
return -EINVAL;
}
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
pingpong_status =
~vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
spin_lock_irqsave(&stream_info->lock, flags);
- pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ pingpong_bit =
+ (~(pingpong_status >> stream_info->wm[vfe_idx][0]) & 0x1);
done_buf = stream_info->buf[pingpong_bit];
if (done_buf) {
bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
@@ -1994,93 +1994,149 @@ int msm_isp_drop_frame(struct vfe_device *vfe_dev,
return 0;
}
-static void msm_isp_get_camif_update_state_and_halt(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
- enum msm_isp_camif_update_state *camif_update,
- int *halt)
+/**
+ * msm_isp_input_disable() - Disable the input for the given vfe
+ * @vfe_dev: The vfe device whose input is to be disabled
+ *
+ * Returns - void
+ *
+ * If the stream count on an input line is 0 then disable that input
+ */
+static void msm_isp_input_disable(struct vfe_device *vfe_dev)
{
- int i;
- struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint8_t pix_stream_cnt = 0, cur_pix_stream_cnt;
- cur_pix_stream_cnt =
- axi_data->src_info[VFE_PIX_0].pix_stream_count +
- axi_data->src_info[VFE_PIX_0].raw_stream_count;
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- stream_info =
- &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- if (stream_info->stream_src < RDI_INTF_0)
- pix_stream_cnt++;
- }
+ int ext_read =
+ (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
+ int stream_count;
+ int total_stream_count = 0;
+ int i;
- if (vfe_dev->axi_data.num_active_stream == stream_cfg_cmd->num_streams
- && (stream_cfg_cmd->cmd == STOP_STREAM ||
- stream_cfg_cmd->cmd == STOP_IMMEDIATELY))
- *halt = 1;
- else
- *halt = 0;
-
- if ((pix_stream_cnt) &&
- (axi_data->src_info[VFE_PIX_0].input_mux != EXTERNAL_READ)) {
- if (cur_pix_stream_cnt == 0 && pix_stream_cnt &&
- stream_cfg_cmd->cmd == START_STREAM)
- *camif_update = ENABLE_CAMIF;
- else if (cur_pix_stream_cnt &&
- (cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
- (stream_cfg_cmd->cmd == STOP_STREAM ||
- stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
- if (*halt)
- *camif_update = DISABLE_CAMIF_IMMEDIATELY;
- else
- *camif_update = DISABLE_CAMIF;
- }
+ for (i = 0; i < VFE_SRC_MAX; i++)
+ total_stream_count += axi_data->src_info[i].stream_count +
+ axi_data->src_info[i].raw_stream_count;
+
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ stream_count = axi_data->src_info[i].stream_count +
+ axi_data->src_info[i].raw_stream_count;
+ if (stream_count)
+ continue;
+ if (axi_data->src_info[i].active == 0)
+ continue;
+ /* deactivate the input line */
+ axi_data->src_info[i].active = 0;
+
+ if (i != VFE_PIX_0 || ext_read)
+ continue;
+ /* halt camif */
+ if (total_stream_count == 0)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev,
+ DISABLE_CAMIF_IMMEDIATELY);
else
- *camif_update = NO_UPDATE;
- } else
- *camif_update = NO_UPDATE;
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF);
+ }
+
+ /* halt and reset hardware if all streams are disabled */
+ if (total_stream_count == 0) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+
+ }
}
-static void msm_isp_update_camif_output_count(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+/**
+ * msm_isp_input_enable() - Enable the input for the given vfe
+ * @vfe_dev: The vfe device whose input is to be enabled
+ * @sync_frame_id_src: If set, newly activated RDI inputs inherit the
+ *                     PIX frame id
+ *
+ * Returns - void
+ *
+ * Enable the input line if it is not already enabled
+ */
+static void msm_isp_input_enable(struct vfe_device *vfe_dev,
+ int sync_frame_id_src)
{
- int i;
- struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int ext_read =
+ (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
+ int stream_count;
+ int i;
- if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
- return;
-
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
- VFE_AXI_SRC_MAX) {
- return;
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ stream_count = axi_data->src_info[i].stream_count +
+ axi_data->src_info[i].raw_stream_count;
+ if (stream_count == 0)
+ continue;
+ if (axi_data->src_info[i].active)
+ continue;
+ /* activate the input since it is deactivated */
+ axi_data->src_info[i].frame_id = 0;
+ axi_data->src_info[i].active = 1;
+ if (i >= VFE_RAW_0 && sync_frame_id_src) {
+ /*
+			 * In case PIX and RDI streams are part
+			 * of the same session, this ensures the
+			 * RDI stream has the same frame id as
+			 * the PIX stream
+ */
+ axi_data->src_info[i].frame_id =
+ axi_data->src_info[VFE_PIX_0].frame_id;
}
- stream_info =
- &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- if (stream_info->stream_src >= RDI_INTF_0)
+ if (i != VFE_PIX_0 || ext_read)
continue;
- if (stream_info->stream_src == PIX_ENCODER ||
- stream_info->stream_src == PIX_VIEWFINDER ||
- stream_info->stream_src == PIX_VIDEO ||
- stream_info->stream_src == IDEAL_RAW) {
- if (stream_cfg_cmd->cmd == START_STREAM)
- vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count++;
+ /* for camif input the camif needs enabling */
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, ENABLE_CAMIF);
+ }
+}
+
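When an RDI source is activated while sync_frame_id_src is set, the code above seeds its frame counter from the PIX interface so streams in the same session report the same frame id. A minimal standalone sketch (hypothetical names):

/* Hypothetical illustration of the frame id seeding done above. */
static unsigned int initial_frame_id(int is_rdi_input, int sync_frame_id_src,
				     unsigned int pix_frame_id)
{
	if (is_rdi_input && sync_frame_id_src)
		return pix_frame_id;	/* inherit the PIX frame id */
	return 0;			/* otherwise start from zero */
}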
+/**
+ * msm_isp_update_intf_stream_cnt() - Update the stream count in axi interface
+ * @stream_info: The stream that is being enabled or disabled
+ * @enable: 0 means stream is being disabled, else enabled
+ *
+ * Returns - void
+ */
+static void msm_isp_update_intf_stream_cnt(
+ struct msm_vfe_axi_stream *stream_info,
+ int enable)
+{
+ int i;
+
+ switch (stream_info->stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
+ case PIX_VIDEO:
+ case IDEAL_RAW:
+ case RDI_INTF_0:
+ case RDI_INTF_1:
+ case RDI_INTF_2:
+ for (i = 0; i < stream_info->num_isp; i++) {
+ if (enable)
+ stream_info->vfe_dev[i]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
+ stream_count++;
else
- vfe_dev->axi_data.src_info[VFE_PIX_0].
- pix_stream_count--;
- } else if (stream_info->stream_src == CAMIF_RAW) {
- if (stream_cfg_cmd->cmd == START_STREAM)
- vfe_dev->axi_data.src_info[VFE_PIX_0].
+ stream_info->vfe_dev[i]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
+ stream_count--;
+ }
+ break;
+ case CAMIF_RAW:
+ for (i = 0; i < stream_info->num_isp; i++) {
+ if (enable)
+ stream_info->vfe_dev[i]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
raw_stream_count++;
else
- vfe_dev->axi_data.src_info[VFE_PIX_0].
+ stream_info->vfe_dev[i]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].
raw_stream_count--;
}
+ break;
+ default:
+		WARN(1, "Invalid stream src %d\n", stream_info->stream_src);
}
}
@@ -2091,20 +2147,24 @@ static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
{
int i, rc = 0;
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
uint64_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
uint32_t num_pix_streams = 0;
uint64_t total_bandwidth = 0;
+ int vfe_idx;
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- stream_info = &axi_data->stream_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
if (stream_info->state == ACTIVE ||
stream_info->state == START_PENDING) {
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev,
+ stream_info);
if (stream_info->stream_src < RDI_INTF_0) {
- total_pix_bandwidth += stream_info->bandwidth;
+ total_pix_bandwidth +=
+ stream_info->bandwidth[vfe_idx];
num_pix_streams++;
} else {
- total_rdi_bandwidth += stream_info->bandwidth;
+ total_rdi_bandwidth +=
+ stream_info->bandwidth[vfe_idx];
}
}
}
@@ -2119,121 +2179,88 @@ static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
return rc;
}
-static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
- enum msm_isp_camif_update_state camif_update,
- uint32_t src_mask, int regUpdateCnt)
-{
- int rc;
- unsigned long flags;
- enum msm_vfe_input_src i = 0;
- spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
-
- for (i = 0; i < VFE_SRC_MAX; i++) {
- if (src_mask & (1 << i)) {
- if (vfe_dev->axi_data.stream_update[i] > 0) {
- pr_err("%s:Stream Update in progress. cnt %d\n",
- __func__,
- vfe_dev->axi_data.stream_update[i]);
- spin_unlock_irqrestore(
- &vfe_dev->shared_data_lock, flags);
- return -EINVAL;
- }
- vfe_dev->axi_data.stream_update[i] = regUpdateCnt;
- }
- }
- if (src_mask) {
- init_completion(&vfe_dev->stream_config_complete);
- vfe_dev->axi_data.pipeline_update = camif_update;
- }
- spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
- rc = wait_for_completion_timeout(
- &vfe_dev->stream_config_complete,
- msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
- if (rc == 0) {
- for (i = 0; i < VFE_SRC_MAX; i++) {
- if (src_mask & (1 << i)) {
- spin_lock_irqsave(&vfe_dev->shared_data_lock,
- flags);
- vfe_dev->axi_data.stream_update[i] = 0;
- spin_unlock_irqrestore(&vfe_dev->
- shared_data_lock, flags);
- }
- }
- pr_err("%s: wait timeout\n", __func__);
- rc = -EBUSY;
- } else {
- rc = 0;
- }
- return rc;
-}
-
static int msm_isp_init_stream_ping_pong_reg(
- struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
int rc = 0;
- if ((vfe_dev->is_split && vfe_dev->pdev->id == 1 &&
- stream_info->stream_src < RDI_INTF_0) ||
- !vfe_dev->is_split || stream_info->stream_src >= RDI_INTF_0) {
- /* Set address for both PING & PONG register */
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PING_FLAG, 0);
- if (rc < 0) {
- pr_err("%s: No free buffer for ping\n",
- __func__);
- return rc;
- }
-
- if (stream_info->stream_type != BURST_STREAM ||
- stream_info->runtime_num_burst_capture > 1)
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PONG_FLAG, 0);
+	/* Set address for both PING & PONG register */
+ rc = msm_isp_cfg_ping_pong_address(
+ stream_info, VFE_PING_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n",
+ __func__);
+ return rc;
+ }
+ if (stream_info->stream_type != BURST_STREAM ||
+ stream_info->runtime_num_burst_capture > 1)
+ rc = msm_isp_cfg_ping_pong_address(
+ stream_info, VFE_PONG_FLAG);
- if (rc < 0) {
- pr_err("%s: No free buffer for pong\n",
- __func__);
- return rc;
- }
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n",
+ __func__);
+ return rc;
}
return rc;
}
static void msm_isp_get_stream_wm_mask(
+ struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info,
uint32_t *wm_reload_mask)
{
int i;
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
for (i = 0; i < stream_info->num_planes; i++)
- *wm_reload_mask |= (1 << stream_info->wm[i]);
+ *wm_reload_mask |= (1 << stream_info->wm[vfe_idx][i]);
}
int msm_isp_axi_halt(struct vfe_device *vfe_dev,
struct msm_vfe_axi_halt_cmd *halt_cmd)
{
int rc = 0;
+ int i;
+ struct vfe_device *halt_vfes[MAX_VFE] = { NULL, NULL };
- if (atomic_read(&vfe_dev->error_info.overflow_state) ==
- OVERFLOW_DETECTED) {
- ISP_DBG("%s: VFE%d already halted, direct return\n",
- __func__, vfe_dev->pdev->id);
- return rc;
- }
+ if (vfe_dev->is_split)
+ for (i = 0; i < MAX_VFE; i++)
+ halt_vfes[i] = vfe_dev->common_data->
+ dual_vfe_res->vfe_dev[i];
+ else
+ halt_vfes[vfe_dev->pdev->id] = vfe_dev;
- if (halt_cmd->overflow_detected) {
- atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
- NO_OVERFLOW, OVERFLOW_DETECTED);
- pr_err("%s: VFE%d Bus overflow detected: start recovery!\n",
- __func__, vfe_dev->pdev->id);
- }
+ for (i = 0; i < MAX_VFE; i++) {
+ vfe_dev = halt_vfes[i];
+ if (!vfe_dev)
+ continue;
+ if (atomic_read(&vfe_dev->error_info.overflow_state) ==
+ OVERFLOW_DETECTED) {
+			ISP_DBG("%s: VFE%d already halted, skipping\n",
+ __func__, vfe_dev->pdev->id);
+ continue;
+ }
- if (halt_cmd->stop_camif) {
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+ if (halt_cmd->overflow_detected) {
+ atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
+ NO_OVERFLOW, OVERFLOW_DETECTED);
+ pr_err("%s: VFE%d Bus overflow detected: start recovery!\n",
+ __func__, vfe_dev->pdev->id);
+ }
+
+ if (halt_cmd->stop_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev,
+ DISABLE_CAMIF_IMMEDIATELY);
+ }
+ rc |= vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev,
+ halt_cmd->blocking_halt);
+
+ /* take care of pending items in tasklet after halt */
+ msm_isp_flush_tasklet(vfe_dev);
}
- rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev,
- halt_cmd->blocking_halt);
return rc;
}
@@ -2241,12 +2268,13 @@ int msm_isp_axi_halt(struct vfe_device *vfe_dev,
int msm_isp_axi_reset(struct vfe_device *vfe_dev,
struct msm_vfe_axi_reset_cmd *reset_cmd)
{
- int rc = 0, i, j;
+ int rc = 0, i, k;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
uint32_t bufq_handle = 0, bufq_id = 0;
struct msm_isp_timestamp timestamp;
unsigned long flags;
+ struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
if (!reset_cmd) {
pr_err("%s: NULL pointer reset cmd %pK\n", __func__, reset_cmd);
@@ -2256,49 +2284,74 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev,
rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
0, reset_cmd->blocking);
+ if (vfe_dev->is_split) {
+ for (i = 0; i < MAX_VFE; i++)
+ update_vfes[i] = vfe_dev->common_data->dual_vfe_res->
+ vfe_dev[i];
+ } else {
+ update_vfes[vfe_dev->pdev->id] = vfe_dev;
+ }
msm_isp_get_timestamp(&timestamp);
- for (i = 0, j = 0; j < axi_data->num_active_stream &&
- i < VFE_AXI_SRC_MAX; i++, j++) {
- stream_info = &axi_data->stream_info[i];
- if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
- rc = -1;
- pr_err("%s invalid stream src = %d\n", __func__,
- stream_info->stream_src);
- break;
- }
- if (stream_info->state != ACTIVE) {
- j--;
+ for (k = 0; k < MAX_VFE; k++) {
+ vfe_dev = update_vfes[k];
+ if (!vfe_dev)
continue;
- }
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+ 0, reset_cmd->blocking);
- for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
- bufq_handle = stream_info->bufq_handle[bufq_id];
- if (!bufq_handle)
- continue;
- /* set ping pong address to scratch before flush */
- spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PING_FLAG);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PONG_FLAG);
- spin_unlock_irqrestore(&stream_info->lock, flags);
- rc = vfe_dev->buf_mgr->ops->flush_buf(
- vfe_dev->buf_mgr, vfe_dev->pdev->id,
- bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
- &timestamp.buf_time, reset_cmd->frame_id);
- if (rc == -EFAULT) {
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = msm_isp_get_stream_common_data(
+ vfe_dev, i);
+ if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
+ rc = -1;
+ pr_err("%s invalid stream src = %d\n",
+ __func__,
+ stream_info->stream_src);
+ break;
}
+ if (stream_info->state == AVAILABLE ||
+ stream_info->state == INACTIVE)
+ continue;
- axi_data->src_info[SRC_TO_INTF(stream_info->
- stream_src)].frame_id = reset_cmd->frame_id;
- msm_isp_reset_burst_count_and_frame_drop(vfe_dev,
- stream_info);
+ /* handle dual stream on ISP_VFE1 turn */
+ if (stream_info->num_isp > 1 &&
+ vfe_dev->pdev->id == ISP_VFE0)
+ continue;
+
+ for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX;
+ bufq_id++) {
+ bufq_handle = stream_info->bufq_handle[bufq_id];
+ if (!bufq_handle)
+ continue;
+
+ /* set ping pong to scratch before flush */
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_cfg_stream_scratch(stream_info,
+ VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info,
+ VFE_PONG_FLAG);
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ rc = vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr,
+ bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
+ &timestamp.buf_time,
+ reset_cmd->frame_id);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
+
+ axi_data->src_info[SRC_TO_INTF(stream_info->
+ stream_src)].frame_id =
+ reset_cmd->frame_id;
+ msm_isp_reset_burst_count_and_frame_drop(
+ vfe_dev, stream_info);
+ }
}
}
@@ -2308,46 +2361,67 @@ int msm_isp_axi_reset(struct vfe_device *vfe_dev,
return rc;
}
-int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+int msm_isp_axi_restart(struct vfe_device *vfe_dev_ioctl,
struct msm_vfe_axi_restart_cmd *restart_cmd)
{
- int rc = 0, i, j;
+ int rc = 0, i, k, j;
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- uint32_t wm_reload_mask = 0x0;
+ uint32_t wm_reload_mask = 0;
unsigned long flags;
+ struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
+ struct vfe_device *vfe_dev;
+
+ if (vfe_dev_ioctl->is_split) {
+ for (i = 0; i < MAX_VFE; i++)
+ update_vfes[i] = vfe_dev_ioctl->common_data->
+ dual_vfe_res->vfe_dev[i];
+ } else {
+ update_vfes[vfe_dev_ioctl->pdev->id] = vfe_dev_ioctl;
+ }
- vfe_dev->buf_mgr->frameId_mismatch_recovery = 0;
- for (i = 0, j = 0; j < axi_data->num_active_stream &&
- i < VFE_AXI_SRC_MAX; i++, j++) {
- stream_info = &axi_data->stream_info[i];
- if (stream_info->state != ACTIVE) {
- j--;
+ vfe_dev_ioctl->buf_mgr->frameId_mismatch_recovery = 0;
+ for (k = 0; k < MAX_VFE; k++) {
+ vfe_dev = update_vfes[k];
+ if (!vfe_dev)
continue;
+ vfe_dev->buf_mgr->frameId_mismatch_recovery = 0;
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = msm_isp_get_stream_common_data(
+ vfe_dev, i);
+ if (stream_info->state == AVAILABLE ||
+ stream_info->state == INACTIVE)
+ continue;
+ msm_isp_get_stream_wm_mask(vfe_dev, stream_info,
+ &wm_reload_mask);
+ /* handle dual stream on ISP_VFE1 turn */
+ if (stream_info->num_isp > 1 &&
+ vfe_dev->pdev->id == ISP_VFE0)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ for (j = 0; j < MSM_ISP_COMP_IRQ_MAX; j++)
+ stream_info->composite_irq[j] = 0;
+ msm_isp_init_stream_ping_pong_reg(stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
- msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
- spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
- spin_unlock_irqrestore(&stream_info->lock, flags);
- }
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- vfe_dev->vfe_base, wm_reload_mask);
- rc = vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
- restart_cmd->enable_camif);
- if (rc < 0)
- pr_err("%s Error restarting HW\n", __func__);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base, wm_reload_mask);
+ vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
+ restart_cmd->enable_camif);
+ }
return rc;
}
-static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev,
+static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev_ioctl,
struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
uint8_t cgc_override)
{
int i = 0, j = 0;
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int k;
+ struct vfe_device *vfe_dev;
+ int vfe_idx;
if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
return -EINVAL;
@@ -2357,14 +2431,21 @@ static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev,
VFE_AXI_SRC_MAX) {
return -EINVAL;
}
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
for (j = 0; j < stream_info->num_planes; j++) {
- if (vfe_dev->hw_info->vfe_ops.axi_ops.
- update_cgc_override)
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ if (!vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_cgc_override)
+ continue;
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(
+ vfe_dev, stream_info);
vfe_dev->hw_info->vfe_ops.axi_ops.
update_cgc_override(vfe_dev,
- stream_info->wm[j], cgc_override);
+ stream_info->wm[vfe_idx][j],
+ cgc_override);
+ }
}
}
return 0;
@@ -2456,8 +2537,7 @@ static int msm_isp_update_dual_HW_ms_info_at_start(
static int msm_isp_update_dual_HW_ms_info_at_stop(
struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
- enum msm_isp_camif_update_state camif_update)
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
{
int i, rc = 0;
uint8_t slave_id;
@@ -2476,13 +2556,13 @@ static int msm_isp_update_dual_HW_ms_info_at_stop(
VFE_AXI_SRC_MAX) {
return -EINVAL;
}
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
stream_src = SRC_TO_INTF(stream_info->stream_src);
/* Remove PIX if DISABLE CAMIF */
- if (stream_src == VFE_PIX_0 && !((camif_update == DISABLE_CAMIF)
- || (camif_update == DISABLE_CAMIF_IMMEDIATELY)))
+ if (stream_src == VFE_PIX_0 &&
+ axi_data->src_info[VFE_PIX_0].active)
continue;
src_info = &axi_data->src_info[stream_src];
@@ -2517,404 +2597,489 @@ static int msm_isp_update_dual_HW_ms_info_at_stop(
return rc;
}
-static int msm_isp_update_dual_HW_axi(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream *stream_info)
+/**
+ * msm_isp_axi_wait_for_stream_cfg_done() - Wait for a stream completion
+ * @stream_info: The stream to wait on
+ * @active: Zero means wait for stream to be INACTIVE else wait for ACTIVE
+ *
+ * Returns - 0 on success else error code
+ */
+static int msm_isp_axi_wait_for_stream_cfg_done(
+ struct msm_vfe_axi_stream *stream_info, int active)
+{
+ int rc = -1;
+ unsigned long flags;
+
+ /* No need to wait if stream is already in required state */
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (active && ACTIVE == stream_info->state)
+ rc = 0;
+ if (!active && INACTIVE == stream_info->state)
+ rc = 0;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (rc == 0)
+ return rc;
+
+ rc = wait_for_completion_timeout(
+ active ? &stream_info->active_comp :
+ &stream_info->inactive_comp,
+ msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+
+ if (rc <= 0) {
+ rc = rc ? rc : -ETIMEDOUT;
+ pr_err("%s: wait for stream %x/%x state %d config failed %d\n",
+ __func__,
+ stream_info->stream_id,
+ stream_info->stream_src,
+ stream_info->state,
+ rc);
+ rc = -EINVAL;
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+/**
+ * msm_isp_axi_wait_for_streams() - Wait for completion of a number of streams
+ * @streams: The streams to wait on
+ * @num_stream: Number of streams to wait on
+ * @active: Zero means wait for streams to be INACTIVE else wait for ACTIVE
+ *
+ * Returns - 0 on success else error code
+ */
+static int msm_isp_axi_wait_for_streams(struct msm_vfe_axi_stream **streams,
+ int num_stream, int active)
{
+ int i;
int rc = 0;
- int vfe_id;
- uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
- struct dual_vfe_resource *dual_vfe_res = NULL;
+ struct msm_vfe_axi_stream *stream_info;
- if (stream_idx >= VFE_AXI_SRC_MAX) {
- pr_err("%s: Invalid stream idx %d\n", __func__, stream_idx);
+ for (i = 0; i < num_stream; i++) {
+ stream_info = streams[i];
+ rc |= msm_isp_axi_wait_for_stream_cfg_done(stream_info, active);
+ }
+ return rc;
+}
+
+static int __msm_isp_check_stream_state(struct msm_vfe_axi_stream *stream_info,
+ int cmd)
+{
+ switch (stream_info->state) {
+ case AVAILABLE:
return -EINVAL;
+ case PAUSING:
+ case RESUMING:
+ case RESUME_PENDING:
+ case ACTIVE:
+ if (cmd != 0)
+ return -EALREADY;
+ break;
+ case INACTIVE:
+ if (cmd == 0)
+ return -EALREADY;
+ break;
+ /*
+	 * stream cannot be in the following states since we always
+	 * wait in the ioctl for the stream to be active or inactive
+ */
+ case UPDATING:
+ case START_PENDING:
+ case STARTING:
+ case STOPPING:
+ case STOP_PENDING:
+ case PAUSE_PENDING:
+ default:
+ WARN(1, "Invalid state %d\n", stream_info->state);
}
+ return 0;
+}
- dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
- if (!dual_vfe_res->vfe_dev[ISP_VFE0] ||
- !dual_vfe_res->vfe_dev[ISP_VFE1] ||
- !dual_vfe_res->axi_data[ISP_VFE0] ||
- !dual_vfe_res->axi_data[ISP_VFE1]) {
- pr_err("%s: Error in dual vfe resource\n", __func__);
- rc = -EINVAL;
- } else {
- if (stream_info->state == RESUME_PENDING &&
- (dual_vfe_res->axi_data[!vfe_dev->pdev->id]->
- stream_info[stream_idx].state == RESUME_PENDING)) {
- /* Update the AXI only after both ISPs receiving the
- Reg update interrupt*/
- for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
- rc = msm_isp_axi_stream_enable_cfg(
- dual_vfe_res->vfe_dev[vfe_id],
- &dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx], 1);
- dual_vfe_res->axi_data[vfe_id]->
- stream_info[stream_idx].state =
- RESUMING;
+static void __msm_isp_stop_axi_streams(struct msm_vfe_axi_stream **streams,
+ int num_streams, int cmd_type)
+{
+ int i;
+ struct msm_vfe_axi_shared_data *axi_data;
+ struct msm_isp_timestamp timestamp;
+ int total_stream_count = 0;
+ uint32_t bufq_id = 0, bufq_handle = 0;
+ struct msm_vfe_axi_stream *stream_info;
+ unsigned long flags;
+ uint32_t intf;
+ int rc;
+ struct vfe_device *vfe_dev;
+ struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
+ int k;
+
+ msm_isp_get_timestamp(&timestamp);
+
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ spin_lock_irqsave(&stream_info->lock, flags);
+ /*
+		 * Since we can get here from the start axi stream error path,
+		 * the stream may be in an intermediate state like
+		 * STARTING/START_PENDING; force the stream to move out of
+		 * that state so it can be made INACTIVE. The intermediate
+		 * states update driver state, so it is better to go through
+		 * those state transitions than to force the stream directly
+		 * to INACTIVE.
+ */
+ while (stream_info->state != ACTIVE)
+ __msm_isp_axi_stream_update(stream_info,
+ &timestamp);
+ msm_isp_cfg_stream_scratch(stream_info, VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info, VFE_PONG_FLAG);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ if (stream_info->num_planes > 1)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_comp_mask(vfe_dev, stream_info);
+ else
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+ update_vfes[vfe_dev->pdev->id] = vfe_dev;
+ }
+ init_completion(&stream_info->inactive_comp);
+ stream_info->state = STOP_PENDING;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ msm_isp_update_intf_stream_cnt(stream_info, 0);
+ }
+
+ for (k = 0; k < MAX_VFE; k++) {
+ int ext_read;
+
+ if (!update_vfes[k])
+ continue;
+ vfe_dev = update_vfes[k];
+ axi_data = &vfe_dev->axi_data;
+ ext_read =
+ (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ total_stream_count +=
+ axi_data->src_info[i].stream_count +
+ axi_data->src_info[i].raw_stream_count;
+ if (i != VFE_PIX_0)
+ continue;
+ if (axi_data->src_info[i].stream_count == 0) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ enable_module(vfe_dev, 0xFF, 0);
+ /* reg update for PIX with 0 streams active */
+ if (ext_read == 0)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, VFE_PIX_0);
}
}
+
+ }
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ intf = SRC_TO_INTF(stream_info->stream_src);
+ if (total_stream_count == 0 ||
+ ((stream_info->stream_type == BURST_STREAM) &&
+ stream_info->runtime_num_burst_capture == 0)) {
+ spin_lock_irqsave(&stream_info->lock, flags);
+ while (stream_info->state != INACTIVE)
+ __msm_isp_axi_stream_update(
+ stream_info, &timestamp);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ }
+
+ rc = msm_isp_axi_wait_for_streams(streams, num_streams, 0);
+ if (rc) {
+ pr_err("%s: wait for stream comp failed, retry...\n", __func__);
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ if (stream_info->state == INACTIVE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ __msm_isp_axi_stream_update(stream_info,
+ &timestamp);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ rc = msm_isp_axi_wait_for_streams(streams, num_streams, 0);
+ if (rc) {
+ pr_err("%s: wait for stream comp failed, force streams to inactive\n",
+ __func__);
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ if (stream_info->state == INACTIVE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ while (stream_info->state != INACTIVE)
+ __msm_isp_axi_stream_update(
+ stream_info, &timestamp);
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ }
+ }
+ }
+ /* clear buffers that are dequeued */
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
+ bufq_handle = stream_info->bufq_handle[bufq_id];
+ if (!bufq_handle)
+ continue;
+ vfe_dev = stream_info->vfe_dev[0];
+ rc = vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr,
+ bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
+ &timestamp.buf_time, 0);
+ if (rc == -EFAULT)
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ }
+ }
+
+ for (k = 0; k < MAX_VFE; k++) {
+ if (!update_vfes[k])
+ continue;
+ msm_isp_update_stream_bandwidth(update_vfes[k]);
+ msm_isp_input_disable(update_vfes[k]);
+ }
+
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ intf = SRC_TO_INTF(stream_info->stream_src);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ axi_data = &vfe_dev->axi_data;
+ if (axi_data->src_info[intf].stream_count == 0)
+ vfe_dev->reg_update_requested &=
+ ~(BIT(intf));
+ }
}
- return rc;
}
-static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
- enum msm_isp_camif_update_state camif_update)
+static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
{
int i, rc = 0;
- uint8_t src_state, wait_for_complete = 0;
- uint32_t wm_reload_mask = 0x0;
+ uint8_t src_state;
+ uint32_t wm_reload_mask[MAX_VFE] = {0, 0};
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
uint32_t src_mask = 0;
unsigned long flags;
+ struct msm_vfe_axi_stream *streams[MAX_NUM_STREAM];
+ int num_streams = 0;
+ struct msm_isp_timestamp timestamp;
+ struct vfe_device *update_vfes[MAX_VFE] = {0, 0};
+ int k;
+ uint32_t num_active_streams[MAX_VFE] = {0, 0};
+ struct vfe_device *vfe_dev;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev_ioctl->axi_data;
if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
return -EINVAL;
- if (camif_update == ENABLE_CAMIF) {
- ISP_DBG("%s: vfe %d camif enable\n", __func__,
- vfe_dev->pdev->id);
- vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id = 0;
- }
+ msm_isp_get_timestamp(&timestamp);
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
- VFE_AXI_SRC_MAX) {
- return -EINVAL;
- }
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
src_state = axi_data->src_info[
SRC_TO_INTF(stream_info->stream_src)].active;
+
else {
ISP_DBG("%s: invalid src info index\n", __func__);
- return -EINVAL;
+ rc = -EINVAL;
+ goto error;
}
-
- msm_isp_calculate_bandwidth(axi_data, stream_info);
- msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_reset_framedrop(vfe_dev, stream_info);
- rc = msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+ rc = __msm_isp_check_stream_state(stream_info, 1);
+ if (-EALREADY == rc) {
+ rc = 0;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ goto error;
+ }
+
+ msm_isp_calculate_bandwidth(stream_info);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ msm_isp_get_stream_wm_mask(stream_info->vfe_dev[k],
+ stream_info, &wm_reload_mask[
+ stream_info->vfe_dev[k]->pdev->id]);
+ src_state = stream_info->vfe_dev[k]->axi_data.src_info[
+ SRC_TO_INTF(stream_info->stream_src)].active;
+ if (update_vfes[stream_info->vfe_dev[k]->pdev->id])
+ continue;
+ update_vfes[stream_info->vfe_dev[k]->pdev->id] =
+ stream_info->vfe_dev[k];
+ num_active_streams[stream_info->vfe_dev[k]->pdev->id] =
+ stream_info->vfe_dev[k]->axi_data.
+ num_active_stream;
+ }
+ msm_isp_reset_framedrop(vfe_dev_ioctl, stream_info);
+ rc = msm_isp_init_stream_ping_pong_reg(stream_info);
if (rc < 0) {
pr_err("%s: No buffer for stream%d\n", __func__,
HANDLE_TO_IDX(
stream_cfg_cmd->stream_handle[i]));
spin_unlock_irqrestore(&stream_info->lock, flags);
- return rc;
+ goto error;
}
- spin_unlock_irqrestore(&stream_info->lock, flags);
- if (stream_info->num_planes > 1) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_comp_mask(vfe_dev, stream_info);
- } else {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_wm_irq_mask(vfe_dev, stream_info);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ if (stream_info->num_planes > 1) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_comp_mask(vfe_dev, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+ }
}
+ init_completion(&stream_info->active_comp);
stream_info->state = START_PENDING;
+ msm_isp_update_intf_stream_cnt(stream_info, 1);
- ISP_DBG("%s, Stream 0x%x src %d src_state %d on vfe %d\n",
- __func__, stream_info->stream_id,
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]),
- src_state, vfe_dev->pdev->id);
-
+ ISP_DBG("%s, Stream 0x%x src_state %d on vfe %d\n", __func__,
+ stream_info->stream_src, src_state,
+ vfe_dev_ioctl->pdev->id);
if (src_state) {
src_mask |= (1 << SRC_TO_INTF(stream_info->stream_src));
- wait_for_complete = 1;
} else {
- if (vfe_dev->dump_reg)
- msm_camera_io_dump(vfe_dev->vfe_base,
- 0x1000, 1);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+
+ if (vfe_dev->dump_reg)
+ msm_camera_io_dump(vfe_dev->vfe_base,
+ 0x1000, 1);
+ }
- /*Configure AXI start bits to start immediately*/
- msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info, 0);
- stream_info->state = ACTIVE;
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
- SRC_TO_INTF(stream_info->stream_src));
+ /* Configure AXI start bits to start immediately */
+ while (stream_info->state != ACTIVE)
+ __msm_isp_axi_stream_update(
+ stream_info, &timestamp);
- /*
- * Active bit is set in enable_camif for PIX.
- * For RDI, set it here
- */
- if (SRC_TO_INTF(stream_info->stream_src) >= VFE_RAW_0 &&
- SRC_TO_INTF(stream_info->stream_src) <
- VFE_SRC_MAX) {
- /* Incase PIX and RDI streams are part of same
- * session, this will ensure RDI stream will
- * have same frame id as of PIX stream
- */
- if (stream_cfg_cmd->sync_frame_id_src)
- vfe_dev->axi_data.src_info[SRC_TO_INTF(
- stream_info->stream_src)].frame_id =
- vfe_dev->axi_data.src_info[VFE_PIX_0]
- .frame_id;
- else
- vfe_dev->axi_data.src_info[SRC_TO_INTF(
- stream_info->stream_src)].frame_id = 0;
- vfe_dev->axi_data.src_info[SRC_TO_INTF(
- stream_info->stream_src)].active = 1;
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev,
+ SRC_TO_INTF(stream_info->stream_src));
}
}
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ streams[num_streams++] = stream_info;
}
- msm_isp_update_stream_bandwidth(vfe_dev);
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- vfe_dev->vfe_base, wm_reload_mask);
- msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
- if (camif_update == ENABLE_CAMIF) {
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev, camif_update);
- vfe_dev->axi_data.camif_state = CAMIF_ENABLE;
- vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;
+ for (i = 0; i < MAX_VFE; i++) {
+ vfe_dev = update_vfes[i];
+ if (!vfe_dev)
+ continue;
+ if (num_active_streams[i] == 0) {
+ /* Configure UB */
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev);
+			/* reset overflow state when starting */
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ NO_OVERFLOW);
+ }
+ msm_isp_update_stream_bandwidth(vfe_dev);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base, wm_reload_mask[i]);
+
+ msm_isp_input_enable(vfe_dev,
+ stream_cfg_cmd->sync_frame_id_src);
}
- if (wait_for_complete) {
- rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update,
- src_mask, 2);
- if (rc < 0) {
- pr_err("%s: wait for config done failed\n", __func__);
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(
- stream_cfg_cmd->stream_handle[i])];
- stream_info->state = STOPPING;
- msm_isp_axi_stream_enable_cfg(
- vfe_dev, stream_info, 0);
- stream_cfg_cmd->cmd = STOP_IMMEDIATELY;
- msm_isp_update_camif_output_count(vfe_dev,
- stream_cfg_cmd);
- }
- }
+ rc = msm_isp_axi_wait_for_streams(streams, num_streams, 1);
+ if (rc < 0) {
+ pr_err("%s: wait for config done failed\n", __func__);
+ goto error;
}
+ return 0;
+error:
+ __msm_isp_stop_axi_streams(streams, num_streams,
+ STOP_STREAM);
+
return rc;
}
-static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
- enum msm_isp_camif_update_state camif_update,
- int halt)
+static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev_ioctl,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
{
int i, rc = 0;
- uint8_t wait_for_complete_for_this_stream = 0;
struct msm_vfe_axi_stream *stream_info = NULL;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- int ext_read =
- (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
- uint32_t src_mask = 0, intf, bufq_id = 0, bufq_handle = 0;
+ struct msm_vfe_axi_stream *streams[MAX_NUM_STREAM];
+ int num_streams = 0;
unsigned long flags;
- struct msm_isp_timestamp timestamp;
if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
stream_cfg_cmd->num_streams == 0)
return -EINVAL;
- msm_isp_get_timestamp(&timestamp);
-
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
- VFE_AXI_SRC_MAX) {
- return -EINVAL;
- }
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
- /* set ping pong address to scratch before stream stop */
spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PING_FLAG);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PONG_FLAG);
+ rc = __msm_isp_check_stream_state(stream_info, 0);
spin_unlock_irqrestore(&stream_info->lock, flags);
- wait_for_complete_for_this_stream = 0;
-
- if (stream_info->num_planes > 1)
- vfe_dev->hw_info->vfe_ops.axi_ops.
- clear_comp_mask(vfe_dev, stream_info);
- else
- vfe_dev->hw_info->vfe_ops.axi_ops.
- clear_wm_irq_mask(vfe_dev, stream_info);
-
- stream_info->state = STOP_PENDING;
-
- if (!halt && !ext_read &&
- !(stream_info->stream_type == BURST_STREAM &&
- stream_info->runtime_num_burst_capture == 0))
- wait_for_complete_for_this_stream = 1;
-
- ISP_DBG("%s: stream 0x%x, vfe %d camif %d halt %d wait %d\n",
- __func__,
- stream_info->stream_id,
- vfe_dev->pdev->id,
- camif_update,
- halt,
- wait_for_complete_for_this_stream);
-
- intf = SRC_TO_INTF(stream_info->stream_src);
- if (!wait_for_complete_for_this_stream ||
- stream_info->state == INACTIVE ||
- !vfe_dev->axi_data.src_info[intf].active) {
- msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info, 0);
- stream_info->state = INACTIVE;
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
- SRC_TO_INTF(stream_info->stream_src));
-
+ if (rc) {
/*
- * Active bit is reset in disble_camif for PIX.
- * For RDI, reset it here for not wait_for_complete
- * This is assuming there is only 1 stream mapped to
- * each RDI.
+			 * continue stopping the other streams, since an error
+			 * here means this stream is already inactive
*/
- if (intf >= VFE_RAW_0 &&
- intf < VFE_SRC_MAX) {
- vfe_dev->axi_data.src_info[intf].active = 0;
- }
- } else
- src_mask |= (1 << intf);
-
- }
-
- if (src_mask) {
- rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update,
- src_mask, 2);
- if (rc < 0) {
- pr_err("%s: wait for config done failed, retry...\n",
- __func__);
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(
- stream_cfg_cmd->stream_handle[i])];
- stream_info->state = STOPPING;
- msm_isp_axi_stream_enable_cfg(
- vfe_dev, stream_info, 0);
- vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
- vfe_dev,
- SRC_TO_INTF(stream_info->stream_src));
- rc = msm_isp_axi_wait_for_cfg_done(vfe_dev,
- camif_update, src_mask, 1);
- if (rc < 0) {
- pr_err("%s: vfe%d cfg done failed\n",
- __func__, vfe_dev->pdev->id);
- stream_info->state = INACTIVE;
- } else
- pr_err("%s: vfe%d retry success! report err!\n",
- __func__, vfe_dev->pdev->id);
-
- rc = -EBUSY;
- }
- }
-
- /*
- * Active bit is reset in disble_camif for PIX.
- * For RDI, reset it here after wait_for_complete
- * This is assuming there is only 1 stream mapped to each RDI
- */
- for (i = VFE_RAW_0; i < VFE_SRC_MAX; i++) {
- if (src_mask & (1 << i)) {
- vfe_dev->axi_data.src_info[i].active = 0;
- }
- }
- }
-
- if (camif_update == DISABLE_CAMIF) {
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev, DISABLE_CAMIF);
- vfe_dev->axi_data.camif_state = CAMIF_DISABLE;
- } else if ((camif_update == DISABLE_CAMIF_IMMEDIATELY) ||
- (ext_read)) {
- if (!ext_read)
- vfe_dev->hw_info->vfe_ops.core_ops.
- update_camif_state(vfe_dev,
- DISABLE_CAMIF_IMMEDIATELY);
- vfe_dev->axi_data.camif_state = CAMIF_STOPPED;
- }
- if (halt) {
- /*during stop immediately, stop output then stop input*/
- vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
- vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 1);
- vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
- }
-
- msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
- msm_isp_update_stream_bandwidth(vfe_dev);
-
- for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
- for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
- bufq_handle = stream_info->bufq_handle[bufq_id];
- if (!bufq_handle)
- continue;
-
- rc = vfe_dev->buf_mgr->ops->flush_buf(
- vfe_dev->buf_mgr, vfe_dev->pdev->id,
- bufq_handle, MSM_ISP_BUFFER_FLUSH_ALL,
- &timestamp.buf_time, 0);
- if (rc == -EFAULT) {
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
- }
+ rc = 0;
+ continue;
}
- vfe_dev->reg_update_requested &=
- ~(BIT(SRC_TO_INTF(stream_info->stream_src)));
+ streams[num_streams++] = stream_info;
}
+ __msm_isp_stop_axi_streams(streams, num_streams,
+ stream_cfg_cmd->cmd);
return rc;
}
-
int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, ret;
struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
- enum msm_isp_camif_update_state camif_update;
- int halt = 0;
-
- rc = msm_isp_axi_check_stream_state(vfe_dev, stream_cfg_cmd);
- if (rc < 0) {
- pr_err("%s: Invalid stream state\n", __func__);
- return rc;
- }
+ int i;
- if (axi_data->num_active_stream == 0) {
- /*Configure UB*/
- vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev);
- /*when start reset overflow state*/
- atomic_set(&vfe_dev->error_info.overflow_state,
- NO_OVERFLOW);
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX)
+ return -EINVAL;
}
- msm_isp_get_camif_update_state_and_halt(vfe_dev, stream_cfg_cmd,
- &camif_update, &halt);
- if (camif_update == DISABLE_CAMIF)
- vfe_dev->axi_data.camif_state = CAMIF_STOPPING;
if (stream_cfg_cmd->cmd == START_STREAM) {
msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 1);
rc = msm_isp_start_axi_stream(
- vfe_dev, stream_cfg_cmd, camif_update);
+ vfe_dev, stream_cfg_cmd);
} else {
rc = msm_isp_stop_axi_stream(
- vfe_dev, stream_cfg_cmd, camif_update, halt);
+ vfe_dev, stream_cfg_cmd);
msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 0);
- if (axi_data->num_active_stream == 0) {
- /* Reset hvx state */
- vfe_dev->hvx_cmd = HVX_DISABLE;
- }
/*
* Use different ret value to not overwrite the error from
* msm_isp_stop_axi_stream
*/
ret = msm_isp_update_dual_HW_ms_info_at_stop(
- vfe_dev, stream_cfg_cmd, camif_update);
+ vfe_dev, stream_cfg_cmd);
if (ret < 0)
pr_warn("%s: Warning! Update dual_cam failed\n",
__func__);
+ if (vfe_dev->axi_data.num_active_stream == 0)
+ vfe_dev->hvx_cmd = HVX_DISABLE;
+ if (vfe_dev->is_split) {
+ struct vfe_device *vfe_temp =
+ vfe_dev->common_data->
+ dual_vfe_res->vfe_dev[ISP_VFE0];
+ if (vfe_temp->axi_data.num_active_stream == 0)
+ vfe_temp->hvx_cmd = HVX_DISABLE;
+ }
}
if (rc < 0)
@@ -2925,7 +3090,8 @@ int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
- uint32_t frame_id, enum msm_vfe_input_src frame_src)
+ uint32_t frame_id, uint32_t buf_index,
+ enum msm_vfe_input_src frame_src)
{
int rc = -1;
struct msm_isp_buffer *buf = NULL;
@@ -2940,7 +3106,7 @@ static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
return -EINVAL;
}
- stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ stream_idx = stream_info->stream_src;
if (!stream_info->controllable_output)
return -EINVAL;
@@ -2961,7 +3127,7 @@ static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, bufq_handle, &buf);
+ vfe_dev->pdev->id, bufq_handle, buf_index, &buf);
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
return rc;
@@ -2999,7 +3165,7 @@ static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
static int msm_isp_request_frame(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
- uint32_t frame_id)
+ uint32_t frame_id, uint32_t buf_index)
{
struct msm_vfe_axi_stream_request_cmd stream_cfg_cmd;
struct msm_vfe_frame_request_queue *queue_req;
@@ -3007,10 +3173,9 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
unsigned long flags;
int rc = 0;
enum msm_vfe_input_src frame_src = 0;
- struct dual_vfe_resource *dual_vfe_res =
- vfe_dev->common_data->dual_vfe_res;
- uint32_t vfe_id = 0;
- bool dual_vfe = false;
+ int k;
+ uint32_t wm_mask = 0;
+ int vfe_idx;
if (!vfe_dev || !stream_info) {
pr_err("%s %d failed: vfe_dev %pK stream_info %pK\n", __func__,
@@ -3018,16 +3183,9 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
return -EINVAL;
}
- if (vfe_dev->is_split) {
- if (stream_info->stream_src < RDI_INTF_0) {
- if (vfe_dev->pdev->id == ISP_VFE1) {
- dual_vfe = true;
- } else {
- /* return early for dual vfe0 */
- return 0;
- }
- }
- }
+ /* return early for dual vfe0 */
+ if (stream_info->num_isp > 1 && vfe_dev->pdev->id == ISP_VFE0)
+ return 0;
if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
pr_err("%s:%d invalid stream src %d\n", __func__, __LINE__,
@@ -3052,7 +3210,7 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
vfe_dev->axi_data.src_info[VFE_PIX_0].active);
rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
- user_stream_id, frame_id, frame_src);
+ user_stream_id, frame_id, buf_index, frame_src);
if (rc < 0)
pr_err("%s:%d failed: return_empty_buffer src %d\n",
__func__, __LINE__, frame_src);
@@ -3067,13 +3225,13 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
stream_info->stream_id);
rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
- user_stream_id, frame_id, frame_src);
+ user_stream_id, frame_id, buf_index, frame_src);
if (rc < 0)
pr_err("%s:%d failed: return_empty_buffer src %d\n",
__func__, __LINE__, frame_src);
stream_info->current_framedrop_period =
MSM_VFE_STREAM_STOP_PERIOD;
- msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ msm_isp_cfg_framedrop_reg(stream_info);
return 0;
}
@@ -3093,10 +3251,12 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
if (!stream_info->bufq_handle[queue_req->buff_queue_id]) {
spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s:%d request frame failed on hw stream 0x%x, request stream %d due to no bufq idx: %d\n",
- __func__, __LINE__, stream_info->stream_handle,
+ __func__, __LINE__,
+ stream_info->stream_handle[0],
user_stream_id, queue_req->buff_queue_id);
return 0;
}
+ queue_req->buf_index = buf_index;
queue_req->cmd_used = 1;
stream_info->request_q_idx =
@@ -3105,14 +3265,15 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
stream_info->request_q_cnt++;
stream_info->undelivered_request_cnt++;
- stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle;
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle[vfe_idx];
stream_cfg_cmd.frame_skip_pattern = NO_SKIP;
stream_cfg_cmd.init_frame_drop = 0;
stream_cfg_cmd.burst_count = stream_info->request_q_cnt;
if (stream_info->undelivered_request_cnt == 1) {
- rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
- VFE_PING_FLAG, 0);
+ rc = msm_isp_cfg_ping_pong_address(stream_info,
+ VFE_PING_FLAG);
if (rc) {
spin_unlock_irqrestore(&stream_info->lock, flags);
stream_info->undelivered_request_cnt--;
@@ -3121,41 +3282,23 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
return rc;
}
- vfe_id = vfe_dev->pdev->id;
- if (dual_vfe) {
- struct msm_vfe_axi_stream *temp_stream_info;
-
- temp_stream_info = msm_isp_vfe_get_stream(dual_vfe_res,
- ISP_VFE0,
- HANDLE_TO_IDX(
- stream_info->stream_handle));
- msm_isp_get_stream_wm_mask(temp_stream_info,
- &dual_vfe_res->wm_reload_mask[ISP_VFE0]);
- msm_isp_get_stream_wm_mask(stream_info,
- &dual_vfe_res->wm_reload_mask[ISP_VFE1]);
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- dual_vfe_res->vfe_base[ISP_VFE0],
- dual_vfe_res->wm_reload_mask[ISP_VFE0]);
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- dual_vfe_res->vfe_base[ISP_VFE1],
- dual_vfe_res->wm_reload_mask[ISP_VFE1]);
- dual_vfe_res->wm_reload_mask[ISP_VFE0] = 0;
- dual_vfe_res->wm_reload_mask[ISP_VFE1] = 0;
- } else {
- msm_isp_get_stream_wm_mask(stream_info,
- &dual_vfe_res->wm_reload_mask[vfe_id]);
- vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
- vfe_dev->vfe_base,
- dual_vfe_res->wm_reload_mask[vfe_id]);
- dual_vfe_res->wm_reload_mask[vfe_id] = 0;
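+		/* reload the write masters on every VFE attached to the stream */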
+ for (k = 0; k < stream_info->num_isp; k++) {
+ wm_mask = 0;
+ msm_isp_get_stream_wm_mask(stream_info->vfe_dev[k],
+ stream_info, &wm_mask);
+ stream_info->vfe_dev[k]->
+ hw_info->vfe_ops.axi_ops.reload_wm(
+ stream_info->vfe_dev[k],
+ stream_info->vfe_dev[k]->vfe_base, wm_mask);
+
}
stream_info->sw_ping_pong_bit = 0;
} else if (stream_info->undelivered_request_cnt == 2) {
pingpong_status =
vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(
vfe_dev);
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, pingpong_status, 0);
+ rc = msm_isp_cfg_ping_pong_address(
+ stream_info, pingpong_status);
if (rc) {
stream_info->undelivered_request_cnt--;
spin_unlock_irqrestore(&stream_info->lock,
@@ -3172,7 +3315,7 @@ static int msm_isp_request_frame(struct vfe_device *vfe_dev,
return -EINVAL;
}
- rc = msm_isp_calculate_framedrop(&vfe_dev->axi_data, &stream_cfg_cmd);
+ rc = msm_isp_calculate_framedrop(vfe_dev, &stream_cfg_cmd);
if (0 == rc)
msm_isp_reset_framedrop(vfe_dev, stream_info);
@@ -3186,25 +3329,45 @@ static int msm_isp_add_buf_queue(struct vfe_device *vfe_dev,
{
int rc = 0;
uint32_t bufq_id = 0;
+ unsigned long flags;
if (stream_id == stream_info->stream_id)
bufq_id = VFE_BUF_QUEUE_DEFAULT;
else
bufq_id = VFE_BUF_QUEUE_SHARED;
+ spin_lock_irqsave(&stream_info->lock, flags);
- stream_info->bufq_handle[bufq_id] =
- vfe_dev->buf_mgr->ops->get_bufq_handle(vfe_dev->buf_mgr,
- stream_info->session_id, stream_id);
if (stream_info->bufq_handle[bufq_id] == 0) {
- pr_err("%s: failed: No valid buffer queue for stream: 0x%x\n",
- __func__, stream_id);
- rc = -EINVAL;
+ stream_info->bufq_handle[bufq_id] =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(vfe_dev->buf_mgr,
+ stream_info->session_id, stream_id);
+ if (stream_info->bufq_handle[bufq_id] == 0) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: failed: No valid buffer queue for stream: 0x%x\n",
+ __func__, stream_id);
+ return -EINVAL;
+ }
+ } else {
+ uint32_t bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr,
+ stream_info->session_id,
+ stream_id);
+ if (bufq_handle != stream_info->bufq_handle[bufq_id]) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Stream %x already has buffer q %x cannot add handle %x\n",
+ __func__, stream_id,
+ stream_info->bufq_handle[bufq_id], bufq_handle);
+ return -EINVAL;
+ }
}
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
ISP_DBG("%d: Add bufq handle:0x%x, idx:%d, for stream %d on VFE %d\n",
__LINE__, stream_info->bufq_handle[bufq_id],
- bufq_id, stream_info->stream_handle, vfe_dev->pdev->id);
+ bufq_id, stream_info->stream_handle[0],
+ vfe_dev->pdev->id);
return rc;
}
@@ -3221,18 +3384,102 @@ static void msm_isp_remove_buf_queue(struct vfe_device *vfe_dev,
bufq_id = VFE_BUF_QUEUE_SHARED;
spin_lock_irqsave(&stream_info->lock, flags);
- stream_info->bufq_handle[bufq_id] = 0;
+
+ if (stream_info->bufq_handle[bufq_id]) {
+ stream_info->bufq_handle[bufq_id] = 0;
+ if (stream_info->state == ACTIVE)
+ stream_info->state = UPDATING;
+ }
spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (stream_info->state == UPDATING)
+ msm_isp_axi_wait_for_stream_cfg_done(stream_info, 1);
+
+}
+
+/**
+ * msm_isp_stream_axi_cfg_update() - Apply axi config update to a stream
+ * @vfe_dev: The vfe device on which the update is to be applied
+ * @stream_info: Stream for which update is to be applied
+ * @update_info: Parameters of the update
+ *
+ * Returns - 0 on success, else an error code
+ *
+ * For a dual vfe stream the update is applied only once the update for
+ * both vfes has been received.
+ */
+static int msm_isp_stream_axi_cfg_update(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ struct msm_vfe_axi_stream_cfg_update_info *update_info)
+{
+ int j;
+ int k;
+ unsigned long flags;
+ int vfe_idx;
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[
+ SRC_TO_INTF(stream_info->stream_src)])) {
+ pr_err("%s: Update in progress for vfe %d intf %d\n",
+ __func__, vfe_dev->pdev->id,
+ SRC_TO_INTF(stream_info->stream_src));
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state != ACTIVE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("Invalid stream state for axi update %d\n",
+ stream_info->state);
+ return -EINVAL;
+ }
+ if (stream_info->update_vfe_mask) {
+ if (stream_info->update_vfe_mask & (1 << vfe_dev->pdev->id)) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Stream %p/%x Update already in progress for vfe %d\n",
+ __func__, stream_info, stream_info->stream_src,
+ vfe_dev->pdev->id);
+ return -EINVAL;
+ }
+ }
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+
+ for (j = 0; j < stream_info->num_planes; j++)
+ stream_info->plane_cfg[vfe_idx][j] = update_info->plane_cfg[j];
+
+ stream_info->update_vfe_mask |= (1 << vfe_dev->pdev->id);
+	/* wait for updates from all vfes on this stream before applying */
+ if (stream_info->update_vfe_mask != stream_info->vfe_mask) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return 0;
+ }
+
+ atomic_set(&vfe_dev->axi_data.axi_cfg_update[
+ SRC_TO_INTF(stream_info->stream_src)], 1);
+ stream_info->output_format = update_info->output_format;
+ init_completion(&stream_info->active_comp);
+ if (((vfe_dev->hw_info->runtime_axi_update == 0) ||
+ (vfe_dev->dual_vfe_enable == 1))) {
+ stream_info->state = PAUSE_PENDING;
+ msm_isp_axi_stream_enable_cfg(stream_info);
+ stream_info->state = PAUSING;
+ } else {
+ for (j = 0; j < stream_info->num_planes; j++) {
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ }
+ }
+ stream_info->state = RESUMING;
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return 0;
}
int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
{
- int rc = 0, i, j;
+ int rc = 0, i;
struct msm_vfe_axi_stream *stream_info;
- struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
- struct msm_vfe_axi_stream_cfg_update_info *update_info;
+ struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
struct msm_isp_sw_framskip *sw_skip_info = NULL;
unsigned long flags;
struct msm_isp_timestamp timestamp;
@@ -3243,14 +3490,15 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
return -EINVAL;
for (i = 0; i < update_cmd->num_streams; i++) {
- update_info = &update_cmd->update_info[i];
+ update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
/*check array reference bounds*/
if (HANDLE_TO_IDX(update_info->stream_handle) >=
VFE_AXI_SRC_MAX) {
return -EINVAL;
}
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(update_info->stream_handle)];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
if (SRC_TO_INTF(stream_info->stream_src) >= VFE_SRC_MAX)
continue;
if (stream_info->state != ACTIVE &&
@@ -3267,37 +3515,45 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
return -EINVAL;
}
if (update_cmd->update_type == UPDATE_STREAM_AXI_CONFIG &&
- atomic_read(&axi_data->axi_cfg_update[
- SRC_TO_INTF(stream_info->stream_src)])) {
+ stream_info->state != ACTIVE) {
pr_err("%s: AXI stream config updating\n", __func__);
return -EBUSY;
}
}
- for (i = 0; i < update_cmd->num_streams; i++) {
- update_info = &update_cmd->update_info[i];
- stream_info = &axi_data->stream_info[
- HANDLE_TO_IDX(update_info->stream_handle)];
-
- switch (update_cmd->update_type) {
- case ENABLE_STREAM_BUF_DIVERT:
+ switch (update_cmd->update_type) {
+ case ENABLE_STREAM_BUF_DIVERT:
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
stream_info->buf_divert = 1;
- break;
- case DISABLE_STREAM_BUF_DIVERT:
+ }
+ break;
+ case DISABLE_STREAM_BUF_DIVERT:
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
stream_info->buf_divert = 0;
msm_isp_get_timestamp(&timestamp);
frame_id = vfe_dev->axi_data.src_info[
SRC_TO_INTF(stream_info->stream_src)].frame_id;
/* set ping pong address to scratch before flush */
spin_lock_irqsave(&stream_info->lock, flags);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PING_FLAG);
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PONG_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info,
+ VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(stream_info,
+ VFE_PONG_FLAG);
spin_unlock_irqrestore(&stream_info->lock, flags);
- rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id,
- stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT],
+ rc = vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr,
+ stream_info->bufq_handle
+ [VFE_BUF_QUEUE_DEFAULT],
MSM_ISP_BUFFER_FLUSH_DIVERTED,
&timestamp.buf_time, frame_id);
if (rc == -EFAULT) {
@@ -3305,11 +3561,18 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
ISP_EVENT_BUF_FATAL_ERROR);
return rc;
}
- break;
- case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+ }
+ break;
+ case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
uint32_t framedrop_period =
msm_isp_get_framedrop_period(
- update_info->skip_pattern);
+ update_info->skip_pattern);
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
spin_lock_irqsave(&stream_info->lock, flags);
/* no change then break early */
if (stream_info->current_framedrop_period ==
@@ -3331,11 +3594,18 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
stream_info->current_framedrop_period =
framedrop_period;
if (stream_info->stream_type != BURST_STREAM)
- msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ msm_isp_cfg_framedrop_reg(stream_info);
spin_unlock_irqrestore(&stream_info->lock, flags);
- break;
}
- case UPDATE_STREAM_SW_FRAME_DROP: {
+ break;
+ }
+ case UPDATE_STREAM_SW_FRAME_DROP: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
sw_skip_info = &update_info->sw_skip_info;
if (sw_skip_info->stream_src_mask != 0) {
/* SW image buffer drop */
@@ -3350,88 +3620,88 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
spin_unlock_irqrestore(&stream_info->lock,
flags);
}
- break;
}
- case UPDATE_STREAM_AXI_CONFIG: {
- for (j = 0; j < stream_info->num_planes; j++) {
- stream_info->plane_cfg[j] =
- update_info->plane_cfg[j];
- }
- stream_info->output_format = update_info->output_format;
- if ((stream_info->state == ACTIVE) &&
- ((vfe_dev->hw_info->runtime_axi_update == 0) ||
- (vfe_dev->dual_vfe_enable == 1))) {
- spin_lock_irqsave(&stream_info->lock, flags);
- stream_info->state = PAUSE_PENDING;
- msm_isp_axi_stream_enable_cfg(
- vfe_dev, stream_info, 1);
- stream_info->state = PAUSING;
- atomic_set(&axi_data->
- axi_cfg_update[SRC_TO_INTF(
- stream_info->stream_src)],
- UPDATE_REQUESTED);
- spin_unlock_irqrestore(&stream_info->lock,
- flags);
- } else {
- for (j = 0; j < stream_info->num_planes; j++) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_wm_reg(vfe_dev, stream_info, j);
- }
-
- spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_info->state != ACTIVE) {
- stream_info->runtime_output_format =
- stream_info->output_format;
- } else {
- stream_info->state = RESUMING;
- atomic_set(&axi_data->
- axi_cfg_update[SRC_TO_INTF(
- stream_info->stream_src)],
- APPLYING_UPDATE_RESUME);
- }
- spin_unlock_irqrestore(&stream_info->lock,
- flags);
- }
- break;
+ break;
+ }
+ case UPDATE_STREAM_AXI_CONFIG: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
+ rc = msm_isp_stream_axi_cfg_update(vfe_dev, stream_info,
+ update_info);
+ if (rc)
+ return rc;
}
- case UPDATE_STREAM_REQUEST_FRAMES: {
+ break;
+ }
+ case UPDATE_STREAM_REQUEST_FRAMES: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
rc = msm_isp_request_frame(vfe_dev, stream_info,
update_info->user_stream_id,
- update_info->frame_id);
+ update_info->frame_id,
+ MSM_ISP_INVALID_BUF_INDEX);
if (rc)
pr_err("%s failed to request frame!\n",
__func__);
- break;
}
- case UPDATE_STREAM_ADD_BUFQ: {
+ break;
+ }
+ case UPDATE_STREAM_ADD_BUFQ: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
rc = msm_isp_add_buf_queue(vfe_dev, stream_info,
update_info->user_stream_id);
if (rc)
pr_err("%s failed to add bufq!\n", __func__);
- break;
}
- case UPDATE_STREAM_REMOVE_BUFQ: {
+ break;
+ }
+ case UPDATE_STREAM_REMOVE_BUFQ: {
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info =
+ (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(update_info->stream_handle));
msm_isp_remove_buf_queue(vfe_dev, stream_info,
update_info->user_stream_id);
pr_debug("%s, Remove bufq for Stream 0x%x\n",
__func__, stream_info->stream_id);
- if (stream_info->state == ACTIVE) {
- stream_info->state = UPDATING;
- rc = msm_isp_axi_wait_for_cfg_done(vfe_dev,
- NO_UPDATE, (1 << SRC_TO_INTF(
- stream_info->stream_src)), 2);
- if (rc < 0)
- pr_err("%s: wait for update failed\n",
- __func__);
- }
-
- break;
- }
- default:
- pr_err("%s: Invalid update type\n", __func__);
- return -EINVAL;
}
+ break;
+ }
+ case UPDATE_STREAM_REQUEST_FRAMES_VER2: {
+ struct msm_vfe_axi_stream_cfg_update_info_req_frm *req_frm =
+ &update_cmd->req_frm_ver2;
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ HANDLE_TO_IDX(req_frm->stream_handle));
+ rc = msm_isp_request_frame(vfe_dev, stream_info,
+ req_frm->user_stream_id,
+ req_frm->frame_id,
+ req_frm->buf_index);
+ if (rc)
+ pr_err("%s failed to request frame!\n",
+ __func__);
+ break;
}
+ default:
+ pr_err("%s: Invalid update type %d\n", __func__,
+ update_cmd->update_type);
+ return -EINVAL;
+ }
+
return rc;
}
@@ -3446,7 +3716,7 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
unsigned long flags;
struct timeval *time_stamp;
uint32_t frame_id, buf_index = -1;
- struct msm_vfe_axi_stream *temp_stream;
+ int vfe_idx;
if (!ts) {
pr_err("%s: Error! Invalid argument\n", __func__);
@@ -3464,10 +3734,13 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
spin_lock_irqsave(&stream_info->lock, flags);
- pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
+ pingpong_bit = (~(pingpong_status >>
+ stream_info->wm[vfe_idx][0]) & 0x1);
for (i = 0; i < stream_info->num_planes; i++) {
if (pingpong_bit !=
- (~(pingpong_status >> stream_info->wm[i]) & 0x1)) {
+ (~(pingpong_status >>
+ stream_info->wm[vfe_idx][i]) & 0x1)) {
spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: Write master ping pong mismatch. Status: 0x%x\n",
__func__, pingpong_status);
@@ -3476,15 +3749,23 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
return;
}
}
-
if (stream_info->state == INACTIVE) {
- msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
- pingpong_status);
+ WARN_ON(stream_info->buf[pingpong_bit] != NULL);
spin_unlock_irqrestore(&stream_info->lock, flags);
- pr_err_ratelimited("%s: Warning! Stream already inactive. Drop irq handling\n",
- __func__);
return;
}
+
+ /* composite the irq for dual vfe */
+ rc = msm_isp_composite_irq(vfe_dev, stream_info,
+ MSM_ISP_COMP_IRQ_PING_BUFDONE + pingpong_bit);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (rc < 0)
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return;
+ }
+
done_buf = stream_info->buf[pingpong_bit];
if (vfe_dev->buf_mgr->frameId_mismatch_recovery == 1) {
@@ -3494,46 +3775,28 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
return;
}
- stream_info->frame_id++;
if (done_buf)
buf_index = done_buf->buf_idx;
- rc = vfe_dev->buf_mgr->ops->update_put_buf_cnt(vfe_dev->buf_mgr,
+ ISP_DBG("%s: vfe %d: stream 0x%x, frame id %d, pingpong bit %d\n",
+ __func__,
vfe_dev->pdev->id,
- done_buf ? done_buf->bufq_handle :
- stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT], buf_index,
- time_stamp, frame_id, pingpong_bit);
+ stream_info->stream_id,
+ frame_id,
+ pingpong_bit);
- if (rc < 0) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
- /* this usually means a serious scheduling error */
- msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
- return;
- }
- /*
- * Buf divert return value represent whether the buf
- * can be diverted. A positive return value means
- * other ISP hardware is still processing the frame.
- * A negative value is error. Return in both cases.
- */
- if (rc != 0) {
- spin_unlock_irqrestore(&stream_info->lock, flags);
- return;
- }
+ stream_info->frame_id++;
+ stream_info->buf[pingpong_bit] = NULL;
if (stream_info->stream_type == CONTINUOUS_STREAM ||
stream_info->runtime_num_burst_capture > 1) {
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, pingpong_status, 0);
+ rc = msm_isp_cfg_ping_pong_address(
+ stream_info, pingpong_status);
if (rc < 0)
ISP_DBG("%s: Error configuring ping_pong\n",
__func__);
} else if (done_buf) {
- rc = msm_isp_cfg_ping_pong_address(vfe_dev,
- stream_info, pingpong_status, 1);
- if (rc < 0)
- ISP_DBG("%s: Error configuring ping_pong\n",
- __func__);
+ msm_isp_cfg_stream_scratch(stream_info, pingpong_status);
}
if (!done_buf) {
@@ -3547,28 +3810,12 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
return;
}
- temp_stream = msm_isp_get_controllable_stream(vfe_dev,
- stream_info);
- if (temp_stream->stream_type == BURST_STREAM &&
- temp_stream->runtime_num_burst_capture) {
+ if (stream_info->stream_type == BURST_STREAM &&
+ stream_info->runtime_num_burst_capture) {
ISP_DBG("%s: burst_frame_count: %d\n",
__func__,
- temp_stream->runtime_num_burst_capture);
- temp_stream->runtime_num_burst_capture--;
- /*
- * For non controllable stream decrement the burst count for
- * dual stream as well here
- */
- if (!stream_info->controllable_output && vfe_dev->is_split &&
- RDI_INTF_0 > stream_info->stream_src) {
- temp_stream = msm_isp_vfe_get_stream(
- vfe_dev->common_data->dual_vfe_res,
- ((vfe_dev->pdev->id == ISP_VFE0) ?
- ISP_VFE1 : ISP_VFE0),
- HANDLE_TO_IDX(
- stream_info->stream_handle));
- temp_stream->runtime_num_burst_capture--;
- }
+ stream_info->runtime_num_burst_capture);
+ stream_info->runtime_num_burst_capture--;
}
rc = msm_isp_update_deliver_count(vfe_dev, stream_info,
@@ -3637,7 +3884,8 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
continue;
}
stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
- stream_info = &axi_data->stream_info[stream_idx];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ stream_idx);
msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
pingpong_status, ts);
@@ -3656,7 +3904,8 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
pingpong_status);
continue;
}
- stream_info = &axi_data->stream_info[stream_idx];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev,
+ stream_idx);
msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
pingpong_status, ts);
@@ -3670,6 +3919,7 @@ void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev)
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
int i, j;
+ int vfe_idx;
if (!vfe_dev || !axi_data) {
pr_err("%s: error %pK %pK\n", __func__, vfe_dev, axi_data);
@@ -3677,14 +3927,16 @@ void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev)
}
for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
- stream_info = &axi_data->stream_info[i];
+ stream_info = msm_isp_get_stream_common_data(vfe_dev, i);
if (stream_info->state != ACTIVE)
continue;
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev,
+ stream_info);
for (j = 0; j < stream_info->num_planes; j++)
vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
vfe_dev->vfe_base,
- stream_info->wm[j], 0);
+ stream_info->wm[vfe_idx][j], 0);
}
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
index 08053aa410e7..84720f3d8625 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -14,37 +14,15 @@
#include "msm_isp.h"
+#define HANDLE_TO_IDX(handle) (handle & 0xFF)
#define SRC_TO_INTF(src) \
((src < RDI_INTF_0 || src == VFE_AXI_SRC_MAX) ? VFE_PIX_0 : \
(VFE_RAW_0 + src - RDI_INTF_0))
-int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
-
-void msm_isp_axi_destroy_stream(
- struct msm_vfe_axi_shared_data *axi_data, int stream_idx);
-
-int msm_isp_validate_axi_request(
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
-
-void msm_isp_axi_reserve_wm(
- struct vfe_device *vfe_dev,
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream *stream_info);
-
-void msm_isp_axi_reserve_comp_mask(
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream *stream_info);
-
int msm_isp_axi_check_stream_state(
struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd);
-int msm_isp_calculate_framedrop(
- struct msm_vfe_axi_shared_data *axi_data,
- struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info);
@@ -62,10 +40,13 @@ int msm_isp_axi_restart(struct vfe_device *vfe_dev,
struct msm_vfe_axi_restart_cmd *restart_cmd);
void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src);
+ enum msm_vfe_input_src frame_src,
+ struct msm_isp_timestamp *ts);
-void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src);
+void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src,
+ enum msm_isp_comp_irq_types irq,
+ struct msm_isp_timestamp *ts);
void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts);
@@ -94,6 +75,34 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
uint32_t pingpong_status,
struct msm_isp_timestamp *ts);
+void msm_isp_release_all_axi_stream(struct vfe_device *vfe_dev);
+
+static inline int msm_isp_get_vfe_idx_for_stream_user(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int vfe_idx;
+
+ for (vfe_idx = 0; vfe_idx < stream_info->num_isp; vfe_idx++) {
+ if (stream_info->vfe_dev[vfe_idx] == vfe_dev)
+ return vfe_idx;
+ }
+ return -ENOTTY;
+}
+
+static inline int msm_isp_get_vfe_idx_for_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stream_user(vfe_dev, stream_info);
+
+ if (vfe_idx < 0) {
+		WARN(1, "%s vfe index missing for stream %d, vfe %d\n",
+ __func__, stream_info->stream_src, vfe_dev->pdev->id);
+ vfe_idx = 0;
+ }
+ return vfe_idx;
+}
+
static inline void msm_isp_cfg_wm_scratch(struct vfe_device *vfe_dev,
int wm,
uint32_t pingpong_bit)
@@ -103,18 +112,48 @@ static inline void msm_isp_cfg_wm_scratch(struct vfe_device *vfe_dev,
pingpong_bit, vfe_dev->buf_mgr->scratch_buf_addr, 0);
}
-static inline void msm_isp_cfg_stream_scratch(struct vfe_device *vfe_dev,
+static inline void msm_isp_cfg_stream_scratch(
struct msm_vfe_axi_stream *stream_info,
uint32_t pingpong_status)
{
int i;
+ int j;
uint32_t pingpong_bit;
-
- pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
- for (i = 0; i < stream_info->num_planes; i++)
- msm_isp_cfg_wm_scratch(vfe_dev, stream_info->wm[i],
+ int vfe_idx;
+
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0][0]) & 0x1);
+ for (i = 0; i < stream_info->num_planes; i++) {
+ for (j = 0; j < stream_info->num_isp; j++) {
+ vfe_idx = msm_isp_get_vfe_idx_for_stream(
+ stream_info->vfe_dev[j], stream_info);
+ msm_isp_cfg_wm_scratch(stream_info->vfe_dev[j],
+ stream_info->wm[vfe_idx][i],
~pingpong_bit);
+ }
+ }
stream_info->buf[pingpong_bit] = NULL;
}
+static inline struct msm_vfe_axi_stream *msm_isp_get_stream_common_data(
+ struct vfe_device *vfe_dev, int stream_idx)
+{
+ struct msm_vfe_common_dev_data *common_data = vfe_dev->common_data;
+ struct msm_vfe_axi_stream *stream_info;
+
+ if (vfe_dev->is_split && stream_idx < RDI_INTF_0)
+ stream_info = &common_data->streams[stream_idx];
+ else
+ stream_info = &common_data->streams[VFE_AXI_SRC_MAX *
+ vfe_dev->pdev->id + stream_idx];
+ return stream_info;
+}
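
The inline helper above encodes the new shared stream table layout: PIX-domain streams of a split (dual-vfe) session resolve to one entry shared by both VFEs, while every other stream gets a per-VFE slot offset by VFE_AXI_SRC_MAX times the VFE id. A standalone sketch of that indexing rule is shown below (illustration only, not driver code; the constant values and demo_ names are assumptions chosen for the example):

#include <stdio.h>

/* illustrative placeholder values; the real ones come from the driver enums */
#define DEMO_VFE_AXI_SRC_MAX	7
#define DEMO_RDI_INTF_0		4

/* flat index into the shared common_data->streams[] table */
static int demo_stream_table_index(int is_split, int vfe_id, int stream_idx)
{
	if (is_split && stream_idx < DEMO_RDI_INTF_0)
		return stream_idx;	/* PIX streams: one entry for both VFEs */
	return DEMO_VFE_AXI_SRC_MAX * vfe_id + stream_idx;
}

int main(void)
{
	/* split session: the PIX stream resolves to the same entry on each VFE */
	printf("split  vfe0 src1 -> %d\n", demo_stream_table_index(1, 0, 1));
	printf("split  vfe1 src1 -> %d\n", demo_stream_table_index(1, 1, 1));
	/* RDI streams and non-split sessions keep per-VFE entries */
	printf("split  vfe1 src5 -> %d\n", demo_stream_table_index(1, 1, 5));
	printf("single vfe1 src1 -> %d\n", demo_stream_table_index(0, 1, 1));
	return 0;
}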
+
+static inline struct msm_vfe_axi_stream *msm_isp_vfe_get_stream(
+ struct dual_vfe_resource *dual_vfe_res,
+ int vfe_id, uint32_t index)
+{
+ return msm_isp_get_stream_common_data(dual_vfe_res->vfe_dev[vfe_id],
+ index);
+}
+
#endif /* __MSM_ISP_AXI_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index 4aef6b5c7f38..f851e8c9289e 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -22,59 +22,89 @@ static inline void msm_isp_stats_cfg_wm_scratch(struct vfe_device *vfe_dev,
uint32_t pingpong_status)
{
vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
- vfe_dev->vfe_base, stream_info,
+ vfe_dev, stream_info,
pingpong_status, vfe_dev->buf_mgr->scratch_buf_addr);
}
-static inline void msm_isp_stats_cfg_stream_scratch(struct vfe_device *vfe_dev,
+static inline void msm_isp_stats_cfg_stream_scratch(
struct msm_vfe_stats_stream *stream_info,
uint32_t pingpong_status)
{
- uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_idx = STATS_IDX(stream_info->stream_handle[0]);
uint32_t pingpong_bit;
- uint32_t stats_pingpong_offset =
- vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
- stats_idx];
+ uint32_t stats_pingpong_offset;
+ struct vfe_device *vfe_dev;
+ int i;
+ stats_pingpong_offset = stream_info->vfe_dev[0]->hw_info->
+ stats_hw_info->stats_ping_pong_offset[stats_idx];
pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
- msm_isp_stats_cfg_wm_scratch(vfe_dev, stream_info,
- pingpong_status);
+ for (i = 0; i < stream_info->num_isp; i++) {
+ vfe_dev = stream_info->vfe_dev[i];
+ msm_isp_stats_cfg_wm_scratch(vfe_dev, stream_info,
+ pingpong_status);
+ }
+
stream_info->buf[pingpong_bit] = NULL;
}
-static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+static int msm_isp_composite_stats_irq(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info,
+ enum msm_isp_comp_irq_types irq)
+{
+	/* interrupt received again on this vfe before the other vfe reported */
+ if (stream_info->composite_irq[irq] & (1 << vfe_dev->pdev->id)) {
+ pr_err("%s: irq %d out of sync for dual vfe on vfe %d\n",
+ __func__, irq, vfe_dev->pdev->id);
+ return -EFAULT;
+ }
+
+ stream_info->composite_irq[irq] |= (1 << vfe_dev->pdev->id);
+ if (stream_info->composite_irq[irq] != stream_info->vfe_mask)
+ return 1;
+
+ stream_info->composite_irq[irq] = 0;
+
+ return 0;
+}
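
msm_isp_composite_stats_irq (and the analogous msm_isp_composite_irq used on the AXI path) implements a small dual-vfe handshake: each VFE sets its bit when its interrupt arrives, a repeated bit from the same VFE is treated as out of sync, and the buffer-done work proceeds only once the accumulated bits match the stream's vfe_mask. A standalone sketch of that handshake is shown below (illustration only, not driver code; the demo_ names, plain int return codes and the two-VFE mask value are assumptions made for the example):

#include <stdio.h>

/* minimal stand-ins for the per-stream composite-irq state */
struct demo_stream {
	unsigned int composite_irq;	/* bit set per VFE that has reported */
	unsigned int vfe_mask;		/* bits of all VFEs sharing the stream */
};

/* <0: out of sync, 1: wait for the other VFE, 0: composite event complete */
static int demo_composite_irq(struct demo_stream *s, int vfe_id)
{
	if (s->composite_irq & (1 << vfe_id))
		return -1;			/* same VFE reported twice */
	s->composite_irq |= (1 << vfe_id);
	if (s->composite_irq != s->vfe_mask)
		return 1;			/* other VFE not yet done */
	s->composite_irq = 0;			/* consume the composite event */
	return 0;
}

int main(void)
{
	struct demo_stream s = { 0, 0x3 };	/* dual vfe: VFE0 | VFE1 */
	int first, second;

	printf("VFE0 irq -> %d (keep waiting)\n", demo_composite_irq(&s, 0));
	printf("VFE1 irq -> %d (handle buf done)\n", demo_composite_irq(&s, 1));
	first = demo_composite_irq(&s, 1);
	second = demo_composite_irq(&s, 1);
	printf("VFE1 irq twice -> %d then %d (out of sync)\n", first, second);
	return 0;
}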
+
+static int msm_isp_stats_cfg_ping_pong_address(
struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status)
{
- int rc = -1, vfe_id = 0;
- struct msm_isp_buffer *buf;
- uint32_t pingpong_bit = 0;
- uint32_t stats_pingpong_offset;
+ int rc = -1;
+ struct msm_isp_buffer *buf = NULL;
uint32_t bufq_handle = stream_info->bufq_handle;
- uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
- struct dual_vfe_resource *dual_vfe_res = NULL;
- struct msm_vfe_stats_stream *dual_vfe_stream_info = NULL;
+ uint32_t stats_idx = STATS_IDX(stream_info->stream_handle[0]);
+ struct vfe_device *vfe_dev = stream_info->vfe_dev[0];
+ uint32_t stats_pingpong_offset;
+ uint32_t pingpong_bit;
+ int k;
if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type ||
stats_idx >= MSM_ISP_STATS_MAX) {
pr_err("%s Invalid stats index %d", __func__, stats_idx);
return -EINVAL;
}
-
- stats_pingpong_offset =
- vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
- stats_idx];
-
+ stats_pingpong_offset = vfe_dev->hw_info->stats_hw_info->
+ stats_ping_pong_offset[stats_idx];
pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
+	/* if a buffer is already programmed there is no need to replace it */
+ if (stream_info->buf[pingpong_bit])
+ return 0;
rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, bufq_handle, &buf);
+ vfe_dev->pdev->id, bufq_handle,
+ MSM_ISP_INVALID_BUF_INDEX, &buf);
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev, ISP_EVENT_BUF_FATAL_ERROR);
return rc;
}
- if (rc < 0 || NULL == buf)
- vfe_dev->error_info.stats_framedrop_count[stats_idx]++;
+ if (rc < 0 || NULL == buf) {
+ for (k = 0; k < stream_info->num_isp; k++)
+ stream_info->vfe_dev[k]->error_info.
+ stats_framedrop_count[stats_idx]++;
+ }
if (buf && buf->num_planes != 1) {
pr_err("%s: Invalid buffer\n", __func__);
@@ -82,58 +112,22 @@ static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
rc = -EINVAL;
goto buf_error;
}
- if (vfe_dev->is_split) {
- dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
- if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
- !dual_vfe_res->stats_data[ISP_VFE0] ||
- !dual_vfe_res->vfe_base[ISP_VFE1] ||
- !dual_vfe_res->stats_data[ISP_VFE1]) {
- pr_err("%s:%d error vfe0 %pK %pK vfe1 %pK %pK\n",
- __func__, __LINE__,
- dual_vfe_res->vfe_base[ISP_VFE0],
- dual_vfe_res->stats_data[ISP_VFE0],
- dual_vfe_res->vfe_base[ISP_VFE1],
- dual_vfe_res->stats_data[ISP_VFE1]);
- } else {
- for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
- dual_vfe_stream_info = &dual_vfe_res->
- stats_data[vfe_id]->
- stream_info[stats_idx];
- if (buf)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- update_ping_pong_addr(
- dual_vfe_res->vfe_base[vfe_id],
- dual_vfe_stream_info,
- pingpong_status,
- buf->mapped_info[0].paddr +
- dual_vfe_stream_info->
- buffer_offset);
- else
- msm_isp_stats_cfg_stream_scratch(
- vfe_dev,
- dual_vfe_stream_info,
- pingpong_status);
- dual_vfe_stream_info->buf[pingpong_bit]
- = buf;
- }
- }
- } else {
- if (buf)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- update_ping_pong_addr(
- vfe_dev->vfe_base, stream_info,
- pingpong_status, buf->mapped_info[0].paddr +
- stream_info->buffer_offset);
- else
- msm_isp_stats_cfg_stream_scratch(vfe_dev,
- stream_info, pingpong_status);
-
- stream_info->buf[pingpong_bit] = buf;
+ if (!buf) {
+ msm_isp_stats_cfg_stream_scratch(stream_info,
+ pingpong_status);
+ return 0;
}
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
+ vfe_dev, stream_info, pingpong_status,
+ buf->mapped_info[0].paddr +
+ stream_info->buffer_offset[k]);
+ }
+ stream_info->buf[pingpong_bit] = buf;
+ buf->pingpong_bit = pingpong_bit;
- if (buf)
- buf->pingpong_bit = pingpong_bit;
return 0;
buf_error:
vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
@@ -155,6 +149,8 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
struct msm_isp_buffer *done_buf;
uint32_t stats_pingpong_offset;
uint32_t stats_idx;
+ int vfe_idx;
+ unsigned long flags;
if (!vfe_dev || !ts || !buf_event || !stream_info) {
pr_err("%s:%d failed: invalid params %pK %pK %pK %pK\n",
@@ -163,6 +159,9 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
return -EINVAL;
}
frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+
sw_skip = &stream_info->sw_skip;
stats_event = &buf_event->u.stats;
@@ -182,73 +181,62 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
(struct msm_isp_sw_framskip));
}
}
- stats_idx = STATS_IDX(stream_info->stream_handle);
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream(vfe_dev, stream_info);
+ stats_idx = STATS_IDX(stream_info->stream_handle[vfe_idx]);
stats_pingpong_offset =
vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
stats_idx];
pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
- done_buf = stream_info->buf[pingpong_bit];
-
- if (done_buf)
- buf_index = done_buf->buf_idx;
-
- rc = vfe_dev->buf_mgr->ops->update_put_buf_cnt(
- vfe_dev->buf_mgr, vfe_dev->pdev->id, stream_info->bufq_handle,
- buf_index, &ts->buf_time,
- frame_id, pingpong_bit);
-
- if (rc < 0) {
- if (rc == -EFAULT)
+ rc = msm_isp_composite_stats_irq(vfe_dev, stream_info,
+ MSM_ISP_COMP_IRQ_PING_BUFDONE + pingpong_bit);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (rc < 0)
msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- pr_err("stats_buf_divert: update put buf cnt fail\n");
- return rc;
- }
-
- if (rc > 0) {
- ISP_DBG("%s: vfe_id %d buf_id %d bufq %x put_cnt 1\n", __func__,
- vfe_dev->pdev->id, buf_index,
- stream_info->bufq_handle);
+ ISP_EVENT_BUF_FATAL_ERROR);
return rc;
}
+ done_buf = stream_info->buf[pingpong_bit];
/* Program next buffer */
- rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev, stream_info,
+ stream_info->buf[pingpong_bit] = NULL;
+ rc = msm_isp_stats_cfg_ping_pong_address(stream_info,
pingpong_status);
- if (rc)
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ if (!done_buf)
return rc;
- if (drop_buffer && done_buf) {
- rc = vfe_dev->buf_mgr->ops->buf_done(
+ buf_index = done_buf->buf_idx;
+ if (drop_buffer) {
+ vfe_dev->buf_mgr->ops->put_buf(
vfe_dev->buf_mgr,
done_buf->bufq_handle,
- done_buf->buf_idx, &ts->buf_time, frame_id, 0);
- if (rc == -EFAULT)
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
+ done_buf->buf_idx);
+ } else {
+ /* divert native buffers */
+ vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
+ done_buf->bufq_handle, done_buf->buf_idx,
+ &ts->buf_time, frame_id);
}
-
- if (done_buf) {
- stats_event->stats_buf_idxs
- [stream_info->stats_type] =
- done_buf->buf_idx;
- if (NULL == comp_stats_type_mask) {
- stats_event->stats_mask =
- 1 << stream_info->stats_type;
- ISP_DBG("%s: stats frameid: 0x%x %d bufq %x\n",
- __func__, buf_event->frame_id,
- stream_info->stats_type, done_buf->bufq_handle);
- msm_isp_send_event(vfe_dev,
- ISP_EVENT_STATS_NOTIFY +
- stream_info->stats_type,
- buf_event);
- } else {
- *comp_stats_type_mask |=
- 1 << stream_info->stats_type;
- }
+	stats_event->stats_buf_idxs[stream_info->stats_type] =
+		done_buf->buf_idx;
+ if (comp_stats_type_mask == NULL) {
+ stats_event->stats_mask =
+ 1 << stream_info->stats_type;
+ ISP_DBG("%s: stats frameid: 0x%x %d bufq %x\n",
+ __func__, buf_event->frame_id,
+ stream_info->stats_type, done_buf->bufq_handle);
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_STATS_NOTIFY +
+ stream_info->stats_type,
+ buf_event);
+ } else {
+ *comp_stats_type_mask |=
+ 1 << stream_info->stats_type;
}
return rc;
@@ -275,8 +263,9 @@ static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
if (!(stats_irq_mask & (1 << i)))
continue;
- stream_info = &vfe_dev->stats_data.stream_info[i];
- if (stream_info->state == STATS_INACTIVE) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ if (stream_info->state == STATS_INACTIVE ||
+ stream_info->state == STATS_STOPPING) {
pr_debug("%s: Warning! Stream already inactive. Drop irq handling\n",
__func__);
continue;
@@ -353,12 +342,17 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
}
int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
- struct msm_vfe_stats_stream_request_cmd *stream_req_cmd)
+ struct msm_vfe_stats_stream_request_cmd *stream_req_cmd,
+ struct msm_vfe_stats_stream *stream_info)
{
- int rc = -1;
- struct msm_vfe_stats_stream *stream_info = NULL;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ int rc = 0;
uint32_t stats_idx;
+ uint32_t framedrop_pattern;
+ uint32_t framedrop_period;
+ int i;
+
+ stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_stats_idx(stream_req_cmd->stats_type);
if (!(vfe_dev->hw_info->stats_hw_info->stats_capability_mask &
(1 << stream_req_cmd->stats_type))) {
@@ -366,16 +360,7 @@ int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
return rc;
}
- stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
- get_stats_idx(stream_req_cmd->stats_type);
-
- if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
- pr_err("%s Invalid stats index %d", __func__, stats_idx);
- return -EINVAL;
- }
-
- stream_info = &stats_data->stream_info[stats_idx];
- if (stream_info->state != STATS_AVALIABLE) {
+ if (stream_info->state != STATS_AVAILABLE) {
pr_err("%s: Stats already requested\n", __func__);
return rc;
}
@@ -389,17 +374,74 @@ int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
pr_err("%s: Invalid irq subsample pattern\n", __func__);
return rc;
}
+ if (stream_req_cmd->composite_flag >
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask) {
+ pr_err("%s: comp grp %d exceed max %d\n",
+ __func__, stream_req_cmd->composite_flag,
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask);
+ return -EINVAL;
+ }
- stream_info->session_id = stream_req_cmd->session_id;
- stream_info->stream_id = stream_req_cmd->stream_id;
- stream_info->composite_flag = stream_req_cmd->composite_flag;
- stream_info->stats_type = stream_req_cmd->stats_type;
- stream_info->buffer_offset = stream_req_cmd->buffer_offset;
- stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern;
- stream_info->init_stats_frame_drop = stream_req_cmd->init_frame_drop;
- stream_info->irq_subsample_pattern =
- stream_req_cmd->irq_subsample_pattern;
- stream_info->state = STATS_INACTIVE;
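+	/*
+	 * The first VFE to request the stream initializes the shared
+	 * parameters; requests from the other VFE must match them.
+	 */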
+ if (stream_info->num_isp == 0) {
+ stream_info->session_id = stream_req_cmd->session_id;
+ stream_info->stream_id = stream_req_cmd->stream_id;
+ stream_info->composite_flag = stream_req_cmd->composite_flag;
+ stream_info->stats_type = stream_req_cmd->stats_type;
+ framedrop_pattern = stream_req_cmd->framedrop_pattern;
+ if (framedrop_pattern == SKIP_ALL)
+ framedrop_pattern = 0;
+ else
+ framedrop_pattern = 1;
+ stream_info->framedrop_pattern = framedrop_pattern;
+ stream_info->init_stats_frame_drop =
+ stream_req_cmd->init_frame_drop;
+ stream_info->irq_subsample_pattern =
+ stream_req_cmd->irq_subsample_pattern;
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_req_cmd->framedrop_pattern);
+ stream_info->framedrop_period = framedrop_period;
+ } else {
+ if (stream_info->vfe_mask & (1 << vfe_dev->pdev->id)) {
+ pr_err("%s: stats %d already requested for vfe %d\n",
+ __func__, stats_idx, vfe_dev->pdev->id);
+ return -EINVAL;
+ }
+ if (stream_info->session_id != stream_req_cmd->session_id)
+ rc = -EINVAL;
+		if (stream_info->stream_id != stream_req_cmd->stream_id)
+			rc = -EINVAL;
+ if (stream_info->composite_flag !=
+ stream_req_cmd->composite_flag)
+ rc = -EINVAL;
+ if (stream_info->stats_type != stream_req_cmd->stats_type)
+ rc = -EINVAL;
+ framedrop_pattern = stream_req_cmd->framedrop_pattern;
+ if (framedrop_pattern == SKIP_ALL)
+ framedrop_pattern = 0;
+ else
+ framedrop_pattern = 1;
+ if (stream_info->framedrop_pattern != framedrop_pattern)
+ rc = -EINVAL;
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_req_cmd->framedrop_pattern);
+ if (stream_info->framedrop_period != framedrop_period)
+ rc = -EINVAL;
+ if (rc) {
+ pr_err("%s: Stats stream param mismatch between vfe\n",
+ __func__);
+ return rc;
+ }
+ }
+ stream_info->buffer_offset[stream_info->num_isp] =
+ stream_req_cmd->buffer_offset;
+ stream_info->vfe_dev[stream_info->num_isp] = vfe_dev;
+ stream_info->vfe_mask |= (1 << vfe_dev->pdev->id);
+ stream_info->num_isp++;
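+	/* mark the stream requested only once all sharing VFEs have attached */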
+ if (!vfe_dev->is_split || stream_info->num_isp == MAX_VFE) {
+ stream_info->state = STATS_INACTIVE;
+ for (i = 0; i < MSM_ISP_COMP_IRQ_MAX; i++)
+ stream_info->composite_irq[i] = 0;
+ }
if ((vfe_dev->stats_data.stream_handle_cnt << 8) == 0)
vfe_dev->stats_data.stream_handle_cnt++;
@@ -407,7 +449,8 @@ int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
stream_req_cmd->stream_handle =
(++vfe_dev->stats_data.stream_handle_cnt) << 8 | stats_idx;
- stream_info->stream_handle = stream_req_cmd->stream_handle;
+ stream_info->stream_handle[stream_info->num_isp - 1] =
+ stream_req_cmd->stream_handle;
return 0;
}
@@ -416,42 +459,39 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
int rc = -1;
struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
struct msm_vfe_stats_stream *stream_info = NULL;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
- uint32_t framedrop_period;
uint32_t stats_idx;
+ unsigned long flags;
- rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd);
- if (rc < 0) {
- pr_err("%s: create stream failed\n", __func__);
- return rc;
- }
-
- stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+ stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_stats_idx(stream_req_cmd->stats_type);
if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s Invalid stats index %d", __func__, stats_idx);
return -EINVAL;
}
- stream_info = &stats_data->stream_info[stats_idx];
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, stats_idx);
- framedrop_period = msm_isp_get_framedrop_period(
- stream_req_cmd->framedrop_pattern);
+ spin_lock_irqsave(&stream_info->lock, flags);
- if (stream_req_cmd->framedrop_pattern == SKIP_ALL)
- stream_info->framedrop_pattern = 0x0;
- else
- stream_info->framedrop_pattern = 0x1;
- stream_info->framedrop_period = framedrop_period - 1;
+ rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd, stream_info);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: create stream failed\n", __func__);
+ return rc;
+ }
if (stream_info->init_stats_frame_drop == 0)
vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
stream_info);
- msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
+ if (stream_info->state == STATS_INACTIVE) {
+ msm_isp_stats_cfg_stream_scratch(stream_info,
VFE_PING_FLAG);
- msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
+ msm_isp_stats_cfg_stream_scratch(stream_info,
VFE_PONG_FLAG);
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
return rc;
}
@@ -460,32 +500,112 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
int rc = -1;
struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
struct msm_vfe_stats_stream *stream_info = NULL;
+ int vfe_idx;
+ int i;
+ int k;
+ unsigned long flags;
if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s Invalid stats index %d", __func__, stats_idx);
return -EINVAL;
}
- stream_info = &stats_data->stream_info[stats_idx];
- if (stream_info->state == STATS_AVALIABLE) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, stats_idx);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(
+ vfe_dev, stream_info);
+ if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] !=
+ stream_release_cmd->stream_handle) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: Invalid stream handle %x, expected %x\n",
+ __func__, stream_release_cmd->stream_handle,
+ vfe_idx != -ENOTTY ?
+ stream_info->stream_handle[vfe_idx] : 0);
+ return -EINVAL;
+ }
+ if (stream_info->state == STATS_AVAILABLE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: stream already release\n", __func__);
return rc;
- } else if (stream_info->state != STATS_INACTIVE) {
+ }
+ vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
+
+ if (stream_info->state != STATS_INACTIVE) {
stream_cfg_cmd.enable = 0;
stream_cfg_cmd.num_streams = 1;
stream_cfg_cmd.stream_handle[0] =
stream_release_cmd->stream_handle;
- rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+ spin_lock_irqsave(&stream_info->lock, flags);
}
- vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
- memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream));
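+	/* remove this VFE's slot and compact the remaining per-VFE entries */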
+ for (i = vfe_idx, k = vfe_idx + 1; k < stream_info->num_isp; k++, i++) {
+ stream_info->vfe_dev[i] = stream_info->vfe_dev[k];
+ stream_info->stream_handle[i] = stream_info->stream_handle[k];
+ stream_info->buffer_offset[i] = stream_info->buffer_offset[k];
+ }
+
+ stream_info->vfe_dev[stream_info->num_isp] = 0;
+ stream_info->stream_handle[stream_info->num_isp] = 0;
+ stream_info->buffer_offset[stream_info->num_isp] = 0;
+ stream_info->num_isp--;
+ stream_info->vfe_mask &= ~(1 << vfe_dev->pdev->id);
+ if (stream_info->num_isp == 0)
+ stream_info->state = STATS_AVAILABLE;
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
return 0;
}
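+/*
+ * Stop and release every stats stream still owned by this VFE; called on
+ * node close to clean up streams that userspace did not release.
+ */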
+void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_stats_stream_release_cmd
+ stream_release_cmd[MSM_ISP_STATS_MAX];
+ struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
+ struct msm_vfe_stats_stream *stream_info;
+ int i;
+ int vfe_idx;
+ int num_stream = 0;
+ unsigned long flags;
+
+ stream_cfg_cmd.enable = 0;
+ stream_cfg_cmd.num_streams = 0;
+
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state == STATS_AVAILABLE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+ if (vfe_idx == -ENOTTY) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_release_cmd[num_stream++].stream_handle =
+ stream_info->stream_handle[vfe_idx];
+ if (stream_info->state == STATS_INACTIVE) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ continue;
+ }
+ stream_cfg_cmd.stream_handle[
+ stream_cfg_cmd.num_streams] =
+ stream_info->stream_handle[vfe_idx];
+ stream_cfg_cmd.num_streams++;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ if (stream_cfg_cmd.num_streams)
+ msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+
+ for (i = 0; i < num_stream; i++)
+ msm_isp_release_stats_stream(vfe_dev, &stream_release_cmd[i]);
+}
+
static int msm_isp_init_stats_ping_pong_reg(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
@@ -497,108 +617,205 @@ static int msm_isp_init_stats_ping_pong_reg(
stream_info->stream_id);
if (stream_info->bufq_handle == 0) {
pr_err("%s: no buf configured for stream: 0x%x\n",
- __func__, stream_info->stream_handle);
+ __func__, stream_info->stream_handle[0]);
return -EINVAL;
}
- if ((vfe_dev->is_split && vfe_dev->pdev->id == 1) ||
- !vfe_dev->is_split) {
- rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PING_FLAG);
- if (rc < 0) {
- pr_err("%s: No free buffer for ping\n", __func__);
- return rc;
- }
- rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
- stream_info, VFE_PONG_FLAG);
- if (rc < 0) {
- pr_err("%s: No free buffer for pong\n", __func__);
- return rc;
- }
+ rc = msm_isp_stats_cfg_ping_pong_address(
+ stream_info, VFE_PING_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n", __func__);
+ return rc;
+ }
+ rc = msm_isp_stats_cfg_ping_pong_address(
+ stream_info, VFE_PONG_FLAG);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n", __func__);
+ return rc;
}
return rc;
}
-void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev)
+void __msm_isp_update_stats_framedrop_reg(
+ struct msm_vfe_stats_stream *stream_info)
{
- int i;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
- struct msm_vfe_stats_stream *stream_info = NULL;
+ int k;
+ struct vfe_device *vfe_dev;
- for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
- stream_info = &stats_data->stream_info[i];
- if (stream_info->state != STATS_ACTIVE)
- continue;
+ if (!stream_info->init_stats_frame_drop)
+ return;
+ stream_info->init_stats_frame_drop--;
+ if (stream_info->init_stats_frame_drop)
+ return;
- if (stream_info->init_stats_frame_drop) {
- stream_info->init_stats_frame_drop--;
- if (stream_info->init_stats_frame_drop == 0) {
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
- vfe_dev, stream_info);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
+ stream_info);
+	}
+}
+
+static void __msm_isp_stats_stream_update(
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t enable = 0;
+ uint8_t comp_flag = 0;
+ int k;
+ struct vfe_device *vfe_dev;
+ int index = STATS_IDX(stream_info->stream_handle[0]);
+
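+	/*
+	 * Step the stream state machine: the PENDING states program the
+	 * hardware and move to STARTING/STOPPING; the following reg-update
+	 * completes the transition to ACTIVE/INACTIVE.
+	 */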
+ switch (stream_info->state) {
+ case STATS_INACTIVE:
+ case STATS_ACTIVE:
+ case STATS_AVAILABLE:
+ break;
+ case STATS_START_PENDING:
+ enable = 1;
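+		/* fall through */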
+ case STATS_STOP_PENDING:
+ stream_info->state =
+ (stream_info->state == STATS_START_PENDING ?
+ STATS_STARTING : STATS_STOPPING);
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, BIT(index), enable);
+ comp_flag = stream_info->composite_flag;
+ if (comp_flag) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ cfg_comp_mask(vfe_dev, BIT(index),
+ (comp_flag - 1), enable);
+ } else {
+ if (enable)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ cfg_wm_irq_mask(vfe_dev,
+ stream_info);
+ else
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ clear_wm_irq_mask(vfe_dev,
+ stream_info);
}
}
+ break;
+ case STATS_STARTING:
+ stream_info->state = STATS_ACTIVE;
+ complete_all(&stream_info->active_comp);
+ break;
+ case STATS_STOPPING:
+ stream_info->state = STATS_INACTIVE;
+ complete_all(&stream_info->inactive_comp);
+ break;
}
}
+
void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
{
int i;
- uint32_t enable = 0;
- uint8_t comp_flag = 0;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
- struct msm_vfe_stats_ops *stats_ops =
- &vfe_dev->hw_info->vfe_ops.stats_ops;
+ struct msm_vfe_stats_stream *stream_info;
+ unsigned long flags;
for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
- if (stats_data->stream_info[i].state == STATS_START_PENDING ||
- stats_data->stream_info[i].state ==
- STATS_STOP_PENDING) {
- enable = stats_data->stream_info[i].state ==
- STATS_START_PENDING ? 1 : 0;
- stats_data->stream_info[i].state =
- stats_data->stream_info[i].state ==
- STATS_START_PENDING ?
- STATS_STARTING : STATS_STOPPING;
- vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
- vfe_dev, BIT(i), enable);
- comp_flag = stats_data->stream_info[i].composite_flag;
- if (comp_flag)
- stats_ops->cfg_comp_mask(vfe_dev, BIT(i),
- (comp_flag - 1), enable);
- } else if (stats_data->stream_info[i].state == STATS_STARTING ||
- stats_data->stream_info[i].state == STATS_STOPPING) {
- stats_data->stream_info[i].state =
- stats_data->stream_info[i].state ==
- STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
- }
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ __msm_isp_stats_stream_update(stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
- atomic_sub(1, &stats_data->stats_update);
- if (!atomic_read(&stats_data->stats_update))
- complete(&vfe_dev->stats_config_complete);
}
-static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev)
+void msm_isp_process_stats_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
+ enum msm_isp_comp_irq_types irq)
{
+ int i;
+ struct msm_vfe_stats_stream *stream_info;
+ unsigned long flags;
int rc;
- init_completion(&vfe_dev->stats_config_complete);
- atomic_set(&vfe_dev->stats_data.stats_update, 2);
- rc = wait_for_completion_timeout(
- &vfe_dev->stats_config_complete,
+
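+	/* act on a stream only after the irq is composited across its VFEs */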
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev, i);
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE)
+ continue;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+
+ rc = msm_isp_composite_stats_irq(vfe_dev, stream_info, irq);
+
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (-EFAULT == rc) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return;
+ }
+ continue;
+ }
+
+ if (irq == MSM_ISP_COMP_IRQ_REG_UPD)
+ __msm_isp_stats_stream_update(stream_info);
+ else if (irq == MSM_ISP_COMP_IRQ_EPOCH &&
+ stream_info->state == STATS_ACTIVE)
+ __msm_isp_update_stats_framedrop_reg(stream_info);
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+}
+
+static int msm_isp_stats_wait_for_stream_cfg_done(
+ struct msm_vfe_stats_stream *stream_info,
+ int active)
+{
+ int rc = -1;
+
+ if (active && stream_info->state == STATS_ACTIVE)
+ rc = 0;
+ if (!active && stream_info->state == STATS_INACTIVE)
+ rc = 0;
+ if (rc == 0)
+ return rc;
+
+ rc = wait_for_completion_timeout(active ? &stream_info->active_comp :
+ &stream_info->inactive_comp,
msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
- if (rc == 0) {
- pr_err("%s: wait timeout\n", __func__);
- rc = -1;
+ if (rc <= 0) {
+ rc = rc ? rc : -ETIMEDOUT;
+ pr_err("%s: wait for stats stream %x idx %d state %d active %d config failed %d\n",
+ __func__, stream_info->stream_id,
+ STATS_IDX(stream_info->stream_handle[0]),
+ stream_info->state, active, rc);
} else {
rc = 0;
}
return rc;
}
+static int msm_isp_stats_wait_for_streams(
+ struct msm_vfe_stats_stream **streams,
+ int num_stream, int active)
+{
+ int rc = 0;
+ int i;
+ struct msm_vfe_stats_stream *stream_info;
+
+ for (i = 0; i < num_stream; i++) {
+ stream_info = streams[i];
+ rc |= msm_isp_stats_wait_for_stream_cfg_done(stream_info,
+ active);
+ }
+ return rc;
+}
+
static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
{
int i;
uint32_t stats_mask = 0, idx;
+ struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
+ struct msm_vfe_stats_stream *stream_info;
+ int k;
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
@@ -607,12 +824,33 @@ static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev,
pr_err("%s Invalid stats index %d", __func__, idx);
return -EINVAL;
}
- stats_mask |= 1 << idx;
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev,
+ idx);
+ if (stream_info->state == STATS_AVAILABLE)
+ continue;
+
+		/*
+		 * CGC is updated after streams are made inactive or before
+		 * they are started, so the stream should be inactive here.
+		 */
+ if (stream_info->state == STATS_INACTIVE)
+ stats_mask |= 1 << idx;
+ for (k = 0; k < stream_info->num_isp; k++) {
+ if (update_vfes[stream_info->vfe_dev[k]->pdev->id])
+ continue;
+ update_vfes[stream_info->vfe_dev[k]->pdev->id] =
+ stream_info->vfe_dev[k];
+ }
}
- if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) {
- vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override(
- vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ for (k = 0; k < MAX_VFE; k++) {
+ if (!update_vfes[k])
+ continue;
+ vfe_dev = update_vfes[k];
+ if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ }
}
return 0;
}
@@ -621,61 +859,108 @@ int msm_isp_stats_reset(struct vfe_device *vfe_dev)
{
int i = 0, rc = 0;
struct msm_vfe_stats_stream *stream_info = NULL;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
struct msm_isp_timestamp timestamp;
+ struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
+ unsigned long flags;
+ int k;
msm_isp_get_timestamp(&timestamp);
- for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
- stream_info = &stats_data->stream_info[i];
- if (stream_info->state != STATS_ACTIVE)
+ if (vfe_dev->is_split) {
+ for (i = 0; i < MAX_VFE; i++)
+ update_vfes[i] = vfe_dev->common_data->dual_vfe_res->
+ vfe_dev[i];
+ } else {
+ update_vfes[vfe_dev->pdev->id] = vfe_dev;
+ }
+
+ for (k = 0; k < MAX_VFE; k++) {
+ vfe_dev = update_vfes[k];
+ if (!vfe_dev)
continue;
- rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, stream_info->bufq_handle,
- MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
- vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
- if (rc == -EFAULT) {
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev, i);
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE)
+ continue;
+
+ if (stream_info->num_isp > 1 &&
+ vfe_dev->pdev->id == ISP_VFE0)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_stats_cfg_stream_scratch(stream_info,
+ VFE_PING_FLAG);
+ msm_isp_stats_cfg_stream_scratch(stream_info,
+ VFE_PONG_FLAG);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+ stream_info->bufq_handle,
+ MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ if (rc == -EFAULT) {
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ return rc;
+ }
}
}
return rc;
}
-int msm_isp_stats_restart(struct vfe_device *vfe_dev)
+int msm_isp_stats_restart(struct vfe_device *vfe_dev_ioctl)
{
int i = 0;
struct msm_vfe_stats_stream *stream_info = NULL;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ unsigned long flags;
+ struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
+ struct vfe_device *vfe_dev;
+ int k;
+ int j;
+
+ if (vfe_dev_ioctl->is_split) {
+ for (i = 0; i < MAX_VFE; i++)
+ update_vfes[i] = vfe_dev_ioctl->common_data->
+ dual_vfe_res->vfe_dev[i];
+ } else {
+ update_vfes[vfe_dev_ioctl->pdev->id] = vfe_dev_ioctl;
+ }
- for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
- stream_info = &stats_data->stream_info[i];
- if (stream_info->state < STATS_ACTIVE)
+ for (k = 0; k < MAX_VFE; k++) {
+ vfe_dev = update_vfes[k];
+ if (!vfe_dev)
+ continue;
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev, i);
+ if (stream_info->state == STATS_AVAILABLE ||
+ stream_info->state == STATS_INACTIVE)
+ continue;
+ if (stream_info->num_isp > 1 &&
+ vfe_dev->pdev->id == ISP_VFE0)
continue;
- msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ for (j = 0; j < MSM_ISP_COMP_IRQ_MAX; j++)
+ stream_info->composite_irq[j] = 0;
+ msm_isp_init_stats_ping_pong_reg(vfe_dev_ioctl,
+ stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
}
return 0;
}
-static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
+static int msm_isp_check_stream_cfg_cmd(struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
{
- int i, rc = 0;
- uint32_t stats_mask = 0, idx;
- uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
- uint32_t num_stats_comp_mask = 0;
+ int i;
struct msm_vfe_stats_stream *stream_info;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
- num_stats_comp_mask =
- vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
- rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
- stats_data->stream_info);
- if (rc < 0)
- return rc;
+ uint32_t idx;
+ int vfe_idx;
+
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
@@ -683,63 +968,99 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
pr_err("%s Invalid stats index %d", __func__, idx);
return -EINVAL;
}
-
- stream_info = &stats_data->stream_info[idx];
- if (stream_info->stream_handle !=
- stream_cfg_cmd->stream_handle[i]) {
- pr_err("%s: Invalid stream handle: 0x%x received\n",
- __func__, stream_cfg_cmd->stream_handle[i]);
- continue;
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev, idx);
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+ if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] !=
+ stream_cfg_cmd->stream_handle[i]) {
+ pr_err("%s: Invalid stream handle: 0x%x received expected %x\n",
+ __func__, stream_cfg_cmd->stream_handle[i],
+ vfe_idx == -ENOTTY ? 0 :
+ stream_info->stream_handle[vfe_idx]);
+ return -EINVAL;
}
+ }
+ return 0;
+}
- if (stream_info->composite_flag > num_stats_comp_mask) {
- pr_err("%s: comp grp %d exceed max %d\n",
- __func__, stream_info->composite_flag,
- num_stats_comp_mask);
- return -EINVAL;
+static void __msm_isp_stop_stats_streams(
+ struct msm_vfe_stats_stream **streams,
+ int num_streams,
+ struct msm_isp_timestamp timestamp)
+{
+ int i;
+ int k;
+ struct msm_vfe_stats_stream *stream_info;
+ struct vfe_device *vfe_dev;
+ struct msm_vfe_stats_shared_data *stats_data;
+ unsigned long flags;
+
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ spin_lock_irqsave(&stream_info->lock, flags);
+ init_completion(&stream_info->inactive_comp);
+ stream_info->state = STATS_STOP_PENDING;
+ if (stream_info->vfe_dev[0]->
+ axi_data.src_info[VFE_PIX_0].active == 0) {
+ while (stream_info->state != STATS_INACTIVE)
+ __msm_isp_stats_stream_update(stream_info);
}
- rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
- if (rc < 0) {
- pr_err("%s: No buffer for stream%d\n", __func__, idx);
- return rc;
+ for (k = 0; k < stream_info->num_isp; k++) {
+ stats_data = &stream_info->vfe_dev[k]->stats_data;
+ stats_data->num_active_stream--;
}
- if (!stream_info->composite_flag)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- cfg_wm_irq_mask(vfe_dev, stream_info);
-
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
- stream_info->state = STATS_START_PENDING;
- else
- stream_info->state = STATS_ACTIVE;
-
- stats_data->num_active_stream++;
- stats_mask |= 1 << idx;
-
- if (stream_info->composite_flag > 0)
- comp_stats_mask[stream_info->composite_flag-1] |=
- 1 << idx;
-
- ISP_DBG("%s: stats_mask %x %x active streams %d\n",
- __func__, comp_stats_mask[0],
- comp_stats_mask[1],
- stats_data->num_active_stream);
+ msm_isp_stats_cfg_stream_scratch(
+ stream_info, VFE_PING_FLAG);
+ msm_isp_stats_cfg_stream_scratch(
+ stream_info, VFE_PONG_FLAG);
+ vfe_dev = stream_info->vfe_dev[0];
+		if (vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+			stream_info->bufq_handle,
+			MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
+			vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id) ==
+			-EFAULT)
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_BUF_FATAL_ERROR);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
- rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
- } else {
- vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
- vfe_dev, stats_mask, stream_cfg_cmd->enable);
- for (i = 0; i < num_stats_comp_mask; i++) {
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
- vfe_dev, comp_stats_mask[i], i, 1);
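+	/*
+	 * If hardware never signalled the stop, force the software state to
+	 * INACTIVE so driver and hardware bookkeeping stay consistent.
+	 */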
+ if (msm_isp_stats_wait_for_streams(streams, num_streams, 0)) {
+ for (i = 0; i < num_streams; i++) {
+ stream_info = streams[i];
+ if (stream_info->state == STATS_INACTIVE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ while (stream_info->state != STATS_INACTIVE)
+ __msm_isp_stats_stream_update(stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
}
}
- return rc;
}
-static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+static int msm_isp_check_stats_stream_state(
+ struct msm_vfe_stats_stream *stream_info,
+ int cmd)
+{
+ switch (stream_info->state) {
+ case STATS_AVAILABLE:
+ return -EINVAL;
+ case STATS_INACTIVE:
+ if (cmd == 0)
+ return -EALREADY;
+ break;
+ case STATS_ACTIVE:
+ if (cmd)
+ return -EALREADY;
+ break;
+ default:
+ WARN(1, "Invalid stats state %d\n", stream_info->state);
+ }
+ return 0;
+}
+
+static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
{
int i, rc = 0;
@@ -747,95 +1068,125 @@ static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
uint32_t num_stats_comp_mask = 0;
struct msm_vfe_stats_stream *stream_info;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_vfe_stats_shared_data *stats_data;
+ int num_stream = 0;
+ struct msm_vfe_stats_stream *streams[MSM_ISP_STATS_MAX];
struct msm_isp_timestamp timestamp;
+ unsigned long flags;
+ int k;
+ struct vfe_device *update_vfes[MAX_VFE] = {NULL, NULL};
+ uint32_t num_active_streams[MAX_VFE] = {0, 0};
+ struct vfe_device *vfe_dev;
msm_isp_get_timestamp(&timestamp);
num_stats_comp_mask =
- vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
-
+ vfe_dev_ioctl->hw_info->stats_hw_info->num_stats_comp_mask;
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
-
idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
-
- if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
- pr_err("%s Invalid stats index %d", __func__, idx);
- return -EINVAL;
- }
-
- stream_info = &stats_data->stream_info[idx];
- if (stream_info->stream_handle !=
- stream_cfg_cmd->stream_handle[i]) {
- pr_err("%s: Invalid stream handle: 0x%x received\n",
- __func__, stream_cfg_cmd->stream_handle[i]);
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev_ioctl, idx);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ rc = msm_isp_check_stats_stream_state(stream_info, 1);
+ if (rc == -EALREADY) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ rc = 0;
continue;
}
-
- if (stream_info->composite_flag > num_stats_comp_mask) {
- pr_err("%s: comp grp %d exceed max %d\n",
- __func__, stream_info->composite_flag,
- num_stats_comp_mask);
- return -EINVAL;
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ goto error;
}
+ rc = msm_isp_init_stats_ping_pong_reg(vfe_dev_ioctl,
+ stream_info);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err("%s: No buffer for stream%d\n", __func__, idx);
+ return rc;
+ }
+ init_completion(&stream_info->active_comp);
+ stream_info->state = STATS_START_PENDING;
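+		/*
+		 * With no PIX frames there will be no reg-update irq, so
+		 * step the state machine to ACTIVE immediately.
+		 */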
+ if (vfe_dev_ioctl->axi_data.src_info[VFE_PIX_0].active == 0) {
+ while (stream_info->state != STATS_ACTIVE)
+ __msm_isp_stats_stream_update(stream_info);
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
- if (!stream_info->composite_flag)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- clear_wm_irq_mask(vfe_dev, stream_info);
-
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
- stream_info->state = STATS_STOP_PENDING;
- else
- stream_info->state = STATS_INACTIVE;
-
- stats_data->num_active_stream--;
stats_mask |= 1 << idx;
+ for (k = 0; k < stream_info->num_isp; k++) {
+ vfe_dev = stream_info->vfe_dev[k];
+ if (update_vfes[vfe_dev->pdev->id])
+ continue;
+ update_vfes[vfe_dev->pdev->id] = vfe_dev;
+ stats_data = &vfe_dev->stats_data;
+ num_active_streams[vfe_dev->pdev->id] =
+ stats_data->num_active_stream;
+ stats_data->num_active_stream++;
+ }
- if (stream_info->composite_flag > 0)
+ if (stream_info->composite_flag)
comp_stats_mask[stream_info->composite_flag-1] |=
1 << idx;
- msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PING_FLAG);
- msm_isp_stats_cfg_stream_scratch(vfe_dev, stream_info,
- VFE_PONG_FLAG);
-
ISP_DBG("%s: stats_mask %x %x active streams %d\n",
__func__, comp_stats_mask[0],
comp_stats_mask[1],
stats_data->num_active_stream);
+ streams[num_stream++] = stream_info;
}
- if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
- rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
- } else {
- vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
- vfe_dev, stats_mask, stream_cfg_cmd->enable);
- for (i = 0; i < num_stats_comp_mask; i++) {
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
- vfe_dev, comp_stats_mask[i], i, 0);
- }
+ for (k = 0; k < MAX_VFE; k++) {
+ if (!update_vfes[k] || num_active_streams[k])
+ continue;
+ vfe_dev = update_vfes[k];
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
}
+ rc = msm_isp_stats_wait_for_streams(streams, num_stream, 1);
+ if (rc)
+ goto error;
+ return 0;
+error:
+ __msm_isp_stop_stats_streams(streams, num_stream, timestamp);
+ return rc;
+}
+
+static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i, rc = 0;
+ uint32_t idx;
+ uint32_t num_stats_comp_mask = 0;
+ struct msm_vfe_stats_stream *stream_info;
+ struct msm_isp_timestamp timestamp;
+ int num_stream = 0;
+ struct msm_vfe_stats_stream *streams[MSM_ISP_STATS_MAX];
+ unsigned long flags;
+
+ msm_isp_get_timestamp(&timestamp);
+
+ num_stats_comp_mask =
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
- idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
- if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
- pr_err("%s Invalid stats index %d", __func__, idx);
- return -EINVAL;
- }
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
- stream_info = &stats_data->stream_info[idx];
- rc = vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
- vfe_dev->pdev->id, stream_info->bufq_handle,
- MSM_ISP_BUFFER_FLUSH_ALL, &timestamp.buf_time,
- vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
- if (rc == -EFAULT) {
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
- return rc;
+ stream_info = msm_isp_get_stats_stream_common_data(
+ vfe_dev, idx);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ rc = msm_isp_check_stats_stream_state(stream_info, 0);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ rc = 0;
+ continue;
}
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ streams[num_stream++] = stream_info;
}
+
+ __msm_isp_stop_stats_streams(streams, num_stream, timestamp);
+
return rc;
}
@@ -843,8 +1194,10 @@ int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0;
struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
- if (vfe_dev->stats_data.num_active_stream == 0)
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+
+ rc = msm_isp_check_stream_cfg_cmd(vfe_dev, stream_cfg_cmd);
+ if (rc)
+ return rc;
if (stream_cfg_cmd->enable) {
msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
@@ -863,31 +1216,37 @@ int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
{
int rc = 0, i;
struct msm_vfe_stats_stream *stream_info;
- struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
struct msm_isp_sw_framskip *sw_skip_info = NULL;
+ int vfe_idx;
+ int k;
/*validate request*/
for (i = 0; i < update_cmd->num_streams; i++) {
- update_info = &update_cmd->update_info[i];
+ update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
/*check array reference bounds*/
if (STATS_IDX(update_info->stream_handle)
> vfe_dev->hw_info->stats_hw_info->num_stats_type) {
pr_err("%s: stats idx %d out of bound!", __func__,
- STATS_IDX(update_info->stream_handle));
+ STATS_IDX(update_info->stream_handle));
return -EINVAL;
}
}
for (i = 0; i < update_cmd->num_streams; i++) {
- update_info = &update_cmd->update_info[i];
- stream_info = &stats_data->stream_info[
- STATS_IDX(update_info->stream_handle)];
- if (stream_info->stream_handle !=
+ update_info = (struct msm_vfe_axi_stream_cfg_update_info *)
+ &update_cmd->update_info[i];
+ stream_info = msm_isp_get_stats_stream_common_data(vfe_dev,
+ STATS_IDX(update_info->stream_handle));
+ vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+ if (vfe_idx == -ENOTTY || stream_info->stream_handle[vfe_idx] !=
update_info->stream_handle) {
pr_err("%s: stats stream handle %x %x mismatch!\n",
- __func__, stream_info->stream_handle,
+ __func__, vfe_idx != -ENOTTY ?
+ stream_info->stream_handle[vfe_idx] : 0,
update_info->stream_handle);
continue;
}
@@ -897,18 +1256,22 @@ int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
uint32_t framedrop_period =
msm_isp_get_framedrop_period(
update_info->skip_pattern);
- if (update_info->skip_pattern == SKIP_ALL)
+ if (update_info->skip_pattern ==
+ SKIP_ALL)
stream_info->framedrop_pattern = 0x0;
else
stream_info->framedrop_pattern = 0x1;
stream_info->framedrop_period = framedrop_period - 1;
if (stream_info->init_stats_frame_drop == 0)
- vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
- vfe_dev, stream_info);
+				for (k = 0; k < stream_info->num_isp; k++)
+					stream_info->vfe_dev[k]->hw_info->
+						vfe_ops.stats_ops.cfg_wm_reg(
+						stream_info->vfe_dev[k],
+						stream_info);
break;
}
case UPDATE_STREAM_SW_FRAME_DROP: {
- sw_skip_info = &update_info->sw_skip_info;
+ sw_skip_info =
+ &update_info->sw_skip_info;
if (!stream_info->sw_skip.stream_src_mask)
stream_info->sw_skip = *sw_skip_info;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
index 01120b65be92..e9728f33fae1 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,8 +23,58 @@ int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg);
-void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev);
void msm_isp_stats_disable(struct vfe_device *vfe_dev);
int msm_isp_stats_reset(struct vfe_device *vfe_dev);
int msm_isp_stats_restart(struct vfe_device *vfe_dev);
+void msm_isp_release_all_stats_stream(struct vfe_device *vfe_dev);
+void msm_isp_process_stats_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
+ enum msm_isp_comp_irq_types irq);
+
+static inline int msm_isp_get_vfe_idx_for_stats_stream_user(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int vfe_idx;
+
+ for (vfe_idx = 0; vfe_idx < stream_info->num_isp; vfe_idx++)
+ if (stream_info->vfe_dev[vfe_idx] == vfe_dev)
+ return vfe_idx;
+ return -ENOTTY;
+}
+
+static inline int msm_isp_get_vfe_idx_for_stats_stream(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int vfe_idx = msm_isp_get_vfe_idx_for_stats_stream_user(vfe_dev,
+ stream_info);
+
+ if (vfe_idx < 0) {
+ WARN(1, "%s vfe index missing for stream %d vfe %d\n",
+ __func__, stream_info->stats_type, vfe_dev->pdev->id);
+ vfe_idx = 0;
+ }
+ return vfe_idx;
+}
+
+static inline struct msm_vfe_stats_stream *
+ msm_isp_get_stats_stream_common_data(
+ struct vfe_device *vfe_dev,
+ enum msm_isp_stats_type idx)
+{
+ if (vfe_dev->is_split)
+ return &vfe_dev->common_data->stats_streams[idx];
+ else
+ return &vfe_dev->common_data->stats_streams[idx +
+ MSM_ISP_STATS_MAX * vfe_dev->pdev->id];
+}
+
+static inline struct msm_vfe_stats_stream *
+ msm_isp_get_stats_stream(struct dual_vfe_resource *dual_vfe_res,
+ int vfe_id,
+ enum msm_isp_stats_type idx)
+{
+ return msm_isp_get_stats_stream_common_data(
+ dual_vfe_res->vfe_dev[vfe_id], idx);
+}
#endif /* __MSM_ISP_STATS_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index e47a8de30aa9..fdee3cabd097 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -25,6 +25,22 @@
static DEFINE_MUTEX(bandwidth_mgr_mutex);
static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
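+/*
+ * In split (dual-VFE) mode stream state is shared between both VFEs, so
+ * ioctls issued on VFE0 also take VFE1's core_mutex before touching it.
+ */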
+#define MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev) { \
+ if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) { \
+ struct vfe_device *vfe1_dev = vfe_dev->common_data-> \
+ dual_vfe_res->vfe_dev[ISP_VFE1]; \
+ mutex_lock(&vfe1_dev->core_mutex); \
+ } \
+}
+
+#define MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev) { \
+ if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) { \
+ struct vfe_device *vfe1_dev = vfe_dev->common_data-> \
+ dual_vfe_res->vfe_dev[ISP_VFE1]; \
+ mutex_unlock(&vfe1_dev->core_mutex); \
+ } \
+}
+
static uint64_t msm_isp_cpp_clk_rate;
#define VFE40_8974V2_VERSION 0x1001001A
@@ -762,26 +778,39 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
}
case VIDIOC_MSM_ISP_REQUEST_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_request_axi_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_RELEASE_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_release_axi_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_CFG_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_AXI_HALT:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_axi_halt(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_AXI_RESET:
mutex_lock(&vfe_dev->core_mutex);
+		/* For dual VFE, both VFEs are reset on the VFE1 call; skip VFE0 */
+ if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) {
+ mutex_unlock(&vfe_dev->core_mutex);
+ return 0;
+ }
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
rc = msm_isp_stats_reset(vfe_dev);
@@ -796,6 +825,11 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
break;
case VIDIOC_MSM_ISP_AXI_RESTART:
mutex_lock(&vfe_dev->core_mutex);
+		/* For dual VFE, both VFEs are restarted on the VFE1 call; skip VFE0 */
+ if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE0) {
+ mutex_unlock(&vfe_dev->core_mutex);
+ return 0;
+ }
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
rc = msm_isp_stats_restart(vfe_dev);
@@ -848,27 +882,37 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
break;
case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_request_stats_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_release_stats_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_update_stats_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_UPDATE_STREAM:
mutex_lock(&vfe_dev->core_mutex);
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
rc = msm_isp_update_axi_stream(vfe_dev, arg);
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
case VIDIOC_MSM_ISP_SMMU_ATTACH:
@@ -883,10 +927,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
vfe_dev->isp_raw2_debug = 0;
break;
case MSM_SD_UNNOTIFY_FREEZE:
- break;
case MSM_SD_SHUTDOWN:
- while (vfe_dev->vfe_open_cnt != 0)
- msm_isp_close_node(sd, NULL);
break;
default:
@@ -1631,8 +1672,8 @@ static int msm_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
{
int rc = vfe_dev->buf_mgr->pagefault_debug_disable;
- pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %pK\n", __func__,
- __LINE__, vfe_dev->pdev->id, vfe_dev);
+ pr_err("%s:%d] VFE%d Handle Page fault!\n", __func__,
+ __LINE__, vfe_dev->pdev->id);
msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);
@@ -1899,6 +1940,7 @@ static void msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
if (vfe_dev->vfe_open_cnt > 0) {
atomic_set(&vfe_dev->error_info.overflow_state,
HALT_ENFORCED);
+ pr_err("%s: fault address is %lx\n", __func__, iova);
msm_isp_process_iommu_page_fault(vfe_dev);
} else {
pr_err("%s: no handling, vfe open cnt = %d\n",
@@ -1928,9 +1970,6 @@ int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
return -EINVAL;
}
- if (vfe_dev->pdev->id == ISP_VFE0)
- vfe_dev->common_data->dual_vfe_res->epoch_sync_mask = 0;
-
mutex_lock(&vfe_dev->realtime_mutex);
mutex_lock(&vfe_dev->core_mutex);
@@ -2032,6 +2071,10 @@ int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
mutex_unlock(&vfe_dev->realtime_mutex);
return 0;
}
+ MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
+ msm_isp_release_all_axi_stream(vfe_dev);
+ msm_isp_release_all_stats_stream(vfe_dev);
+
/* Unregister page fault handler */
cam_smmu_reg_client_page_fault_handler(
vfe_dev->buf_mgr->iommu_hdl,
@@ -2059,6 +2102,7 @@ int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
msm_isp_end_avtimer();
vfe_dev->vt_enable = 0;
}
+ MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
vfe_dev->is_split = 0;
mutex_unlock(&vfe_dev->core_mutex);
@@ -2088,25 +2132,3 @@ void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
return;
}
-void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src)
-{
- struct msm_vfe_axi_stream *stream_info = NULL;
- uint32_t j = 0;
- unsigned long flags;
-
- for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
- stream_info = &vfe_dev->axi_data.stream_info[j];
- if (stream_info->state != ACTIVE)
- continue;
- if (frame_src != SRC_TO_INTF(stream_info->stream_src))
- continue;
-
- stream_info =
- &vfe_dev->axi_data.stream_info[j];
- spin_lock_irqsave(&stream_info->lock, flags);
- stream_info->activated_framedrop_period =
- stream_info->requested_framedrop_period;
- spin_unlock_irqrestore(&stream_info->lock, flags);
- }
-}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
index 9df60c0d7383..16e3198f35b7 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
@@ -70,7 +70,5 @@ void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
struct msm_vfe_fetch_engine_info *fetch_engine_info);
void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format);
void msm_isp_flush_tasklet(struct vfe_device *vfe_dev);
-void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
- enum msm_vfe_input_src frame_src);
void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp);
#endif /* __MSM_ISP_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
index d0ce1de1162a..3124fd8a1777 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
+#include <linux/leds-qpnp-flash.h>
#include "msm_flash.h"
#include "msm_camera_dt_util.h"
#include "msm_cci.h"
@@ -491,6 +492,45 @@ static int32_t msm_flash_init(
return 0;
}
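+/*
+ * Enable the flash regulator once the flash has been initialized and
+ * disable it again after release, tracking state in is_regulator_enabled.
+ */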
+static int32_t msm_flash_prepare(
+ struct msm_flash_ctrl_t *flash_ctrl)
+{
+ int32_t ret = 0;
+
+ CDBG("%s:%d: State : %d\n",
+ __func__, __LINE__, flash_ctrl->flash_state);
+
+ if (flash_ctrl->switch_trigger == NULL) {
+ pr_err("%s:%d Invalid argument\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT &&
+ flash_ctrl->is_regulator_enabled == 0) {
+ ret = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+ ENABLE_REGULATOR, NULL);
+ if (ret < 0) {
+ pr_err("%s:%d regulator enable failed ret = %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ flash_ctrl->is_regulator_enabled = 1;
+ } else if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_RELEASE &&
+ flash_ctrl->is_regulator_enabled) {
+ ret = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+ DISABLE_REGULATOR, NULL);
+ if (ret < 0) {
+ pr_err("%s:%d regulator disable failed ret = %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ flash_ctrl->is_regulator_enabled = 0;
+ }
+ CDBG("%s:%d:Exit\n", __func__, __LINE__);
+ return ret;
+}
+
static int32_t msm_flash_low(
struct msm_flash_ctrl_t *flash_ctrl,
struct msm_flash_cfg_data_t *flash_data)
@@ -564,6 +604,29 @@ static int32_t msm_flash_high(
return 0;
}
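+/* Ask the QPNP flash LED driver for the maximum current it can supply. */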
+static int32_t msm_flash_query_current(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_query_data_t *flash_query_data)
+{
+ int32_t ret = -EINVAL;
+ int32_t max_current = -EINVAL;
+
+ if (flash_ctrl->switch_trigger) {
+ ret = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+ QUERY_MAX_CURRENT, &max_current);
+ if (ret < 0) {
+ pr_err("%s:%d Query max_avail_curr failed ret = %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
+ }
+
+ flash_query_data->max_avail_curr = max_current;
+ CDBG("%s: %d: max_avail_curr : %d\n", __func__, __LINE__,
+ flash_query_data->max_avail_curr);
+ return 0;
+}
+
static int32_t msm_flash_release(
struct msm_flash_ctrl_t *flash_ctrl)
{
@@ -626,11 +689,55 @@ static int32_t msm_flash_config(struct msm_flash_ctrl_t *flash_ctrl,
mutex_unlock(flash_ctrl->flash_mutex);
+ rc = msm_flash_prepare(flash_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d Enable/Disable Regulator failed ret = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
CDBG("Exit %s type %d\n", __func__, flash_data->cfg_type);
return rc;
}
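+/*
+ * FLASH_QUERY_CURRENT: prefer the hardware-specific query; if none is
+ * registered, fall back to summing the configured per-source currents.
+ */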
+static int32_t msm_flash_query_data(struct msm_flash_ctrl_t *flash_ctrl,
+ void __user *argp)
+{
+ int32_t rc = -EINVAL, i = 0;
+ struct msm_flash_query_data_t *flash_query =
+ (struct msm_flash_query_data_t *) argp;
+
+ CDBG("Enter %s type %d\n", __func__, flash_query->query_type);
+
+ switch (flash_query->query_type) {
+ case FLASH_QUERY_CURRENT:
+ if (flash_ctrl->func_tbl && flash_ctrl->func_tbl->
+ camera_flash_query_current != NULL)
+ rc = flash_ctrl->func_tbl->
+ camera_flash_query_current(
+ flash_ctrl, flash_query);
+ else {
+ flash_query->max_avail_curr = 0;
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++) {
+ flash_query->max_avail_curr +=
+ flash_ctrl->flash_op_current[i];
+ }
+ rc = 0;
+ CDBG("%s: max_avail_curr: %d\n", __func__,
+ flash_query->max_avail_curr);
+ }
+ break;
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+ CDBG("Exit %s type %d\n", __func__, flash_query->query_type);
+
+ return rc;
+}
+
static long msm_flash_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
@@ -662,8 +769,11 @@ static long msm_flash_subdev_ioctl(struct v4l2_subdev *sd,
pr_err("fctrl->func_tbl NULL\n");
return -EINVAL;
} else {
- return fctrl->func_tbl->camera_flash_release(fctrl);
+ fctrl->func_tbl->camera_flash_release(fctrl);
+ return msm_flash_prepare(fctrl);
}
+ case VIDIOC_MSM_FLASH_QUERY_DATA:
+ return msm_flash_query_data(fctrl, argp);
default:
pr_err_ratelimited("invalid cmd %d\n", cmd);
return -ENOIOCTLCMD;
@@ -1140,6 +1250,7 @@ static struct msm_flash_table msm_pmic_flash_table = {
.camera_flash_off = msm_flash_off,
.camera_flash_low = msm_flash_low,
.camera_flash_high = msm_flash_high,
+ .camera_flash_query_current = msm_flash_query_current,
},
};
@@ -1151,6 +1262,7 @@ static struct msm_flash_table msm_gpio_flash_table = {
.camera_flash_off = msm_flash_off,
.camera_flash_low = msm_flash_low,
.camera_flash_high = msm_flash_high,
+ .camera_flash_query_current = NULL,
},
};
@@ -1162,6 +1274,7 @@ static struct msm_flash_table msm_i2c_flash_table = {
.camera_flash_off = msm_flash_i2c_write_setting_array,
.camera_flash_low = msm_flash_i2c_write_setting_array,
.camera_flash_high = msm_flash_i2c_write_setting_array,
+ .camera_flash_query_current = NULL,
},
};
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
index c82e48cddcaf..f6ac16f57080 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,6 +42,9 @@ struct msm_flash_func_t {
struct msm_flash_cfg_data_t *);
int32_t (*camera_flash_high)(struct msm_flash_ctrl_t *,
struct msm_flash_cfg_data_t *);
+ int32_t (*camera_flash_query_current)(struct msm_flash_ctrl_t *,
+ struct msm_flash_query_data_t *);
+
};
struct msm_flash_table {
@@ -67,6 +70,7 @@ struct msm_flash_ctrl_t {
/* Switch node to trigger led */
const char *switch_trigger_name;
struct led_trigger *switch_trigger;
+ uint32_t is_regulator_enabled;
/* Flash */
uint32_t flash_num_sources;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/Makefile b/drivers/media/platform/msm/camera_v2/sensor/io/Makefile
index ec958697ae13..549c35a806f7 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/Makefile
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/Makefile
@@ -2,4 +2,5 @@ ccflags-y += -Idrivers/media/platform/msm/camera_v2/
ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor
ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
-obj-$(CONFIG_MSMB_CAMERA) += msm_camera_cci_i2c.o msm_camera_qup_i2c.o msm_camera_spi.o msm_camera_dt_util.o
+ccflags-y += -Idrivers/misc/
+obj-$(CONFIG_MSMB_CAMERA) += msm_camera_cci_i2c.o msm_camera_qup_i2c.o msm_camera_spi.o msm_camera_dt_util.o msm_camera_tz_i2c.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h
index 0fbe35713d8e..785dd54d65e1 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h
@@ -152,4 +152,60 @@ int32_t msm_camera_qup_i2c_poll(struct msm_camera_i2c_client *client,
uint32_t addr, uint16_t data,
enum msm_camera_i2c_data_type data_type, uint32_t delay_ms);
+int32_t msm_camera_tz_i2c_register_sensor(void *s_ctrl_p);
+
+int32_t msm_camera_tz_i2c_power_up(struct msm_camera_i2c_client *client);
+
+int32_t msm_camera_tz_i2c_power_down(struct msm_camera_i2c_client *client);
+
+int32_t msm_camera_tz_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_tz_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_tz_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_tz_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_tz_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_tz_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_sensor_tz_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd);
+
+int32_t msm_camera_tz_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
new file mode 100644
index 000000000000..25c152be2b71
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_tz_i2c.c
@@ -0,0 +1,1093 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <soc/qcom/camera2.h>
+#include "qseecom_kernel.h"
+#include "msm_camera_i2c.h"
+#include "msm_camera_io_util.h"
+#include "msm_cci.h"
+#include "msm_sensor.h"
+
+#define QSEECOM_SBUFF_SIZE SZ_128K
+#define MAX_TA_NAME 32
+#define EMPTY_QSEECOM_HANDLE NULL
+
+#ifndef CONFIG_MSM_SEC_CCI_TA_NAME
+ #define CONFIG_MSM_SEC_CCI_TA_NAME "seccamdemo64"
+#endif /* CONFIG_MSM_SEC_CCI_TA_NAME */
+
+/* Update the version major number whenever the HLOS-TA interface changes */
+#define TA_IF_VERSION_MAJ 0
+#define TA_IF_VERSION_MIN 1
+
+#undef CDBG
+#ifdef CONFIG_MSM_SEC_CCI_DEBUG
+
+#define CDBG(fmt, args...) pr_info(CONFIG_MSM_SEC_CCI_TA_NAME "::%s:%d - " fmt,\
+ __func__, __LINE__, ##args)
+#define TZ_I2C_FN_RETURN(ret, i2c_fn, ...) \
+ ((ret < 0) ? i2c_fn(__VA_ARGS__):ret)
+
+#else /* CONFIG_MSM_SEC_CCI_DEBUG */
+
+#define CDBG(fmt, args...) pr_info("%s:%d - " fmt, __func__, __LINE__, ##args)
+#define TZ_I2C_FN_RETURN(ret, i2c_fn, ...) \
+ ((ret < 0) ? -EFAULT:ret)
+
+#endif /* CONFIG_MSM_SEC_CCI_DEBUG */
+
+#pragma pack(push, msm_camera_tz_i2c, 1)
+
+enum msm_camera_tz_i2c_cmd_id_t {
+ TZ_I2C_CMD_GET_NONE,
+ TZ_I2C_CMD_GET_IF_VERSION,
+ TZ_I2C_CMD_POWER_UP,
+ TZ_I2C_CMD_POWER_DOWN,
+ TZ_I2C_CMD_CCI_GENERIC,
+ TZ_I2C_CMD_CCI_READ,
+ TZ_I2C_CMD_CCI_READ_SEQ,
+ TZ_I2C_CMD_CCI_WRITE,
+ TZ_I2C_CMD_CCI_WRITE_SEQ,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_ASYNC,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC_BLOCK,
+ TZ_I2C_CMD_CCI_WRITE_TABLE,
+ TZ_I2C_CMD_CCI_WRITE_SEQ_TABLE,
+ TZ_I2C_CMD_CCI_WRITE_TABLE_W_MICRODELAY,
+ TZ_I2C_CMD_CCI_POLL,
+ TZ_I2C_CMD_CCI_WRITE_CONF_TBL,
+ TZ_I2C_CMD_CCI_UTIL,
+};
+
+enum msm_camera_tz_i2c_status_t {
+ TZ_I2C_STATUS_SUCCESS = 0,
+ TZ_I2C_STATUS_GENERAL_FAILURE = -1,
+ TZ_I2C_STATUS_INVALID_INPUT_PARAMS = -2,
+ TZ_I2C_STATUS_INVALID_SENSOR_ID = -3,
+ TZ_I2C_STATUS_BYPASS = -4,
+ TZ_I2C_STATUS_ERR_SIZE = 0x7FFFFFFF
+};
+
+struct msm_camera_tz_i2c_generic_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+};
+
+struct msm_camera_tz_i2c_generic_rsp_t {
+ enum msm_camera_tz_i2c_status_t rc;
+};
+
+#define msm_camera_tz_i2c_get_if_version_req_t msm_camera_tz_i2c_generic_req_t
+
+struct msm_camera_tz_i2c_get_if_version_rsp_t {
+ enum msm_camera_tz_i2c_status_t rc;
+ uint32_t if_version_maj;
+ uint32_t if_version_min;
+};
+
+struct msm_camera_tz_i2c_power_up_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+};
+
+#define msm_camera_tz_i2c_power_up_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_power_down_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+};
+
+#define msm_camera_tz_i2c_power_down_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_cci_generic_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ enum msm_camera_tz_i2c_cmd_id_t cci_cmd_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+};
+
+#define msm_camera_tz_i2c_cci_generic_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_cci_read_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint32_t addr;
+ uint32_t data_type;
+};
+
+struct msm_camera_tz_i2c_cci_read_rsp_t {
+ enum msm_camera_tz_i2c_status_t rc;
+ uint16_t data;
+};
+
+struct msm_camera_tz_i2c_cci_write_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint32_t addr;
+ uint16_t data;
+ uint32_t data_type;
+};
+
+#define msm_camera_tz_i2c_cci_write_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+struct msm_camera_tz_i2c_cci_util_req_t {
+ enum msm_camera_tz_i2c_cmd_id_t cmd_id;
+ int32_t sensor_id;
+ uint32_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint16_t cci_cmd;
+};
+
+#define msm_camera_tz_i2c_cci_util_rsp_t msm_camera_tz_i2c_generic_rsp_t
+
+#pragma pack(pop, msm_camera_tz_i2c)
+
+struct msm_camera_tz_i2c_sensor_info_t {
+ struct msm_sensor_ctrl_t *s_ctrl;
+ struct msm_camera_i2c_fn_t *saved_sensor_i2c_fn;
+ uint32_t secure;
+ uint32_t ta_enabled;
+ struct qseecom_handle *ta_qseecom_handle;
+ const char *ta_name;
+};
+
+struct msm_camera_tz_i2c_ctrl_t {
+ struct mutex lock;
+ uint32_t lock_ready;
+ uint32_t secure_mode;
+};
+
+static struct msm_camera_tz_i2c_ctrl_t msm_camera_tz_i2c_ctrl;
+
+static struct msm_camera_tz_i2c_sensor_info_t sensor_info[MAX_CAMERAS] = {
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_SEC_CCI_TA_NAME},
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_SEC_CCI_TA_NAME},
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_SEC_CCI_TA_NAME},
+ {NULL, NULL, 0, 0, NULL, CONFIG_MSM_SEC_CCI_TA_NAME},
+};
+
+static int32_t msm_camera_tz_i2c_is_sensor_secure(
+ struct msm_camera_i2c_client *client)
+{
+ uint32_t index;
+
+ if (client == NULL) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Enter\n");
+ for (index = 0; index < MAX_CAMERAS; index++) {
+ if ((sensor_info[index].s_ctrl != NULL) &&
+ sensor_info[index].secure &&
+ (sensor_info[index].s_ctrl->sensor_i2c_client ==
+ client)) {
+ CDBG("Found secure sensor ID = %d\n",
+ sensor_info[index].s_ctrl->id);
+ return sensor_info[index].s_ctrl->id;
+ }
+ }
+ return -EINVAL;
+}
+
+static int32_t get_cmd_rsp_buffers(
+ struct qseecom_handle *ta_qseecom_handle,
+ void **cmd, int *cmd_len,
+ void **rsp, int *rsp_len)
+{
+
+ CDBG("Enter\n");
+ if ((ta_qseecom_handle == NULL) ||
+ (cmd == NULL) || (cmd_len == NULL) ||
+ (rsp == NULL) || (rsp_len == NULL)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (*cmd_len & QSEECOM_ALIGN_MASK)
+ *cmd_len = QSEECOM_ALIGN(*cmd_len);
+
+ if (*rsp_len & QSEECOM_ALIGN_MASK)
+ *rsp_len = QSEECOM_ALIGN(*rsp_len);
+
+ if ((*rsp_len + *cmd_len) > QSEECOM_SBUFF_SIZE) {
+ pr_err("%s:%d - Shared buffer too small to hold cmd=%d and rsp=%d\n",
+ __func__, __LINE__,
+ *cmd_len, *rsp_len);
+ return -ENOMEM;
+ }
+
+ *cmd = ta_qseecom_handle->sbuf;
+ *rsp = ta_qseecom_handle->sbuf + *cmd_len;
+ return 0;
+}
+
+static int32_t msm_camera_tz_i2c_ta_get_if_version(
+ struct qseecom_handle *ta_qseecom_handle,
+ uint32_t *if_version_maj,
+ uint32_t *if_version_min)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_get_if_version_req_t *cmd;
+ struct msm_camera_tz_i2c_get_if_version_rsp_t *rsp;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ if ((ta_qseecom_handle == NULL) ||
+ (if_version_maj == NULL) || (if_version_min == NULL)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cmd_len = sizeof(struct msm_camera_tz_i2c_get_if_version_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_get_if_version_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_GET_IF_VERSION;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Unable to get if version info, rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+
+ if (rsp->rc < 0) {
+ CDBG("TZ I2C App error, rc=%d\n", rsp->rc);
+ rc = -EFAULT;
+ } else {
+ *if_version_maj = rsp->if_version_maj;
+ *if_version_min = rsp->if_version_min;
+ CDBG("TZ I2C If version %d.%d\n", *if_version_maj,
+ *if_version_min);
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_power_up(
+ struct qseecom_handle *ta_qseecom_handle,
+ int32_t sensor_id,
+ uint32_t *sensor_secure)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_power_up_req_t *cmd;
+ struct msm_camera_tz_i2c_power_up_rsp_t *rsp;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+
+ if (sensor_secure == NULL)
+ return -EINVAL;
+
+ *sensor_secure = 0;
+ if ((ta_qseecom_handle == NULL) ||
+ (sensor_secure == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cmd_len = sizeof(struct msm_camera_tz_i2c_power_up_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_power_up_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_POWER_UP;
+ cmd->sensor_id = sensor_id;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Unable to get sensor secure status, rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+
+ if (rsp->rc == TZ_I2C_STATUS_SUCCESS)
+ *sensor_secure = 1;
+ CDBG("Sensor %d is %s\n", sensor_id,
+ (*sensor_secure)?"SECURE":"NON-SECURE");
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_power_down(
+ struct qseecom_handle *ta_qseecom_handle,
+ int32_t sensor_id)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_power_down_req_t *cmd;
+ struct msm_camera_tz_i2c_power_down_rsp_t *rsp;
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+
+ if ((ta_qseecom_handle == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cmd_len = sizeof(struct msm_camera_tz_i2c_power_down_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_power_down_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_POWER_DOWN;
+ cmd->sensor_id = sensor_id;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_generic(
+ struct msm_camera_i2c_client *client,
+ enum msm_camera_tz_i2c_cmd_id_t cci_cmd_id)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_generic_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_generic_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, cci_cmd_id=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ cci_cmd_id);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_generic_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_generic_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_GENERIC;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_cmd_id = cci_cmd_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+ CDBG("Done: rc=%d, cci_cmd_id=%d\n", rc, cci_cmd_id);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_read(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr,
+ uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_read_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_read_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (data == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, Addr=0x%X, Type=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ data_type);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_read_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_read_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_READ;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+ cmd->addr = addr;
+ cmd->data_type = data_type;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+ *data = rsp->data;
+
+ CDBG("Done: rc=%d, addr=0x%X, data=0x%X\n", rc,
+ addr, *data);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_write(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr,
+ uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_write_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_write_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, Addr=0x%X, Data=0x%X Type=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ data,
+ data_type);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_write_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_write_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_WRITE;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+ cmd->addr = addr;
+ cmd->data = data;
+ cmd->data_type = data_type;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed:, rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+
+ CDBG("Done: rc=%d, addr=0x%X, data=0x%X\n", rc,
+ addr, data);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_cci_util(
+ struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd)
+{
+ int32_t cmd_len, rsp_len;
+ struct msm_camera_tz_i2c_cci_util_req_t *cmd;
+ struct msm_camera_tz_i2c_cci_util_rsp_t *rsp;
+ int32_t rc = 0;
+ struct qseecom_handle *ta_qseecom_handle;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if ((client == NULL) ||
+ (sensor_id < 0) ||
+ (sensor_id >= MAX_CAMERAS)) {
+ pr_err("%s:%d - Bad parameters\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, cci_cmd=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ cci_cmd);
+
+ ta_qseecom_handle = sensor_info[sensor_id].ta_qseecom_handle;
+ cmd_len = sizeof(struct msm_camera_tz_i2c_cci_util_req_t);
+ rsp_len = sizeof(struct msm_camera_tz_i2c_cci_util_rsp_t);
+
+ rc = get_cmd_rsp_buffers(ta_qseecom_handle,
+ (void **)&cmd, &cmd_len, (void **)&rsp, &rsp_len);
+ if (!rc) {
+ cmd->cmd_id = TZ_I2C_CMD_CCI_UTIL;
+ cmd->sensor_id = sensor_id;
+ cmd->cci_i2c_master = client->cci_client->cci_i2c_master;
+ cmd->sid = client->cci_client->sid;
+ cmd->cid = client->cci_client->cid;
+ cmd->cci_cmd = cci_cmd;
+
+ rc = qseecom_send_command(ta_qseecom_handle,
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (rc < 0) {
+ pr_err("%s:%d - Failed: rc=%d\n",
+ __func__, __LINE__,
+ rc);
+ return rc;
+ }
+ rc = rsp->rc;
+ CDBG("Done: rc=%d, cci_cmd=%d\n", rc, cci_cmd);
+ }
+ return rc;
+}
+
+static int32_t msm_camera_tz_i2c_ta_probe(
+ struct msm_camera_i2c_client *client)
+{
+ int32_t sensor_id = -1;
+
+ CDBG("Enter\n");
+ sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+ if ((sensor_id >= 0) && sensor_info[sensor_id].ta_enabled
+ && msm_camera_tz_i2c_ctrl.lock_ready) {
+ mutex_lock(&msm_camera_tz_i2c_ctrl.lock);
+ return sensor_id;
+ }
+ return -EINVAL;
+}
+
+static int32_t msm_camera_tz_i2c_ta_done(void)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter\n");
+ if (msm_camera_tz_i2c_ctrl.lock_ready)
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ return rc;
+}
+
+int32_t msm_camera_tz_i2c_power_up(
+ struct msm_camera_i2c_client *client)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if (!msm_camera_tz_i2c_ctrl.lock_ready) {
+ msm_camera_tz_i2c_ctrl.lock_ready = 1;
+ mutex_init(&msm_camera_tz_i2c_ctrl.lock);
+ }
+
+ CDBG("Enter (sensor_id=%d)\n", sensor_id);
+ if (sensor_id >= 0) {
+ ktime_t startTime;
+
+ mutex_lock(&msm_camera_tz_i2c_ctrl.lock);
+ if (msm_camera_tz_i2c_ctrl.secure_mode) {
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ return rc;
+ }
+ startTime = ktime_get();
+
+ CDBG("Switch to secure mode (secure sensor=%d)\n",
+ sensor_id);
+ /* Start the TA */
+ if ((sensor_info[sensor_id].ta_qseecom_handle == NULL)
+ && (sensor_info[sensor_id].ta_name != NULL) &&
+ ('\0' != sensor_info[sensor_id].ta_name[0])) {
+ uint32_t if_version_maj = 0;
+ uint32_t if_version_min = 0;
+
+ sensor_info[sensor_id].ta_enabled = 0;
+ rc = qseecom_start_app(
+ &sensor_info[sensor_id].ta_qseecom_handle,
+ (char *)sensor_info[sensor_id].ta_name,
+ QSEECOM_SBUFF_SIZE);
+ if (!rc) {
+ rc = msm_camera_tz_i2c_ta_get_if_version(
+ sensor_info[sensor_id].
+ ta_qseecom_handle,
+ &if_version_maj, &if_version_min);
+ }
+
+ if (!rc) {
+ if (if_version_maj != TA_IF_VERSION_MAJ) {
+ CDBG("TA ver mismatch %d.%d != %d.%d\n",
+ if_version_maj, if_version_min,
+ TA_IF_VERSION_MAJ,
+ TA_IF_VERSION_MIN);
+ rc = qseecom_shutdown_app(
+ &sensor_info[sensor_id].
+ ta_qseecom_handle);
+ sensor_info[sensor_id].ta_qseecom_handle
+ = EMPTY_QSEECOM_HANDLE;
+ rc = -EFAULT;
+ } else {
+ uint32_t sensor_secure = 0;
+ /* Notify TA & get sensor secure status */
+ rc = msm_camera_tz_i2c_ta_power_up(
+ sensor_info[sensor_id].
+ ta_qseecom_handle,
+ sensor_id,
+ &sensor_secure);
+ if (!rc && sensor_secure)
+ /* Sensor validated by TA */
+ sensor_info[sensor_id].
+ ta_enabled = 1;
+ else {
+ qseecom_shutdown_app(
+ &sensor_info[sensor_id].
+ ta_qseecom_handle);
+ sensor_info[sensor_id].
+ ta_qseecom_handle
+ = EMPTY_QSEECOM_HANDLE;
+ rc = -EFAULT;
+ }
+ }
+ }
+ }
+ CDBG("Init TA %s - %s(%d) - %llu\n",
+ sensor_info[sensor_id].ta_name,
+ (sensor_info[sensor_id].ta_enabled)?"Ok" :
+ "Failed", rc, ktime_us_delta(ktime_get(),
+ startTime));
+ if (!rc)
+ msm_camera_tz_i2c_ctrl.secure_mode++;
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ }
+ return rc;
+}
+
+int32_t msm_camera_tz_i2c_power_down(
+ struct msm_camera_i2c_client *client)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_is_sensor_secure(client);
+
+ if (!msm_camera_tz_i2c_ctrl.lock_ready) {
+ msm_camera_tz_i2c_ctrl.lock_ready = 1;
+ mutex_init(&msm_camera_tz_i2c_ctrl.lock);
+ }
+
+ CDBG("Enter (sensor_id=%d)\n", sensor_id);
+ if ((sensor_id >= 0) && (msm_camera_tz_i2c_ctrl.secure_mode != 0)) {
+ mutex_lock(&msm_camera_tz_i2c_ctrl.lock);
+ if (msm_camera_tz_i2c_ctrl.secure_mode == 1) {
+ ktime_t startTime = ktime_get();
+
+ CDBG("Switch to non-secure mode (secure sensor=%d)\n",
+ sensor_id);
+ /* Shutdown the TA */
+ if (sensor_info[sensor_id].ta_qseecom_handle != NULL) {
+ msm_camera_tz_i2c_ta_power_down(
+ sensor_info[sensor_id].
+ ta_qseecom_handle,
+ sensor_id);
+ rc = qseecom_shutdown_app(&sensor_info[
+ sensor_id].ta_qseecom_handle);
+ sensor_info[sensor_id].ta_qseecom_handle
+ = EMPTY_QSEECOM_HANDLE;
+ }
+ CDBG("Unload TA %s - %s(%d) - %llu\n",
+ sensor_info[sensor_id].ta_name,
+ (!rc)?"Ok":"Failed", rc,
+ ktime_us_delta(ktime_get(), startTime));
+ }
+ msm_camera_tz_i2c_ctrl.secure_mode--;
+ mutex_unlock(&msm_camera_tz_i2c_ctrl.lock);
+ }
+ return rc;
+}
+
+int32_t msm_camera_tz_i2c_register_sensor(
+ void *s_ctrl_p)
+{
+ struct msm_sensor_ctrl_t *s_ctrl = (struct msm_sensor_ctrl_t *)s_ctrl_p;
+
+ if (s_ctrl == NULL) {
+ pr_err("%s:%d - invalid parameter)\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (s_ctrl->id >= MAX_CAMERAS) {
+ pr_err("%s:%d - invalid ID: %d\n",
+ __func__, __LINE__, s_ctrl->id);
+ return -EINVAL;
+ }
+
+ CDBG("id=%d, client=%p\n", s_ctrl->id, s_ctrl);
+ sensor_info[s_ctrl->id].s_ctrl = s_ctrl;
+ sensor_info[s_ctrl->id].secure = s_ctrl->is_secure;
+ return 0;
+}
+
+int32_t msm_camera_tz_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_read(
+ client, addr, data, data_type);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_read, client, addr, data, data_type);
+}
+
+int32_t msm_camera_tz_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X, num=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ num_byte);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_READ_SEQ);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_read_seq, client, addr, data, num_byte);
+}
+
+int32_t msm_camera_tz_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_write(
+ client, addr, data, data_type);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write, client, addr, data, data_type);
+}
+
+int32_t msm_camera_tz_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, addr=0x%08X, num=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid,
+ addr,
+ num_byte);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_SEQ);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_seq, client, addr, data, num_byte);
+}
+
+int32_t msm_camera_tz_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_ASYNC);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_async, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_sync, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_SYNC_BLOCK);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_sync_block, client,
+ write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_SEQ_TABLE);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_seq_table, client, write_setting);
+}
+
+int32_t msm_camera_tz_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_TABLE_W_MICRODELAY);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_table_w_microdelay, client,
+ write_setting);
+}
+
+int32_t msm_camera_tz_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_POLL);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_poll, client, addr, data, data_type);
+}
+
+int32_t msm_camera_tz_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_generic(
+ client, TZ_I2C_CMD_CCI_WRITE_CONF_TBL);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_camera_cci_i2c_write_conf_tbl, client, reg_conf_tbl, size,
+ data_type);
+}
+
+int32_t msm_sensor_tz_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd)
+{
+ int32_t rc = -EFAULT;
+ int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);
+
+ CDBG("Sensor=%d, MS=%d, SID=%d, CID=%d, cci_cmd=%d\n",
+ sensor_id,
+ client->cci_client->cci_i2c_master,
+ client->cci_client->sid,
+ client->cci_client->cid, cci_cmd);
+
+ if (sensor_id >= 0) {
+ rc = msm_camera_tz_i2c_ta_cci_util(client, cci_cmd);
+ msm_camera_tz_i2c_ta_done();
+ }
+ return TZ_I2C_FN_RETURN(rc,
+ msm_sensor_cci_i2c_util, client, cci_cmd);
+}
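
Every public wrapper in the new file above follows the same bypass pattern: probe for a secure session, route the transaction to the TA when one exists, and otherwise let TZ_I2C_FN_RETURN fall back to the plain CCI helper (debug builds) or map the failure to -EFAULT (release builds). A minimal sketch of that pattern, using the hypothetical stand-ins try_secure_path() and plain_cci_call() rather than functions from this patch:

    /* Sketch only: try_secure_path() and plain_cci_call() are hypothetical
     * stand-ins for the TA and CCI helpers used by the real wrappers.
     */
    static int32_t secure_or_fallback(struct msm_camera_i2c_client *client)
    {
    	int32_t rc = -EFAULT;
    	int32_t sensor_id = msm_camera_tz_i2c_ta_probe(client);

    	if (sensor_id >= 0) {
    		rc = try_secure_path(client);	/* forwarded to the TA */
    		msm_camera_tz_i2c_ta_done();	/* drop the TZ lock */
    	}
    	/* With CONFIG_MSM_SEC_CCI_DEBUG, TZ_I2C_FN_RETURN retries the plain
    	 * CCI helper when rc < 0; otherwise it maps failure to -EFAULT.
    	 */
    	return TZ_I2C_FN_RETURN(rc, plain_cci_call, client);
    }
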
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
index 22d90a2baf7d..e1143c356721 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
@@ -21,6 +21,9 @@
#undef CDBG
#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl;
+static struct msm_camera_i2c_fn_t msm_sensor_secure_func_tbl;
+
static void msm_sensor_adjust_mclk(struct msm_camera_power_ctrl_t *ctrl)
{
int idx;
@@ -132,6 +135,11 @@ int msm_sensor_power_down(struct msm_sensor_ctrl_t *s_ctrl)
__func__, __LINE__, power_info, sensor_i2c_client);
return -EINVAL;
}
+
+ /* Power down the secure session if it exists */
+ if (s_ctrl->is_secure)
+ msm_camera_tz_i2c_power_down(sensor_i2c_client);
+
return msm_camera_power_down(power_info, sensor_device_type,
sensor_i2c_client);
}
@@ -170,7 +178,27 @@ int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl)
if (s_ctrl->set_mclk_23880000)
msm_sensor_adjust_mclk(power_info);
+ CDBG("Sensor %d tagged as %s\n", s_ctrl->id,
+ (s_ctrl->is_secure)?"SECURE":"NON-SECURE");
+
for (retry = 0; retry < 3; retry++) {
+ if (s_ctrl->is_secure) {
+ rc = msm_camera_tz_i2c_power_up(sensor_i2c_client);
+ if (rc < 0) {
+#ifdef CONFIG_MSM_SEC_CCI_DEBUG
+ CDBG("Secure Sensor %d use cci\n", s_ctrl->id);
+ /* session is not secure */
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_cci_func_tbl;
+#else /* CONFIG_MSM_SEC_CCI_DEBUG */
+ return rc;
+#endif /* CONFIG_MSM_SEC_CCI_DEBUG */
+ } else {
+ /* session is secure */
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_secure_func_tbl;
+ }
+ }
rc = msm_camera_power_up(power_info, s_ctrl->sensor_device_type,
sensor_i2c_client);
if (rc < 0)
@@ -1433,6 +1461,21 @@ static struct msm_camera_i2c_fn_t msm_sensor_qup_func_tbl = {
.i2c_write_table_sync_block = msm_camera_qup_i2c_write_table,
};
+static struct msm_camera_i2c_fn_t msm_sensor_secure_func_tbl = {
+ .i2c_read = msm_camera_tz_i2c_read,
+ .i2c_read_seq = msm_camera_tz_i2c_read_seq,
+ .i2c_write = msm_camera_tz_i2c_write,
+ .i2c_write_table = msm_camera_tz_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_tz_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_tz_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_tz_i2c_util,
+ .i2c_write_conf_tbl = msm_camera_tz_i2c_write_conf_tbl,
+ .i2c_write_table_async = msm_camera_tz_i2c_write_table_async,
+ .i2c_write_table_sync = msm_camera_tz_i2c_write_table_sync,
+ .i2c_write_table_sync_block = msm_camera_tz_i2c_write_table_sync_block,
+};
+
int32_t msm_sensor_init_default_params(struct msm_sensor_ctrl_t *s_ctrl)
{
struct msm_camera_cci_client *cci_client = NULL;
@@ -1466,6 +1509,9 @@ int32_t msm_sensor_init_default_params(struct msm_sensor_ctrl_t *s_ctrl)
/* Get CCI subdev */
cci_client->cci_subdev = msm_cci_get_subdev();
+ if (s_ctrl->is_secure)
+ msm_camera_tz_i2c_register_sensor((void *)s_ctrl);
+
/* Update CCI / I2C function table */
if (!s_ctrl->sensor_i2c_client->i2c_func_tbl)
s_ctrl->sensor_i2c_client->i2c_func_tbl =
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h
index bd12588eada9..5d57ec8c28ff 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -88,6 +88,7 @@ struct msm_sensor_ctrl_t {
enum msm_camera_stream_type_t camera_stream_type;
uint32_t set_mclk_23880000;
uint8_t is_csid_tg_mode;
+ uint32_t is_secure;
};
int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
index 02b83c969958..43aadffa2983 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -992,7 +992,7 @@ CSID_TG:
}
/* Update sensor mount angle and position in media entity flag */
is_yuv = (slave_info->output_format == MSM_SENSOR_YCBCR) ? 1 : 0;
- mount_pos = is_yuv << 25 |
+ mount_pos = ((s_ctrl->is_secure & 0x1) << 26) | is_yuv << 25 |
(s_ctrl->sensordata->sensor_info->position << 16) |
((s_ctrl->sensordata->
sensor_info->sensor_mount_angle / 90) << 8);
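
For reference, the media-entity flag built in the hunk above appears to pack its fields as sketched below; the field widths beyond what this hunk shows are an assumption, not taken from the source.

    /* Inferred mount_pos layout (illustrative sketch only):
     *   bit 26   : is_secure
     *   bit 25   : is_yuv
     *   bits 16+ : sensor position
     *   bits  8+ : sensor_mount_angle / 90
     * Example: a secure, non-YUV sensor at position 1 mounted at 270 degrees
     * yields (1 << 26) | (0 << 25) | (1 << 16) | (3 << 8) = 0x4010300.
     */
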
@@ -1079,6 +1079,16 @@ static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl)
goto FREE_VREG_DATA;
}
+ /* Get secure mode */
+ rc = of_property_read_u32(of_node, "qcom,secure",
+ &s_ctrl->is_secure);
+ CDBG("qcom,secure = %d, rc %d\n", s_ctrl->is_secure, rc);
+ if (rc < 0) {
+ /* Set default to non-secure mode */
+ s_ctrl->is_secure = 0;
+ rc = 0;
+ }
+
/* Get CCI master */
rc = of_property_read_u32(of_node, "qcom,cci-master",
&s_ctrl->cci_i2c_master);
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 05232f862e55..fdf6e1b1c5d0 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -2290,6 +2290,8 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
rc = -ENOTSUPP;
break;
}
+
+ msm_dcvs_try_enable(inst);
msm_comm_scale_clocks_and_bus(inst);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_ALLOC_MODE_INPUT:
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 5eb287a28bbd..99f30d9cb97b 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1665,7 +1665,6 @@ static inline int msm_venc_power_save_mode_enable(struct msm_vidc_inst *inst)
goto fail_power_mode_set;
}
inst->flags |= VIDC_LOW_POWER;
- msm_dcvs_enc_set_power_save_mode(inst, true);
dprintk(VIDC_INFO, "Power Save Mode set for inst: %pK\n", inst);
}
@@ -2939,6 +2938,7 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
break;
}
+ msm_dcvs_try_enable(inst);
msm_comm_scale_clocks_and_bus(inst);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_H264_VUI_BITSTREAM_RESTRICT:
@@ -3052,8 +3052,6 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
}
pdata = &venc_mode;
- msm_dcvs_enc_set_power_save_mode(inst,
- ctrl->val == V4L2_MPEG_VIDC_VIDEO_PERF_POWER_SAVE);
break;
case V4L2_CID_MPEG_VIDC_VIDEO_HIER_B_NUM_LAYERS:
if (inst->fmts[CAPTURE_PORT]->fourcc != V4L2_PIX_FMT_HEVC) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 84ce75c28bf8..437ad43e23e9 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -242,8 +242,7 @@ err_invalid_input:
return ret;
}
-static struct msm_smem *get_same_fd_buffer(struct msm_vidc_list *buf_list,
- int fd)
+static struct msm_smem *get_same_fd_buffer(struct msm_vidc_inst *inst, int fd)
{
struct buffer_info *temp;
struct msm_smem *same_fd_handle = NULL;
@@ -253,16 +252,18 @@ static struct msm_smem *get_same_fd_buffer(struct msm_vidc_list *buf_list,
if (!fd)
return NULL;
- if (!buf_list || fd < 0) {
- dprintk(VIDC_ERR, "Invalid input\n");
+ if (!inst || fd < 0) {
+ dprintk(VIDC_ERR, "%s: Invalid input\n", __func__);
goto err_invalid_input;
}
- mutex_lock(&buf_list->lock);
- list_for_each_entry(temp, &buf_list->list, list) {
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry(temp, &inst->registeredbufs.list, list) {
for (i = 0; i < min(temp->num_planes, VIDEO_MAX_PLANES); i++) {
- if (temp->fd[i] == fd &&
- temp->handle[i] && temp->mapped[i]) {
+ bool ion_hndl_matches = temp->handle[i] ?
+ msm_smem_compare_buffers(inst->mem_client, fd,
+ temp->handle[i]->smem_priv) : false;
+ if (ion_hndl_matches && temp->mapped[i]) {
temp->same_fd_ref[i]++;
dprintk(VIDC_INFO,
"Found same fd buffer\n");
@@ -273,7 +274,7 @@ static struct msm_smem *get_same_fd_buffer(struct msm_vidc_list *buf_list,
if (same_fd_handle)
break;
}
- mutex_unlock(&buf_list->lock);
+ mutex_unlock(&inst->registeredbufs.lock);
err_invalid_input:
return same_fd_handle;
@@ -487,8 +488,7 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
}
same_fd_handle = get_same_fd_buffer(
- &inst->registeredbufs,
- b->m.planes[i].reserved[0]);
+ inst, b->m.planes[i].reserved[0]);
populate_buf_info(binfo, b, i);
if (same_fd_handle) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 10dcc30f4aaa..566441e9c546 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1239,8 +1239,6 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
inst->prop.width[OUTPUT_PORT] = event_notify->width;
}
- inst->seqchanged_count++;
-
if (inst->session_type == MSM_VIDC_DECODER)
msm_dcvs_init_load(inst);
@@ -2227,11 +2225,32 @@ void handle_cmd_response(enum hal_command_response cmd, void *data)
int msm_comm_scale_clocks(struct msm_vidc_core *core)
{
- int num_mbs_per_sec =
- msm_comm_get_load(core, MSM_VIDC_ENCODER, LOAD_CALC_NO_QUIRKS) +
+ int num_mbs_per_sec, enc_mbs_per_sec, dec_mbs_per_sec;
+
+ enc_mbs_per_sec =
+ msm_comm_get_load(core, MSM_VIDC_ENCODER, LOAD_CALC_NO_QUIRKS);
+ dec_mbs_per_sec =
msm_comm_get_load(core, MSM_VIDC_DECODER, LOAD_CALC_NO_QUIRKS);
+
+ if (enc_mbs_per_sec >= dec_mbs_per_sec) {
+ /*
+ * If the encoder load is higher, use it. The encoder votes for the
+ * higher clock, and since the encoder and decoder run on parallel
+ * cores, that clock should also suffice for decoder use cases.
+ */
+ num_mbs_per_sec = enc_mbs_per_sec;
+ } else {
+ /*
+ * If the decoder load is higher, picking a clock is trickier: a
+ * larger decoder load can still translate to a lower clock than a
+ * smaller encoder load. Since the driver cannot tell which vote is
+ * correct, use the total load.
+ */
+ num_mbs_per_sec = enc_mbs_per_sec + dec_mbs_per_sec;
+ }
+
return msm_comm_scale_clocks_load(core, num_mbs_per_sec,
- LOAD_CALC_NO_QUIRKS);
+ LOAD_CALC_NO_QUIRKS);
}
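
As a worked example of the vote above (illustrative numbers only): with an encoder load of 240000 macroblocks per second and a decoder load of 200000, the encoder branch is taken and 240000 is voted; with the loads reversed, the decoder is heavier and the sum 440000 is voted instead.
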
int msm_comm_scale_clocks_load(struct msm_vidc_core *core,
@@ -4896,6 +4915,9 @@ int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
return -ENOTSUPP;
}
+ if (!rc)
+ msm_dcvs_try_enable(inst);
+
if (!rc) {
if (inst->prop.width[CAPTURE_PORT] < capability->width.min ||
inst->prop.height[CAPTURE_PORT] <
@@ -5205,6 +5227,7 @@ int msm_vidc_comm_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a)
msm_dcvs_init_load(inst);
}
msm_comm_scale_clocks_and_bus(inst);
+ msm_dcvs_try_enable(inst);
}
exit:
return rc;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
index c03f887be6f6..9e67ef096c63 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
@@ -20,10 +20,19 @@
((__cur_mbpf) >= (__min_mbpf))
static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst);
-static bool msm_dcvs_enc_check(struct msm_vidc_inst *inst);
static int msm_dcvs_enc_scale_clocks(struct msm_vidc_inst *inst);
static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd);
+int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
+{
+ if (!inst) {
+ dprintk(VIDC_ERR, "%s: Invalid args: %p\n", __func__, inst);
+ return -EINVAL;
+ }
+ inst->dcvs_mode = msm_dcvs_check_supported(inst);
+ return 0;
+}
+
static inline int msm_dcvs_get_mbs_per_frame(struct msm_vidc_inst *inst)
{
int height, width;
@@ -41,20 +50,27 @@ static inline int msm_dcvs_get_mbs_per_frame(struct msm_vidc_inst *inst)
return NUM_MBS_PER_FRAME(height, width);
}
-static inline int msm_dcvs_count_active_instances(struct msm_vidc_core *core)
+static inline int msm_dcvs_count_active_instances(struct msm_vidc_core *core,
+ enum session_type session_type)
{
int active_instances = 0;
- struct msm_vidc_inst *inst = NULL;
+ struct msm_vidc_inst *temp = NULL;
if (!core) {
dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, core);
return -EINVAL;
}
+ /* DCVS is allowed only in the following cases:
+ * Decoder DCVS : only ONE decoder session.
+ * Encoder DCVS : only ONE encoder session plus ONE decoder session.
+ */
mutex_lock(&core->lock);
- list_for_each_entry(inst, &core->instances, list) {
- if (inst->state >= MSM_VIDC_OPEN_DONE &&
- inst->state < MSM_VIDC_STOP_DONE)
+ list_for_each_entry(temp, &core->instances, list) {
+ if (temp->state >= MSM_VIDC_OPEN_DONE &&
+ temp->state < MSM_VIDC_STOP_DONE &&
+ (temp->session_type == session_type ||
+ temp->session_type == MSM_VIDC_ENCODER))
active_instances++;
}
mutex_unlock(&core->lock);
@@ -112,17 +128,12 @@ static void msm_dcvs_enc_check_and_scale_clocks(struct msm_vidc_inst *inst)
{
int rc = 0;
- if (inst->session_type == MSM_VIDC_ENCODER && msm_vidc_enc_dcvs_mode) {
- inst->dcvs_mode = msm_dcvs_check_supported(inst);
- dprintk(VIDC_DBG, "%s: session DCVS %s supported\n",
- __func__, inst->dcvs_mode ? "" : "not");
-
- if (inst->dcvs_mode) {
- rc = msm_dcvs_enc_scale_clocks(inst);
- if (rc) {
- dprintk(VIDC_DBG,
+ if (inst->session_type == MSM_VIDC_ENCODER &&
+ msm_vidc_enc_dcvs_mode) {
+ rc = msm_dcvs_enc_scale_clocks(inst);
+ if (rc) {
+ dprintk(VIDC_DBG,
"ENC_DCVS: error while scaling clocks\n");
- }
}
}
}
@@ -131,28 +142,14 @@ static void msm_dcvs_dec_check_and_scale_clocks(struct msm_vidc_inst *inst)
{
int rc = 0;
- if (inst->session_type != MSM_VIDC_DECODER || !msm_vidc_dec_dcvs_mode)
- return;
-
- if (msm_dcvs_check_supported(inst)) {
- inst->dcvs_mode = true;
- dprintk(VIDC_DBG,
- "%s: session DCVS supported, decode_dcvs_mode = %d\n",
- __func__, inst->dcvs_mode);
- } else {
- inst->dcvs_mode = false;
- dprintk(VIDC_DBG,
- "%s: session DCVS not supported, decode_dcvs_mode = %d\n",
- __func__, inst->dcvs_mode);
- }
-
- if (msm_vidc_dec_dcvs_mode && inst->dcvs_mode) {
+ if (inst->session_type == MSM_VIDC_DECODER &&
+ msm_vidc_dec_dcvs_mode) {
msm_dcvs_monitor_buffer(inst);
rc = msm_dcvs_dec_scale_clocks(inst, false);
if (rc) {
dprintk(VIDC_ERR,
- "%s: Failed to scale clocks in DCVS: %d\n",
- __func__, rc);
+ "%s: Failed to scale clocks in DCVS: %d\n",
+ __func__, rc);
}
}
}
@@ -163,6 +160,11 @@ void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb)
dprintk(VIDC_ERR, "%s Invalid args: %pK\n", __func__, inst);
return;
}
+ msm_dcvs_try_enable(inst);
+ if (!inst->dcvs_mode) {
+ dprintk(VIDC_DBG, "DCVS is not enabled\n");
+ return;
+ }
if (is_etb)
msm_dcvs_enc_check_and_scale_clocks(inst);
@@ -531,48 +533,6 @@ static int msm_dcvs_dec_scale_clocks(struct msm_vidc_inst *inst, bool fbd)
return rc;
}
-static bool msm_dcvs_enc_check(struct msm_vidc_inst *inst)
-{
- int num_mbs_per_frame = 0;
- long int instance_load = 0;
- long int dcvs_limit = 0;
- bool dcvs_check_passed = false, is_codec_supported = false;
- struct msm_vidc_platform_resources *res = NULL;
-
- if (!inst || !inst->core) {
- dprintk(VIDC_ERR, "%s Invalid params\n", __func__);
- return dcvs_check_passed;
- }
-
- res = &inst->core->resources;
- if (!res->dcvs_limit) {
- dprintk(VIDC_ERR,
- "%s Dcvs limit table uninitialized\n", __func__);
- return false;
- }
-
- is_codec_supported =
- msm_dcvs_check_codec_supported(
- inst->fmts[CAPTURE_PORT]->fourcc,
- inst->dcvs.supported_codecs,
- inst->session_type);
-
- num_mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
- instance_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
- dcvs_limit =
- (long int)res->dcvs_limit[inst->session_type].min_mbpf *
- res->dcvs_limit[inst->session_type].fps;
-
- if (msm_vidc_enc_dcvs_mode && is_codec_supported &&
- inst->dcvs.is_power_save_mode &&
- IS_VALID_DCVS_SESSION(num_mbs_per_frame,
- res->dcvs_limit[inst->session_type].min_mbpf) &&
- IS_VALID_DCVS_SESSION(instance_load, dcvs_limit)) {
- dcvs_check_passed = true;
- }
- return dcvs_check_passed;
-}
-
static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
{
int num_mbs_per_frame = 0, instance_count = 0;
@@ -583,6 +543,7 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
struct hal_buffer_requirements *output_buf_req;
struct dcvs_stats *dcvs;
bool is_codec_supported = false;
+ bool is_dcvs_supported = true;
struct msm_vidc_platform_resources *res = NULL;
if (!inst || !inst->core || !inst->core->device) {
@@ -599,104 +560,88 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
"%s: dcvs limit table not found\n", __func__);
return false;
}
- instance_count = msm_dcvs_count_active_instances(core);
+ instance_count = msm_dcvs_count_active_instances(core,
+ inst->session_type);
+ num_mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
+ instance_load = msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS);
+ dcvs_limit =
+ (long int)res->dcvs_limit[inst->session_type].min_mbpf *
+ res->dcvs_limit[inst->session_type].fps;
+ inst->dcvs.extra_buffer_count = 0;
- if (instance_count == 1 && inst->session_type == MSM_VIDC_DECODER &&
- !msm_comm_turbo_session(inst)) {
- num_mbs_per_frame = msm_dcvs_get_mbs_per_frame(inst);
- instance_load = msm_comm_get_inst_load(inst,
- LOAD_CALC_NO_QUIRKS);
- output_buf_req = get_buff_req_buffer(inst,
- msm_comm_get_hal_output_buffer(inst));
- dcvs_limit =
- (long int)res->dcvs_limit[inst->session_type].min_mbpf *
- res->dcvs_limit[inst->session_type].fps;
- is_codec_supported =
- msm_dcvs_check_codec_supported(
- inst->fmts[OUTPUT_PORT]->fourcc,
- inst->dcvs.supported_codecs,
- inst->session_type);
- if (!is_codec_supported ||
- !IS_VALID_DCVS_SESSION(num_mbs_per_frame,
- res->dcvs_limit[inst->session_type].min_mbpf) ||
- !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
- inst->seqchanged_count > 1)
- return false;
+ if (!IS_VALID_DCVS_SESSION(num_mbs_per_frame,
+ res->dcvs_limit[inst->session_type].min_mbpf)) {
+ inst->dcvs.extra_buffer_count = 0;
+ is_dcvs_supported = false;
+ goto dcvs_decision_done;
+ }
+
+ if (inst->session_type == MSM_VIDC_DECODER) {
+ inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+ output_buf_req = get_buff_req_buffer(inst,
+ msm_comm_get_hal_output_buffer(inst));
if (!output_buf_req) {
dprintk(VIDC_ERR,
- "%s: No buffer requirement for buffer type %x\n",
- __func__, HAL_BUFFER_OUTPUT);
+ "%s: No buffer requirement for buffer type %x\n",
+ __func__, HAL_BUFFER_OUTPUT);
return false;
}
- } else if (instance_count == 1 &&
- inst->session_type == MSM_VIDC_ENCODER &&
- !msm_comm_turbo_session(inst)) {
- if (!msm_dcvs_enc_check(inst))
- return false;
- } else {
- /*
- * For multiple instance use case with 4K, clocks will be scaled
- * as per load in streamon, but the clocks may be scaled
- * down as DCVS is running for first playback instance
- * Rescaling the core clock for multiple instance use case
- */
- if (!dcvs->is_clock_scaled) {
- if (!msm_comm_scale_clocks(core)) {
- dcvs->is_clock_scaled = true;
- dprintk(VIDC_DBG,
- "%s: Scaled clocks = %d\n",
- __func__, dcvs->is_clock_scaled);
- } else {
- dprintk(VIDC_DBG,
- "%s: Failed to Scale clocks. Perf might be impacted\n",
- __func__);
- }
+ is_codec_supported =
+ msm_dcvs_check_codec_supported(
+ inst->fmts[OUTPUT_PORT]->fourcc,
+ inst->dcvs.supported_codecs,
+ inst->session_type);
+ if (!is_codec_supported ||
+ !msm_vidc_dec_dcvs_mode) {
+ inst->dcvs.extra_buffer_count = 0;
+ is_dcvs_supported = false;
+ goto dcvs_decision_done;
}
- /*
- * For multiple instance use case turn OFF DCVS algorithm
- * immediately
- */
+ if (msm_comm_turbo_session(inst) ||
+ !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+ instance_count > 1)
+ is_dcvs_supported = false;
+ }
+ if (inst->session_type == MSM_VIDC_ENCODER) {
+ inst->dcvs.extra_buffer_count = DCVS_ENC_EXTRA_OUTPUT_BUFFERS;
+ is_codec_supported =
+ msm_dcvs_check_codec_supported(
+ inst->fmts[CAPTURE_PORT]->fourcc,
+ inst->dcvs.supported_codecs,
+ inst->session_type);
+ if (!is_codec_supported ||
+ !msm_vidc_enc_dcvs_mode) {
+ inst->dcvs.extra_buffer_count = 0;
+ is_dcvs_supported = false;
+ goto dcvs_decision_done;
+ }
+ if (msm_comm_turbo_session(inst) ||
+ !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+ instance_count > 1)
+ is_dcvs_supported = false;
+ }
+dcvs_decision_done:
+ if (!is_dcvs_supported) {
+ msm_comm_scale_clocks(core);
if (instance_count > 1) {
mutex_lock(&core->lock);
list_for_each_entry(temp, &core->instances, list)
temp->dcvs_mode = false;
mutex_unlock(&core->lock);
}
-
- return false;
}
-
- return true;
+ return is_dcvs_supported;
}
int msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst)
{
- int extra_buffer = 0;
-
if (!inst) {
dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
return 0;
}
- if (inst->session_type == MSM_VIDC_ENCODER) {
- if (msm_dcvs_enc_check(inst))
- extra_buffer = DCVS_ENC_EXTRA_OUTPUT_BUFFERS;
- } else if (inst->session_type == MSM_VIDC_DECODER) {
- if (msm_dcvs_check_supported(inst))
- extra_buffer = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
- }
- return extra_buffer;
+ return inst->dcvs.extra_buffer_count;
}
-void msm_dcvs_enc_set_power_save_mode(struct msm_vidc_inst *inst,
- bool is_power_save_mode)
-{
- if (!inst) {
- dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
- return;
- }
-
- inst->dcvs.is_power_save_mode = is_power_save_mode;
-}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.h b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.h
index 17040166b1e4..d40473976ebb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,6 +36,5 @@ void msm_dcvs_init_load(struct msm_vidc_inst *inst);
void msm_dcvs_monitor_buffer(struct msm_vidc_inst *inst);
void msm_dcvs_check_and_scale_clocks(struct msm_vidc_inst *inst, bool is_etb);
int msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst);
-void msm_dcvs_enc_set_power_save_mode(struct msm_vidc_inst *inst,
- bool is_power_save_mode);
+int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
#endif
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 72c1ddcf3a70..b6e74715ad07 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -204,9 +204,9 @@ struct dcvs_stats {
int load_high;
int min_threshold;
int max_threshold;
- bool is_clock_scaled;
int etb_counter;
bool is_power_save_mode;
+ unsigned int extra_buffer_count;
u32 supported_codecs;
};
@@ -279,7 +279,6 @@ struct msm_vidc_inst {
bool in_reconfig;
u32 reconfig_width;
u32 reconfig_height;
- u32 seqchanged_count;
struct dentry *debugfs_root;
void *priv;
struct msm_vidc_debug debug;
diff --git a/drivers/mfd/qcom-i2c-pmic.c b/drivers/mfd/qcom-i2c-pmic.c
index 9eb75d876577..ea5ac972b096 100644
--- a/drivers/mfd/qcom-i2c-pmic.c
+++ b/drivers/mfd/qcom-i2c-pmic.c
@@ -543,7 +543,8 @@ static int i2c_pmic_probe(struct i2c_client *client,
}
rc = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- i2c_pmic_irq_handler, IRQF_ONESHOT,
+ i2c_pmic_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
"i2c_pmic_stat_irq", chip);
if (rc < 0) {
pr_err("Couldn't request irq %d rc=%d\n", client->irq, rc);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 0568769fd94a..be3ccf2536d9 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -3707,6 +3707,19 @@ static void sdhci_msm_init(struct sdhci_host *host)
msm_host->pdata->pm_qos_data.latency);
}
+static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
+ u32 max_curr = 0;
+
+ if (curr_slot && curr_slot->vdd_data)
+ max_curr = curr_slot->vdd_data->hpm_uA;
+
+ return max_curr;
+}
+
static struct sdhci_ops sdhci_msm_ops = {
.crypto_engine_cfg = sdhci_msm_ice_cfg,
.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
@@ -3732,6 +3745,7 @@ static struct sdhci_ops sdhci_msm_ops = {
.init = sdhci_msm_init,
.pre_req = sdhci_msm_pre_req,
.post_req = sdhci_msm_post_req,
+ .get_current_limit = sdhci_msm_get_current_limit,
};
static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index 801c94859084..5d9329168699 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -2761,14 +2761,8 @@ static void cnss_crash_shutdown(const struct subsys_desc *subsys)
wdrv = penv->driver;
pdev = penv->pdev;
- penv->dump_data.version = CNSS_DUMP_FORMAT_VER;
- strlcpy(penv->dump_data.name, CNSS_DUMP_NAME,
- sizeof(penv->dump_data.name));
-
if (pdev && wdrv && wdrv->crash_shutdown)
wdrv->crash_shutdown(pdev);
-
- penv->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
}
void cnss_device_self_recovery(void)
@@ -2829,6 +2823,28 @@ static struct notifier_block mnb = {
.notifier_call = cnss_modem_notifier_nb,
};
+static int cnss_init_dump_entry(void)
+{
+ struct msm_dump_entry dump_entry;
+
+ if (!penv)
+ return -ENODEV;
+
+ if (!penv->ramdump_dynamic)
+ return 0;
+
+ penv->dump_data.addr = penv->ramdump_phys;
+ penv->dump_data.len = penv->ramdump_size;
+ penv->dump_data.version = CNSS_DUMP_FORMAT_VER;
+ penv->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
+ strlcpy(penv->dump_data.name, CNSS_DUMP_NAME,
+ sizeof(penv->dump_data.name));
+ dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+ dump_entry.addr = virt_to_phys(&penv->dump_data);
+
+ return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+}
+
static int cnss_probe(struct platform_device *pdev)
{
int ret = 0;
@@ -2836,7 +2852,6 @@ static int cnss_probe(struct platform_device *pdev)
const char *client_desc;
struct device *dev = &pdev->dev;
u32 rc_num;
- struct msm_dump_entry dump_entry;
struct resource *res;
u32 ramdump_size = 0;
u32 smmu_iova_address[2];
@@ -2952,18 +2967,10 @@ static int cnss_probe(struct platform_device *pdev)
goto skip_ramdump;
}
- if (penv->ramdump_dynamic) {
- penv->dump_data.addr = penv->ramdump_phys;
- penv->dump_data.len = penv->ramdump_size;
- dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
- dump_entry.addr = virt_to_phys(&penv->dump_data);
-
- ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
- if (ret) {
- pr_err("%s: Dump table setup failed: %d\n",
- __func__, ret);
- goto err_ramdump_create;
- }
+ ret = cnss_init_dump_entry();
+ if (ret) {
+ pr_err("%s: Dump table setup failed: %d\n", __func__, ret);
+ goto err_ramdump_create;
}
penv->ramdump_dev = create_ramdump_device(penv->subsysdesc.name,
diff --git a/drivers/net/wireless/cnss/cnss_sdio.c b/drivers/net/wireless/cnss/cnss_sdio.c
index f773c5993d44..01b969ec627f 100644
--- a/drivers/net/wireless/cnss/cnss_sdio.c
+++ b/drivers/net/wireless/cnss/cnss_sdio.c
@@ -21,6 +21,8 @@
#include <linux/slab.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/io.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/subsystem_notif.h>
@@ -46,7 +48,7 @@
/* Values for Dynamic Ramdump Collection*/
#define CNSS_DUMP_FORMAT_VER 0x11
#define CNSS_DUMP_MAGIC_VER_V2 0x42445953
-#define CNSS_DUMP_NAME "CNSS_WLAN"
+#define CNSS_DUMP_NAME "CNSS_WLAN_SDIO"
#define CNSS_PINCTRL_SLEEP_STATE "sleep"
#define CNSS_PINCTRL_ACTIVE_STATE "active"
@@ -60,7 +62,11 @@ struct cnss_sdio_regulator {
struct cnss_sdio_info {
struct cnss_sdio_wlan_driver *wdrv;
struct sdio_func *func;
+ struct mmc_card *card;
+ struct mmc_host *host;
+ struct device *dev;
const struct sdio_device_id *id;
+ bool skip_wlan_en_toggle;
};
struct cnss_ssr_info {
@@ -212,19 +218,103 @@ void cnss_sdio_remove_pm_qos(void)
}
EXPORT_SYMBOL(cnss_sdio_remove_pm_qos);
+static int cnss_put_hw_resources(struct device *dev)
+{
+ int ret = -EINVAL;
+ struct cnss_sdio_info *info;
+ struct mmc_host *host;
+
+ if (!cnss_pdata)
+ return ret;
+
+ info = &cnss_pdata->cnss_sdio_info;
+
+ if (info->skip_wlan_en_toggle) {
+ pr_debug("%s: HW doesn't support wlan toggling\n", __func__);
+ return 0;
+ }
+
+ host = info->host;
+
+ if (!host) {
+ pr_err("%s: MMC host is invalid\n", __func__);
+ return 0;
+ }
+
+ ret = mmc_power_save_host(host);
+ if (ret) {
+ pr_err("%s: Failed to Power Save Host err:%d\n", __func__,
+ ret);
+ return ret;
+ }
+
+ if (!cnss_pdata->regulator.wlan_vreg) {
+ pr_debug("%s: wlan_vreg regulator is invalid\n", __func__);
+ return ret;
+ }
+
+ regulator_disable(cnss_pdata->regulator.wlan_vreg);
+
+ return ret;
+}
+
+static int cnss_get_hw_resources(struct device *dev)
+{
+ int ret = -EINVAL;
+ struct mmc_host *host;
+ struct cnss_sdio_info *info;
+
+ if (!cnss_pdata)
+ return ret;
+
+ info = &cnss_pdata->cnss_sdio_info;
+
+ if (info->skip_wlan_en_toggle) {
+ pr_debug("%s: HW doesn't support wlan toggling\n", __func__);
+ return 0;
+ }
+
+ host = info->host;
+
+ ret = regulator_enable(cnss_pdata->regulator.wlan_vreg);
+ if (ret) {
+ pr_err("%s: Failed to enable wlan vreg\n", __func__);
+ return ret;
+ }
+
+ ret = mmc_power_restore_host(host);
+ if (ret) {
+ pr_err("%s: Failed to restore host power ret:%d\n", __func__,
+ ret);
+ regulator_disable(cnss_pdata->regulator.wlan_vreg);
+ }
+
+ return ret;
+}
+
static int cnss_sdio_shutdown(const struct subsys_desc *subsys, bool force_stop)
{
struct cnss_sdio_info *cnss_info;
struct cnss_sdio_wlan_driver *wdrv;
+ int ret = 0;
if (!cnss_pdata)
return -ENODEV;
cnss_info = &cnss_pdata->cnss_sdio_info;
wdrv = cnss_info->wdrv;
- if (wdrv && wdrv->shutdown)
- wdrv->shutdown(cnss_info->func);
- return 0;
+ if (!wdrv)
+ return 0;
+ if (!wdrv->shutdown)
+ return 0;
+
+ wdrv->shutdown(cnss_info->func);
+ ret = cnss_put_hw_resources(cnss_info->dev);
+
+ if (ret)
+ pr_err("%s: Failed to put hw resources\n", __func__);
+
+ return ret;
}
static int cnss_sdio_powerup(const struct subsys_desc *subsys)
@@ -238,11 +328,23 @@ static int cnss_sdio_powerup(const struct subsys_desc *subsys)
cnss_info = &cnss_pdata->cnss_sdio_info;
wdrv = cnss_info->wdrv;
- if (wdrv && wdrv->reinit) {
- ret = wdrv->reinit(cnss_info->func, cnss_info->id);
- if (ret)
- pr_err("%s: wlan reinit error=%d\n", __func__, ret);
+
+ if (!wdrv)
+ return 0;
+
+ if (!wdrv->reinit)
+ return 0;
+
+ ret = cnss_get_hw_resources(cnss_info->dev);
+ if (ret) {
+ pr_err("%s: Failed to power up HW\n", __func__);
+ return ret;
}
+
+ ret = wdrv->reinit(cnss_info->func, cnss_info->id);
+ if (ret)
+ pr_err("%s: wlan reinit error=%d\n", __func__, ret);
+
return ret;
}
@@ -551,25 +653,41 @@ int cnss_get_restart_level(void)
}
EXPORT_SYMBOL(cnss_get_restart_level);
-static int cnss_sdio_wlan_inserted(
- struct sdio_func *func,
- const struct sdio_device_id *id)
+static int cnss_sdio_wlan_inserted(struct sdio_func *func,
+ const struct sdio_device_id *id)
{
+ struct cnss_sdio_info *info;
+
if (!cnss_pdata)
return -ENODEV;
- cnss_pdata->cnss_sdio_info.func = func;
- cnss_pdata->cnss_sdio_info.id = id;
+ info = &cnss_pdata->cnss_sdio_info;
+
+ info->func = func;
+ info->card = func->card;
+ info->host = func->card->host;
+ info->id = id;
+ info->dev = &func->dev;
+
+ cnss_put_hw_resources(cnss_pdata->cnss_sdio_info.dev);
+
+ pr_info("%s: SDIO Device is Probed\n", __func__);
return 0;
}
static void cnss_sdio_wlan_removed(struct sdio_func *func)
{
+ struct cnss_sdio_info *info;
+
if (!cnss_pdata)
return;
- cnss_pdata->cnss_sdio_info.func = NULL;
- cnss_pdata->cnss_sdio_info.id = NULL;
+ info = &cnss_pdata->cnss_sdio_info;
+
+ info->host = NULL;
+ info->card = NULL;
+ info->func = NULL;
+ info->id = NULL;
}
#if defined(CONFIG_PM)
@@ -577,6 +695,8 @@ static int cnss_sdio_wlan_suspend(struct device *dev)
{
struct cnss_sdio_wlan_driver *wdrv;
struct cnss_sdio_bus_bandwidth *bus_bandwidth;
+ struct sdio_func *func;
+
int error = 0;
if (!cnss_pdata)
@@ -588,11 +708,13 @@ static int cnss_sdio_wlan_suspend(struct device *dev)
bus_bandwidth->bus_client, CNSS_BUS_WIDTH_NONE);
}
+ func = cnss_pdata->cnss_sdio_info.func;
wdrv = cnss_pdata->cnss_sdio_info.wdrv;
if (!wdrv) {
/* This can happen when no wlan driver loaded (no register to
* platform driver).
*/
+ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
pr_debug("wlan driver not registered\n");
return 0;
}
@@ -692,29 +814,49 @@ EXPORT_SYMBOL(cnss_sdio_configure_spdt);
int cnss_sdio_wlan_register_driver(struct cnss_sdio_wlan_driver *driver)
{
struct cnss_sdio_info *cnss_info;
- int error = 0;
+ struct device *dev;
+ int error = -EINVAL;
if (!cnss_pdata)
return -ENODEV;
cnss_info = &cnss_pdata->cnss_sdio_info;
+ dev = cnss_info->dev;
+
if (cnss_info->wdrv)
pr_debug("%s:wdrv already exists wdrv(%p)\n", __func__,
cnss_info->wdrv);
+ cnss_info->wdrv = driver;
+
+ if (!driver)
+ return error;
+
+ error = cnss_get_hw_resources(dev);
+ if (error) {
+ pr_err("%s: Failed to restore power err:%d\n", __func__, error);
+ return error;
+ }
+
error = cnss_set_pinctrl_state(cnss_pdata, PINCTRL_ACTIVE);
if (error) {
pr_err("%s: Fail to set pinctrl to active state\n", __func__);
- return -EFAULT;
+ goto put_hw;
}
- cnss_info->wdrv = driver;
- if (driver->probe) {
- error = driver->probe(cnss_info->func, cnss_info->id);
- if (error)
- pr_err("%s: wlan probe failed error=%d\n", __func__,
- error);
+ error = driver->probe ? driver->probe(cnss_info->func,
+ cnss_info->id) : error;
+ if (error) {
+ pr_err("%s: wlan probe failed error=%d\n", __func__, error);
+ goto pinctrl_sleep;
}
+
+ return error;
+
+pinctrl_sleep:
+ cnss_set_pinctrl_state(cnss_pdata, PINCTRL_SLEEP);
+put_hw:
+ cnss_put_hw_resources(dev);
return error;
}
EXPORT_SYMBOL(cnss_sdio_wlan_register_driver);
@@ -746,10 +888,17 @@ cnss_sdio_wlan_unregister_driver(struct cnss_sdio_wlan_driver *driver)
pr_err("%s: driver not registered\n", __func__);
return;
}
- if (cnss_info->wdrv->remove)
- cnss_info->wdrv->remove(cnss_info->func);
+
+ if (!driver)
+ return;
+
+ if (!driver->remove)
+ return;
+
+ driver->remove(cnss_info->func);
cnss_info->wdrv = NULL;
cnss_set_pinctrl_state(cnss_pdata, PINCTRL_SLEEP);
+ cnss_put_hw_resources(cnss_info->dev);
}
EXPORT_SYMBOL(cnss_sdio_wlan_unregister_driver);
@@ -1051,6 +1200,8 @@ static int cnss_sdio_init_bus_bandwidth(void)
static int cnss_sdio_probe(struct platform_device *pdev)
{
int error;
+ struct device *dev = &pdev->dev;
+ struct cnss_sdio_info *info;
if (pdev->dev.of_node) {
cnss_pdata = devm_kzalloc(
@@ -1065,6 +1216,7 @@ static int cnss_sdio_probe(struct platform_device *pdev)
return -EINVAL;
cnss_pdata->pdev = pdev;
+ info = &cnss_pdata->cnss_sdio_info;
error = cnss_sdio_pinctrl_init(cnss_pdata, pdev);
if (error) {
@@ -1103,6 +1255,9 @@ static int cnss_sdio_probe(struct platform_device *pdev)
}
}
+ info->skip_wlan_en_toggle = of_property_read_bool(dev->of_node,
+ "qcom,skip-wlan-en-toggle");
+
error = cnss_sdio_wlan_init();
if (error) {
dev_err(&pdev->dev, "cnss wlan init failed error=%d\n", error);
@@ -1152,15 +1307,20 @@ err_wlan_enable_regulator:
static int cnss_sdio_remove(struct platform_device *pdev)
{
+ struct cnss_sdio_info *info;
+
if (!cnss_pdata)
return -ENODEV;
+ info = &cnss_pdata->cnss_sdio_info;
+
cnss_sdio_deinit_bus_bandwidth();
cnss_sdio_wlan_exit();
cnss_subsys_exit();
cnss_ramdump_cleanup();
+ cnss_put_hw_resources(info->dev);
cnss_sdio_release_resource();
-
+ cnss_pdata = NULL;
return 0;
}
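
After this change, cnss_sdio_wlan_register_driver() owns the power-up sequence (restore HW resources, move pinctrl to the active state, then call ->probe) and unwinds it in reverse on failure, while unregister and shutdown put the resources back; on boards with qcom,skip-wlan-en-toggle the get/put helpers are no-ops. A hedged sketch of how a WLAN driver sits on top of this API follows; the example_* names are hypothetical, and only the probe/remove hooks exercised in the hunks above are filled in.

    /* Hypothetical WLAN-side usage: by the time ->probe runs, the card is
     * already powered (cnss_get_hw_resources) and the pinctrl state is
     * active, so no wlan_en/vreg handling is needed here.
     */
    static int example_wlan_probe(struct sdio_func *func,
                                  const struct sdio_device_id *id)
    {
            return 0;       /* bring up the WLAN function */
    }

    static void example_wlan_remove(struct sdio_func *func)
    {
            /* runs before CNSS drops the HW resources again */
    }

    static struct cnss_sdio_wlan_driver example_wlan_driver = {
            .probe  = example_wlan_probe,
            .remove = example_wlan_remove,
    };

    static int __init example_wlan_init(void)
    {
            return cnss_sdio_wlan_register_driver(&example_wlan_driver);
    }

    static void __exit example_wlan_exit(void)
    {
            cnss_sdio_wlan_unregister_driver(&example_wlan_driver);
    }
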
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index e278aab1e530..3f186137e730 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -501,6 +501,7 @@ struct msm_pcie_clk_info_t {
struct clk *hdl;
char *name;
u32 freq;
+ bool config_mem;
bool required;
};
@@ -710,49 +711,49 @@ static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
static struct msm_pcie_clk_info_t
msm_pcie_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_CLK] = {
{
- {NULL, "pcie_0_ref_clk_src", 0, false},
- {NULL, "pcie_0_aux_clk", 1010000, true},
- {NULL, "pcie_0_cfg_ahb_clk", 0, true},
- {NULL, "pcie_0_mstr_axi_clk", 0, true},
- {NULL, "pcie_0_slv_axi_clk", 0, true},
- {NULL, "pcie_0_ldo", 0, true},
- {NULL, "pcie_0_smmu_clk", 0, false},
- {NULL, "pcie_phy_cfg_ahb_clk", 0, false},
- {NULL, "pcie_phy_aux_clk", 0, false},
- {NULL, "pcie_phy_reset", 0, false},
- {NULL, "pcie_phy_com_reset", 0, false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false},
- {NULL, "pcie_0_phy_reset", 0, true}
+ {NULL, "pcie_0_ref_clk_src", 0, false, false},
+ {NULL, "pcie_0_aux_clk", 1010000, false, true},
+ {NULL, "pcie_0_cfg_ahb_clk", 0, false, true},
+ {NULL, "pcie_0_mstr_axi_clk", 0, true, true},
+ {NULL, "pcie_0_slv_axi_clk", 0, true, true},
+ {NULL, "pcie_0_ldo", 0, false, true},
+ {NULL, "pcie_0_smmu_clk", 0, false, false},
+ {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+ {NULL, "pcie_phy_aux_clk", 0, false, false},
+ {NULL, "pcie_phy_reset", 0, false, false},
+ {NULL, "pcie_phy_com_reset", 0, false, false},
+ {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false, false},
+ {NULL, "pcie_0_phy_reset", 0, false, true}
},
{
- {NULL, "pcie_1_ref_clk_src", 0, false},
- {NULL, "pcie_1_aux_clk", 1010000, true},
- {NULL, "pcie_1_cfg_ahb_clk", 0, true},
- {NULL, "pcie_1_mstr_axi_clk", 0, true},
- {NULL, "pcie_1_slv_axi_clk", 0, true},
- {NULL, "pcie_1_ldo", 0, true},
- {NULL, "pcie_1_smmu_clk", 0, false},
- {NULL, "pcie_phy_cfg_ahb_clk", 0, false},
- {NULL, "pcie_phy_aux_clk", 0, false},
- {NULL, "pcie_phy_reset", 0, false},
- {NULL, "pcie_phy_com_reset", 0, false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false},
- {NULL, "pcie_1_phy_reset", 0, true}
+ {NULL, "pcie_1_ref_clk_src", 0, false, false},
+ {NULL, "pcie_1_aux_clk", 1010000, false, true},
+ {NULL, "pcie_1_cfg_ahb_clk", 0, false, true},
+ {NULL, "pcie_1_mstr_axi_clk", 0, true, true},
+ {NULL, "pcie_1_slv_axi_clk", 0, true, true},
+ {NULL, "pcie_1_ldo", 0, false, true},
+ {NULL, "pcie_1_smmu_clk", 0, false, false},
+ {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+ {NULL, "pcie_phy_aux_clk", 0, false, false},
+ {NULL, "pcie_phy_reset", 0, false, false},
+ {NULL, "pcie_phy_com_reset", 0, false, false},
+ {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false, false},
+ {NULL, "pcie_1_phy_reset", 0, false, true}
},
{
- {NULL, "pcie_2_ref_clk_src", 0, false},
- {NULL, "pcie_2_aux_clk", 1010000, true},
- {NULL, "pcie_2_cfg_ahb_clk", 0, true},
- {NULL, "pcie_2_mstr_axi_clk", 0, true},
- {NULL, "pcie_2_slv_axi_clk", 0, true},
- {NULL, "pcie_2_ldo", 0, true},
- {NULL, "pcie_2_smmu_clk", 0, false},
- {NULL, "pcie_phy_cfg_ahb_clk", 0, false},
- {NULL, "pcie_phy_aux_clk", 0, false},
- {NULL, "pcie_phy_reset", 0, false},
- {NULL, "pcie_phy_com_reset", 0, false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false},
- {NULL, "pcie_2_phy_reset", 0, true}
+ {NULL, "pcie_2_ref_clk_src", 0, false, false},
+ {NULL, "pcie_2_aux_clk", 1010000, false, true},
+ {NULL, "pcie_2_cfg_ahb_clk", 0, false, true},
+ {NULL, "pcie_2_mstr_axi_clk", 0, true, true},
+ {NULL, "pcie_2_slv_axi_clk", 0, true, true},
+ {NULL, "pcie_2_ldo", 0, false, true},
+ {NULL, "pcie_2_smmu_clk", 0, false, false},
+ {NULL, "pcie_phy_cfg_ahb_clk", 0, false, false},
+ {NULL, "pcie_phy_aux_clk", 0, false, false},
+ {NULL, "pcie_phy_reset", 0, false, false},
+ {NULL, "pcie_phy_com_reset", 0, false, false},
+ {NULL, "pcie_phy_nocsr_com_phy_reset", 0, false, false},
+ {NULL, "pcie_2_phy_reset", 0, false, true}
}
};
@@ -760,13 +761,13 @@ static struct msm_pcie_clk_info_t
static struct msm_pcie_clk_info_t
msm_pcie_pipe_clk_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_CLK] = {
{
- {NULL, "pcie_0_pipe_clk", 125000000, true},
+ {NULL, "pcie_0_pipe_clk", 125000000, true, true},
},
{
- {NULL, "pcie_1_pipe_clk", 125000000, true},
+ {NULL, "pcie_1_pipe_clk", 125000000, true, true},
},
{
- {NULL, "pcie_2_pipe_clk", 125000000, true},
+ {NULL, "pcie_2_pipe_clk", 125000000, true, true},
}
};
@@ -861,6 +862,32 @@ static inline void msm_pcie_write_reg_field(void *base, u32 offset,
wmb();
}
+static inline void msm_pcie_config_clock_mem(struct msm_pcie_dev_t *dev,
+ struct msm_pcie_clk_info_t *info)
+{
+ int ret;
+
+ ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_MEM);
+ if (ret)
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't configure core memory for clk %s: %d.\n",
+ dev->rc_idx, info->name, ret);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d configured core memory for clk %s.\n",
+ dev->rc_idx, info->name);
+
+ ret = clk_set_flags(info->hdl, CLKFLAG_NORETAIN_PERIPH);
+ if (ret)
+ PCIE_ERR(dev,
+ "PCIe: RC%d can't configure peripheral memory for clk %s: %d.\n",
+ dev->rc_idx, info->name, ret);
+ else
+ PCIE_DBG2(dev,
+ "PCIe: RC%d configured peripheral memory for clk %s.\n",
+ dev->rc_idx, info->name);
+}
+
#if defined(CONFIG_ARCH_FSM9010)
#define PCIE20_PARF_PHY_STTS 0x3c
#define PCIE2_PHY_RESET_CTRL 0x44
@@ -3450,6 +3477,9 @@ static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
if (i >= MSM_PCIE_MAX_CLK - (dev->common_phy ? 4 : 1))
clk_reset(info->hdl, CLK_RESET_DEASSERT);
+ if (info->config_mem)
+ msm_pcie_config_clock_mem(dev, info);
+
if (info->freq) {
rc = clk_set_rate(info->hdl, info->freq);
if (rc) {
@@ -3543,6 +3573,9 @@ static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
clk_reset(info->hdl, CLK_RESET_DEASSERT);
+ if (info->config_mem)
+ msm_pcie_config_clock_mem(dev, info);
+
if (info->freq) {
rc = clk_set_rate(info->hdl, info->freq);
if (rc) {
@@ -3921,8 +3954,8 @@ static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
cnt = of_property_count_strings((&pdev->dev)->of_node,
"clock-names");
if (cnt > 0) {
- clkfreq = kzalloc(cnt * sizeof(*clkfreq),
- GFP_KERNEL);
+ clkfreq = kzalloc((MSM_PCIE_MAX_CLK + MSM_PCIE_MAX_PIPE_CLK) *
+ sizeof(*clkfreq), GFP_KERNEL);
if (!clkfreq) {
PCIE_ERR(dev, "PCIe: memory alloc failed for RC%d\n",
dev->rc_idx);
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.c b/drivers/phy/phy-qcom-ufs-qmp-v3.c
index 6b8dbc29f6e8..57c23f70eb63 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.c
@@ -23,12 +23,26 @@ int ufs_qcom_phy_qmp_v3_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
int err;
int tbl_size_A, tbl_size_B;
struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+ u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+ u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+ u16 step = ufs_qcom_phy->host_ctrl_rev_step;
tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
tbl_B = phy_cal_table_rate_B;
- tbl_A = phy_cal_table_rate_A;
- tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
+ if ((major == 0x3) && (minor == 0x000) && (step == 0x0000)) {
+ tbl_A = phy_cal_table_rate_A_3_0_0;
+ tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_0_0);
+ } else if ((major == 0x3) && (minor == 0x001) && (step == 0x0000)) {
+ tbl_A = phy_cal_table_rate_A_3_1_0;
+ tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_3_1_0);
+ } else {
+ dev_err(ufs_qcom_phy->dev,
+ "%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+ __func__, major, minor, step);
+ err = -ENODEV;
+ goto out;
+ }
err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
tbl_A, tbl_size_A,
@@ -39,6 +53,8 @@ int ufs_qcom_phy_qmp_v3_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
dev_err(ufs_qcom_phy->dev,
"%s: ufs_qcom_phy_calibrate() failed %d\n",
__func__, err);
+
+out:
return err;
}
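
The calibration routine now refuses to run against an unrecognised PHY revision: only host controller revisions 3.0.0 and 3.1.0 map to a rate-A table, and anything else returns -ENODEV instead of silently using a default. Below is an equivalent table-driven way to express the same selection; it is only a sketch of an alternative, not what the patch implements, and example_cal_match is a hypothetical type.

    /* Sketch: map (major, minor, step) to a rate-A table in one place so
     * supporting a new PHY revision only means adding a row.
     */
    struct example_cal_match {
            u8 major;
            u16 minor;
            u16 step;
            struct ufs_qcom_phy_calibration *tbl;
            int size;
    };

    static const struct example_cal_match example_cal_matches[] = {
            { 0x3, 0x000, 0x0000, phy_cal_table_rate_A_3_0_0,
              ARRAY_SIZE(phy_cal_table_rate_A_3_0_0) },
            { 0x3, 0x001, 0x0000, phy_cal_table_rate_A_3_1_0,
              ARRAY_SIZE(phy_cal_table_rate_A_3_1_0) },
    };

    /* lookup: NULL means the revision is unknown and calibration fails */
    static const struct example_cal_match *example_find_cal(u8 major,
                                                            u16 minor, u16 step)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(example_cal_matches); i++)
                    if (example_cal_matches[i].major == major &&
                        example_cal_matches[i].minor == minor &&
                        example_cal_matches[i].step == step)
                            return &example_cal_matches[i];
            return NULL;
    }
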
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.h b/drivers/phy/phy-qcom-ufs-qmp-v3.h
index 8b77e3a7fee2..cda57855acb5 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-v3.h
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.h
@@ -133,9 +133,12 @@
#define UFS_PHY_TX_SMALL_AMP_DRV_LVL PHY_OFF(0x34)
#define UFS_PHY_LINECFG_DISABLE PHY_OFF(0x130)
#define UFS_PHY_RX_SYM_RESYNC_CTRL PHY_OFF(0x134)
+#define UFS_PHY_RX_SIGDET_CTRL1 PHY_OFF(0x13C)
#define UFS_PHY_RX_SIGDET_CTRL2 PHY_OFF(0x140)
#define UFS_PHY_RX_PWM_GEAR_BAND PHY_OFF(0x14C)
#define UFS_PHY_PCS_READY_STATUS PHY_OFF(0x160)
+#define UFS_PHY_TX_MID_TERM_CTRL1 PHY_OFF(0x1BC)
+#define UFS_PHY_MULTI_LANE_CTRL1 PHY_OFF(0x1C4)
/* UFS PHY TX registers */
#define QSERDES_TX0_TRANSCEIVER_BIAS_EN TX_OFF(0, 0x5C)
@@ -143,6 +146,9 @@
#define QSERDES_TX0_LANE_MODE_2 TX_OFF(0, 0x90)
#define QSERDES_TX0_LANE_MODE_3 TX_OFF(0, 0x94)
+#define QSERDES_TX1_LANE_MODE_1 TX_OFF(1, 0x8C)
+
+
/* UFS PHY RX registers */
#define QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF RX_OFF(0, 0x24)
#define QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER RX_OFF(0, 0x28)
@@ -163,6 +169,22 @@
#define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL RX_OFF(0, 0x10C)
#define QSERDES_RX0_RX_INTERFACE_MODE RX_OFF(0, 0x11C)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF RX_OFF(1, 0x24)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER RX_OFF(1, 0x28)
+#define QSERDES_RX1_UCDR_SVS_SO_GAIN RX_OFF(1, 0x2C)
+#define QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN RX_OFF(1, 0x30)
+#define QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE RX_OFF(1, 0x34)
+#define QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW RX_OFF(1, 0x3C)
+#define QSERDES_RX1_UCDR_PI_CONTROLS RX_OFF(1, 0x44)
+#define QSERDES_RX1_RX_TERM_BW RX_OFF(1, 0x7C)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2 RX_OFF(1, 0xD4)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3 RX_OFF(1, 0xD8)
+#define QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4 RX_OFF(1, 0xDC)
+#define QSERDES_RX1_SIGDET_CNTRL RX_OFF(1, 0x104)
+#define QSERDES_RX1_SIGDET_LVL RX_OFF(1, 0x108)
+#define QSERDES_RX1_SIGDET_DEGLITCH_CNTRL RX_OFF(1, 0x10C)
+#define QSERDES_RX1_RX_INTERFACE_MODE RX_OFF(1, 0x11C)
+
#define UFS_PHY_RX_LINECFG_DISABLE_BIT BIT(1)
/*
@@ -177,10 +199,70 @@ struct ufs_qcom_phy_qmp_v3 {
struct ufs_qcom_phy common_cfg;
};
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_0_0[] = {
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xD5),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0A),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x36),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xDA),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xFF),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0C),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x36),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xC1),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_LVL, 0x24),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_CNTRL, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_DEGLITCH_CNTRL, 0x1E),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_INTERFACE_MODE, 0x40),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_TERM_BW, 0x5B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0xF1),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6C),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_3_1_0[] = {
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xD5),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
@@ -230,10 +312,29 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0xF1),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX1_LANE_MODE_1, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_LVL, 0x24),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_CNTRL, 0x0F),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_SIGDET_DEGLITCH_CNTRL, 0x1E),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_INTERFACE_MODE, 0x40),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_TERM_BW, 0x5B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SVS_SO_GAIN, 0x04),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_PI_CONTROLS, 0xF1),
+ UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX1_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6C),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_MID_TERM_CTRL1, 0x43),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL1, 0x0F),
};
static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index a2f1c5c9af27..9cb0b1f3c379 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -575,6 +575,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct ipa_ioc_v4_nat_del nat_del;
struct ipa_ioc_rm_dependency rm_depend;
size_t sz;
+ int pre_entry;
IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
@@ -623,11 +624,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
+ pre_entry =
+ ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
pyld_sz =
sizeof(struct ipa_ioc_nat_dma_cmd) +
- ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
- sizeof(struct ipa_ioc_nat_dma_one);
+ pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -638,7 +639,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
retval = -EFAULT;
break;
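
The check added above, and repeated for every variable-length ioctl payload in the hunks that follow, guards against a double-fetch race: the entry count is read once from the small fixed-size header, used to size the allocation, and then verified again after the full copy_from_user(), so a user-space thread racing between the two copies cannot change the count after the size was computed. A condensed sketch of the pattern, using the NAT DMA command as the example and a hypothetical example_fetch_* helper name:

    /* Sketch only: mirrors the IPA_IOC_NAT_DMA case above.  pre_entry is
     * the count taken from the first (header-only) fetch.
     */
    static struct ipa_ioc_nat_dma_cmd *example_fetch_nat_dma(unsigned long arg,
                                                             int pre_entry)
    {
            struct ipa_ioc_nat_dma_cmd *cmd;
            size_t pyld_sz = sizeof(*cmd) +
                             pre_entry * sizeof(struct ipa_ioc_nat_dma_one);

            cmd = kzalloc(pyld_sz, GFP_KERNEL);
            if (!cmd)
                    return NULL;

            /* second fetch: the payload, including the count, is re-read */
            if (copy_from_user(cmd, (const void __user *)arg, pyld_sz) ||
                cmd->entries != pre_entry) {
                    kfree(cmd);
                    return NULL;    /* copy failed or the counts disagree */
            }

            return cmd;
    }
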
@@ -663,10 +672,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
pyld_sz =
sizeof(struct ipa_ioc_add_hdr) +
- ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
- sizeof(struct ipa_hdr_add);
+ pre_entry * sizeof(struct ipa_hdr_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -676,6 +686,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_add_hdr((struct ipa_ioc_add_hdr *)param)) {
retval = -EFAULT;
break;
@@ -692,10 +711,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_hdr *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_hdr) +
- ((struct ipa_ioc_del_hdr *)header)->num_hdls *
- sizeof(struct ipa_hdr_del);
+ pre_entry * sizeof(struct ipa_hdr_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -705,6 +725,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_hdr *)param)->num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_del_hdr((struct ipa_ioc_del_hdr *)param)) {
retval = -EFAULT;
break;
@@ -721,10 +750,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_rt_rule) +
- ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
- sizeof(struct ipa_rt_rule_add);
+ pre_entry * sizeof(struct ipa_rt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -734,6 +764,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_rt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -750,10 +790,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_mdfy_rt_rule) +
- ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules *
- sizeof(struct ipa_rt_rule_mdfy);
+ pre_entry * sizeof(struct ipa_rt_rule_mdfy);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -763,6 +804,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_rt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -779,10 +830,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_rt_rule) +
- ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
- sizeof(struct ipa_rt_rule_del);
+ pre_entry * sizeof(struct ipa_rt_rule_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -792,6 +844,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -808,10 +869,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_flt_rule) +
- ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
- sizeof(struct ipa_flt_rule_add);
+ pre_entry * sizeof(struct ipa_flt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -821,6 +883,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_flt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -837,10 +909,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_flt_rule) +
- ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
- sizeof(struct ipa_flt_rule_del);
+ pre_entry * sizeof(struct ipa_flt_rule_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -850,6 +923,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_flt_rule *)param)->
+ num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -866,10 +949,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_mdfy_flt_rule) +
- ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules *
- sizeof(struct ipa_flt_rule_mdfy);
+ pre_entry * sizeof(struct ipa_flt_rule_mdfy);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -879,6 +963,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_flt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -992,9 +1086,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
- header)->num_tx_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_tx_props *)
+ header)->num_tx_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_tx_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1005,6 +1100,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
+ param)->num_tx_props
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_tx_props *)
+ param)->num_tx_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa_query_intf_tx_props(
(struct ipa_ioc_query_intf_tx_props *)param)) {
retval = -1;
@@ -1027,9 +1132,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
- header)->num_rx_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_rx_props *)
+ header)->num_rx_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_rx_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1040,6 +1146,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
+ param)->num_rx_props != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_rx_props *)
+ param)->num_rx_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa_query_intf_rx_props(
(struct ipa_ioc_query_intf_rx_props *)param)) {
retval = -1;
@@ -1062,9 +1177,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_ext_props *)
- header)->num_ext_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_ext_props *)
+ header)->num_ext_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_ext_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1075,6 +1191,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
+ param)->num_ext_props != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_ext_props *)
+ param)->num_ext_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa_query_intf_ext_props(
(struct ipa_ioc_query_intf_ext_props *)param)) {
retval = -1;
@@ -1091,8 +1216,10 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- pyld_sz = sizeof(struct ipa_msg_meta) +
+ pre_entry =
((struct ipa_msg_meta *)header)->msg_len;
+ pyld_sz = sizeof(struct ipa_msg_meta) +
+ pre_entry;
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1102,6 +1229,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_msg_meta *)param)->msg_len
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_msg_meta *)param)->msg_len,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa_pull_msg((struct ipa_msg_meta *)param,
(char *)param + sizeof(struct ipa_msg_meta),
((struct ipa_msg_meta *)param)->msg_len) !=
@@ -1218,10 +1354,12 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_hdr_proc_ctx *)
+ header)->num_proc_ctxs;
pyld_sz =
sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
- ((struct ipa_ioc_add_hdr_proc_ctx *)header)->num_proc_ctxs *
- sizeof(struct ipa_hdr_proc_ctx_add);
+ pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1231,6 +1369,15 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
+ param)->num_proc_ctxs != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_hdr_proc_ctx *)
+ param)->num_proc_ctxs, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_add_hdr_proc_ctx(
(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
retval = -EFAULT;
@@ -1247,10 +1394,11 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
- ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls *
- sizeof(struct ipa_hdr_proc_ctx_del);
+ pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1260,6 +1408,16 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ /* add check in case user-space module compromised */
+ if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
+ param)->num_hdls != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
+ num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa2_del_hdr_proc_ctx(
(struct ipa_ioc_del_hdr_proc_ctx *)param)) {
retval = -EFAULT;
@@ -3612,6 +3770,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
ipa_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
ipa_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
+ ipa_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
ipa_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
ipa_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
ipa_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
@@ -4150,6 +4309,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->ipa_hw_mode = 0;
ipa_drv_res->ipa_bam_remote_mode = false;
ipa_drv_res->modem_cfg_emb_pipe_flt = false;
+ ipa_drv_res->ipa_wdi2 = false;
ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
@@ -4216,6 +4376,13 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->modem_cfg_emb_pipe_flt
? "True" : "False");
+ ipa_drv_res->ipa_wdi2 =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-wdi2");
+ IPADBG(": WDI-2.0 = %s\n",
+ ipa_drv_res->ipa_wdi2
+ ? "True" : "False");
+
ipa_drv_res->skip_uc_pipe_reset =
of_property_read_bool(pdev->dev.of_node,
"qcom,skip-uc-pipe-reset");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 5b8abb25cfb0..581a5f9d8a2e 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -50,6 +50,9 @@
#define IPA_DL_CHECKSUM_LENGTH (8)
#define IPA_NUM_DESC_PER_SW_TX (2)
#define IPA_GENERIC_RX_POOL_SZ 1000
+#define IPA_UC_FINISH_MAX 6
+#define IPA_UC_WAIT_MIN_SLEEP 1000
+#define IPA_UC_WAII_MAX_SLEEP 1200
#define IPA_MAX_STATUS_STAT_NUM 30
@@ -890,6 +893,14 @@ struct ipa_uc_ctx {
u32 uc_status;
bool uc_zip_error;
u32 uc_error_type;
+ phys_addr_t rdy_ring_base_pa;
+ phys_addr_t rdy_ring_rp_pa;
+ u32 rdy_ring_size;
+ phys_addr_t rdy_comp_ring_base_pa;
+ phys_addr_t rdy_comp_ring_wp_pa;
+ u32 rdy_comp_ring_size;
+ u32 *rdy_ring_rp_va;
+ u32 *rdy_comp_ring_wp_va;
};
/**
@@ -994,6 +1005,7 @@ struct ipacm_client_info {
* @use_ipa_teth_bridge: use tethering bridge driver
* @ipa_bam_remote_mode: ipa bam is in remote mode
* @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
+ * @ipa_wdi2: using wdi-2.0
* @ipa_bus_hdl: msm driver handle for the data path bus
* @ctrl: holds the core specific operations based on
* core version (vtable like)
@@ -1081,6 +1093,7 @@ struct ipa_context {
bool use_ipa_teth_bridge;
bool ipa_bam_remote_mode;
bool modem_cfg_emb_pipe_flt;
+ bool ipa_wdi2;
/* featurize if memory footprint becomes a concern */
struct ipa_stats stats;
void *smem_pipe_mem;
@@ -1171,6 +1184,7 @@ struct ipa_plat_drv_res {
u32 ee;
bool ipa_bam_remote_mode;
bool modem_cfg_emb_pipe_flt;
+ bool ipa_wdi2;
u32 wan_rx_ring_size;
u32 lan_rx_ring_size;
bool skip_uc_pipe_reset;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index a1072638b281..4e79fec076e2 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -25,12 +25,14 @@
#define IPA_WDI_RESUMED BIT(2)
#define IPA_UC_POLL_SLEEP_USEC 100
-#define IPA_WDI_RX_RING_RES 0
-#define IPA_WDI_RX_RING_RP_RES 1
-#define IPA_WDI_TX_RING_RES 2
-#define IPA_WDI_CE_RING_RES 3
-#define IPA_WDI_CE_DB_RES 4
-#define IPA_WDI_MAX_RES 5
+#define IPA_WDI_RX_RING_RES 0
+#define IPA_WDI_RX_RING_RP_RES 1
+#define IPA_WDI_RX_COMP_RING_RES 2
+#define IPA_WDI_RX_COMP_RING_WP_RES 3
+#define IPA_WDI_TX_RING_RES 4
+#define IPA_WDI_CE_RING_RES 5
+#define IPA_WDI_CE_DB_RES 6
+#define IPA_WDI_MAX_RES 7
struct ipa_wdi_res {
struct ipa_wdi_buffer_info *res;
@@ -232,6 +234,21 @@ struct IpaHwWdiTxSetUpCmdData_t {
u8 reserved;
} __packed;
+struct IpaHwWdi2TxSetUpCmdData_t {
+ u32 comp_ring_base_pa;
+ u32 comp_ring_base_pa_hi;
+ u16 comp_ring_size;
+ u16 reserved_comp_ring;
+ u32 ce_ring_base_pa;
+ u32 ce_ring_base_pa_hi;
+ u16 ce_ring_size;
+ u16 reserved_ce_ring;
+ u32 ce_ring_doorbell_pa;
+ u32 ce_ring_doorbell_pa_hi;
+ u16 num_tx_buffers;
+ u8 ipa_pipe_number;
+ u8 reserved;
+} __packed;
/**
* struct IpaHwWdiRxSetUpCmdData_t - Structure holding the parameters for
* IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
@@ -253,6 +270,19 @@ struct IpaHwWdiRxSetUpCmdData_t {
u8 ipa_pipe_number;
} __packed;
+struct IpaHwWdi2RxSetUpCmdData_t {
+ u32 rx_ring_base_pa;
+ u32 rx_ring_base_pa_hi;
+ u32 rx_ring_size;
+ u32 rx_ring_rp_pa;
+ u32 rx_ring_rp_pa_hi;
+ u32 rx_comp_ring_base_pa;
+ u32 rx_comp_ring_base_pa_hi;
+ u32 rx_comp_ring_size;
+ u32 rx_comp_ring_wp_pa;
+ u32 rx_comp_ring_wp_pa_hi;
+ u8 ipa_pipe_number;
+} __packed;
/**
* union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for
* IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
@@ -560,7 +590,10 @@ static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
end = IPA_WDI_CE_DB_RES;
} else {
start = IPA_WDI_RX_RING_RES;
- end = IPA_WDI_RX_RING_RP_RES;
+ if (ipa_ctx->ipa_wdi2)
+ end = IPA_WDI_RX_COMP_RING_WP_RES;
+ else
+ end = IPA_WDI_RX_RING_RP_RES;
}
for (i = start; i <= end; i++) {
@@ -708,6 +741,8 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
struct ipa_mem_buffer cmd;
struct IpaHwWdiTxSetUpCmdData_t *tx;
struct IpaHwWdiRxSetUpCmdData_t *rx;
+ struct IpaHwWdi2TxSetUpCmdData_t *tx_2;
+ struct IpaHwWdi2RxSetUpCmdData_t *rx_2;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
unsigned long va;
phys_addr_t pa;
@@ -760,7 +795,10 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
if (IPA_CLIENT_IS_CONS(in->sys.client)) {
- cmd.size = sizeof(*tx);
+ if (ipa_ctx->ipa_wdi2)
+ cmd.size = sizeof(*tx_2);
+ else
+ cmd.size = sizeof(*tx);
IPADBG("comp_ring_base_pa=0x%pa\n",
&in->u.dl.comp_ring_base_pa);
IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
@@ -770,10 +808,59 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
&in->u.dl.ce_door_bell_pa);
IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
} else {
- cmd.size = sizeof(*rx);
- IPADBG("rx_ring_base_pa=0x%pa\n", &in->u.ul.rdy_ring_base_pa);
- IPADBG("rx_ring_size=%d\n", in->u.ul.rdy_ring_size);
- IPADBG("rx_ring_rp_pa=0x%pa\n", &in->u.ul.rdy_ring_rp_pa);
+ if (ipa_ctx->ipa_wdi2) {
+ /* WDI2.0 feature */
+ cmd.size = sizeof(*rx_2);
+ IPADBG("rdy_ring_rp value =%d\n",
+ *in->u.ul.rdy_ring_rp_va);
+ IPADBG("rx_comp_ring_wp value=%d\n",
+ *in->u.ul.rdy_comp_ring_wp_va);
+ ipa_ctx->uc_ctx.rdy_ring_rp_va =
+ in->u.ul.rdy_ring_rp_va;
+ ipa_ctx->uc_ctx.rdy_comp_ring_wp_va =
+ in->u.ul.rdy_comp_ring_wp_va;
+ } else {
+ cmd.size = sizeof(*rx);
+ }
+ IPADBG("rx_ring_base_pa=0x%pa\n",
+ &in->u.ul.rdy_ring_base_pa);
+ IPADBG("rx_ring_size=%d\n",
+ in->u.ul.rdy_ring_size);
+ IPADBG("rx_ring_rp_pa=0x%pa\n",
+ &in->u.ul.rdy_ring_rp_pa);
+
+ IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+ &in->u.ul.rdy_comp_ring_base_pa);
+ IPADBG("rx_comp_ring_size=%d\n",
+ in->u.ul.rdy_comp_ring_size);
+ IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+ &in->u.ul.rdy_comp_ring_wp_pa);
+
+ ipa_ctx->uc_ctx.rdy_ring_base_pa =
+ in->u.ul.rdy_ring_base_pa;
+ ipa_ctx->uc_ctx.rdy_ring_rp_pa =
+ in->u.ul.rdy_ring_rp_pa;
+ ipa_ctx->uc_ctx.rdy_ring_size =
+ in->u.ul.rdy_ring_size;
+ ipa_ctx->uc_ctx.rdy_comp_ring_base_pa =
+ in->u.ul.rdy_comp_ring_base_pa;
+ ipa_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+ in->u.ul.rdy_comp_ring_wp_pa;
+ ipa_ctx->uc_ctx.rdy_comp_ring_size =
+ in->u.ul.rdy_comp_ring_size;
+
+ /* check if the VA is empty */
+ if (!in->u.ul.rdy_ring_rp_va && ipa_ctx->ipa_wdi2) {
+ IPAERR("rdy_ring_rp_va is empty, wdi2.0(%d)\n",
+ ipa_ctx->ipa_wdi2);
+ goto dma_alloc_fail;
+ }
+ if (!in->u.ul.rdy_comp_ring_wp_va &&
+ ipa_ctx->ipa_wdi2) {
+ IPAERR("comp_ring_wp_va is empty, wdi2.0(%d)\n",
+ ipa_ctx->ipa_wdi2);
+ goto dma_alloc_fail;
+ }
}
cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
@@ -785,14 +872,16 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
}
if (IPA_CLIENT_IS_CONS(in->sys.client)) {
- tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+ if (ipa_ctx->ipa_wdi2) {
+ tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base;
- len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
- in->u.dl.comp_ring_size;
- IPADBG("TX ring smmu_en=%d ring_size=%d %d\n", in->smmu_enabled,
+ len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+ in->u.dl.comp_ring_size;
+ IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
in->u.dl_smmu.comp_ring_size,
in->u.dl.comp_ring_size);
- if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
in->smmu_enabled,
in->u.dl.comp_ring_base_pa,
&in->u.dl_smmu.comp_ring,
@@ -802,47 +891,121 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
IPAERR("fail to create uc mapping TX ring.\n");
result = -ENOMEM;
goto uc_timeout;
- }
- tx->comp_ring_base_pa = va;
- tx->comp_ring_size = len;
-
- len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
- in->u.dl.ce_ring_size;
- IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
- in->smmu_enabled,
- in->u.dl_smmu.ce_ring_size,
- in->u.dl.ce_ring_size);
- if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+ }
+ tx_2->comp_ring_base_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+ tx_2->comp_ring_size = len;
+ IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n",
+ tx_2->comp_ring_base_pa_hi,
+ tx_2->comp_ring_base_pa);
+
+ len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+ in->u.dl.ce_ring_size;
+ IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n",
in->smmu_enabled,
- in->u.dl.ce_ring_base_pa,
- &in->u.dl_smmu.ce_ring,
- len,
- false,
- &va)) {
+ in->u.dl_smmu.ce_ring_size,
+ in->u.dl.ce_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+ in->smmu_enabled,
+ in->u.dl.ce_ring_base_pa,
+ &in->u.dl_smmu.ce_ring,
+ len,
+ false,
+ &va)) {
IPAERR("fail to create uc mapping CE ring.\n");
result = -ENOMEM;
goto uc_timeout;
- }
- tx->ce_ring_base_pa = va;
- tx->ce_ring_size = len;
+ }
+ tx_2->ce_ring_base_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+ tx_2->ce_ring_size = len;
+ IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n",
+ tx_2->ce_ring_base_pa_hi,
+ tx_2->ce_ring_base_pa);
+
+ pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+ in->u.dl.ce_door_bell_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ true,
+ &va)) {
+ IPAERR("fail to create uc mapping CE DB.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx_2->ce_ring_doorbell_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF);
+ IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n",
+ tx_2->ce_ring_doorbell_pa_hi,
+ tx_2->ce_ring_doorbell_pa);
+
+ tx_2->num_tx_buffers = in->u.dl.num_tx_buffers;
+ tx_2->ipa_pipe_number = ipa_ep_idx;
+ } else {
+ tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+ len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+ in->u.dl.comp_ring_size;
+ IPADBG("TX ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
+ in->u.dl_smmu.comp_ring_size,
+ in->u.dl.comp_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+ in->smmu_enabled,
+ in->u.dl.comp_ring_base_pa,
+ &in->u.dl_smmu.comp_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping TX ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx->comp_ring_base_pa = va;
+ tx->comp_ring_size = len;
- pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
- in->u.dl.ce_door_bell_pa;
- if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+ len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+ in->u.dl.ce_ring_size;
+ IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
in->smmu_enabled,
- pa,
- NULL,
- 4,
- true,
- &va)) {
+ in->u.dl_smmu.ce_ring_size,
+ in->u.dl.ce_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+ in->smmu_enabled,
+ in->u.dl.ce_ring_base_pa,
+ &in->u.dl_smmu.ce_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping CE ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ tx->ce_ring_base_pa = va;
+ tx->ce_ring_size = len;
+ pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+ in->u.dl.ce_door_bell_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ true,
+ &va)) {
IPAERR("fail to create uc mapping CE DB.\n");
result = -ENOMEM;
goto uc_timeout;
+ }
+ tx->ce_ring_doorbell_pa = va;
+ tx->num_tx_buffers = in->u.dl.num_tx_buffers;
+ tx->ipa_pipe_number = ipa_ep_idx;
}
- tx->ce_ring_doorbell_pa = va;
- tx->num_tx_buffers = in->u.dl.num_tx_buffers;
- tx->ipa_pipe_number = ipa_ep_idx;
if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
out->uc_door_bell_pa =
ipa_ctx->ipa_wrapper_base +
@@ -859,43 +1022,141 @@ int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
}
} else {
- rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
+ if (ipa_ctx->ipa_wdi2) {
+ rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base;
- len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
- in->u.ul.rdy_ring_size;
- IPADBG("RX ring smmu_en=%d ring_size=%d %d\n", in->smmu_enabled,
+ len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+ in->u.ul.rdy_ring_size;
+ IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n",
+ in->smmu_enabled,
in->u.ul_smmu.rdy_ring_size,
in->u.ul.rdy_ring_size);
- if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+ in->smmu_enabled,
+ in->u.ul.rdy_ring_base_pa,
+ &in->u.ul_smmu.rdy_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc RX_2 ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx_2->rx_ring_base_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+ rx_2->rx_ring_size = len;
+ IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n",
+ rx_2->rx_ring_base_pa_hi,
+ rx_2->rx_ring_base_pa);
+
+ pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+ in->u.ul.rdy_ring_rp_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ false,
+ &va)) {
+ IPAERR("fail to create uc RX_2 rng RP\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx_2->rx_ring_rp_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF);
+ IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n",
+ rx_2->rx_ring_rp_pa_hi,
+ rx_2->rx_ring_rp_pa);
+ len = in->smmu_enabled ?
+ in->u.ul_smmu.rdy_comp_ring_size :
+ in->u.ul.rdy_comp_ring_size;
+ IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n",
in->smmu_enabled,
- in->u.ul.rdy_ring_base_pa,
- &in->u.ul_smmu.rdy_ring,
- len,
- false,
- &va)) {
- IPAERR("fail to create uc mapping RX ring.\n");
+ in->u.ul_smmu.rdy_comp_ring_size,
+ in->u.ul.rdy_comp_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_COMP_RING_RES,
+ in->smmu_enabled,
+ in->u.ul.rdy_comp_ring_base_pa,
+ &in->u.ul_smmu.rdy_comp_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc RX_2 comp_ring.\n");
result = -ENOMEM;
goto uc_timeout;
- }
- rx->rx_ring_base_pa = va;
- rx->rx_ring_size = len;
+ }
+ rx_2->rx_comp_ring_base_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+ rx_2->rx_comp_ring_size = len;
+ IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n",
+ rx_2->rx_comp_ring_base_pa_hi,
+ rx_2->rx_comp_ring_base_pa);
+
+ pa = in->smmu_enabled ?
+ in->u.ul_smmu.rdy_comp_ring_wp_pa :
+ in->u.ul.rdy_comp_ring_wp_pa;
+ if (ipa_create_uc_smmu_mapping(
+ IPA_WDI_RX_COMP_RING_WP_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ false,
+ &va)) {
+ IPAERR("fail to create uc RX_2 comp_rng WP\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx_2->rx_comp_ring_wp_pa_hi =
+ (u32) ((va & 0xFFFFFFFF00000000) >> 32);
+ rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF);
+ IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n",
+ rx_2->rx_comp_ring_wp_pa_hi,
+ rx_2->rx_comp_ring_wp_pa);
+ rx_2->ipa_pipe_number = ipa_ep_idx;
+ } else {
+ rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
- pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
- in->u.ul.rdy_ring_rp_pa;
- if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+ len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+ in->u.ul.rdy_ring_size;
+ IPADBG("RX ring smmu_en=%d ring_size=%d %d\n",
in->smmu_enabled,
- pa,
- NULL,
- 4,
- false,
- &va)) {
+ in->u.ul_smmu.rdy_ring_size,
+ in->u.ul.rdy_ring_size);
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+ in->smmu_enabled,
+ in->u.ul.rdy_ring_base_pa,
+ &in->u.ul_smmu.rdy_ring,
+ len,
+ false,
+ &va)) {
+ IPAERR("fail to create uc mapping RX ring.\n");
+ result = -ENOMEM;
+ goto uc_timeout;
+ }
+ rx->rx_ring_base_pa = va;
+ rx->rx_ring_size = len;
+
+ pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+ in->u.ul.rdy_ring_rp_pa;
+ if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+ in->smmu_enabled,
+ pa,
+ NULL,
+ 4,
+ false,
+ &va)) {
IPAERR("fail to create uc mapping RX rng RP\n");
result = -ENOMEM;
goto uc_timeout;
+ }
+ rx->rx_ring_rp_pa = va;
+ rx->ipa_pipe_number = ipa_ep_idx;
}
- rx->rx_ring_rp_pa = va;
- rx->ipa_pipe_number = ipa_ep_idx;
if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
out->uc_door_bell_pa =
ipa_ctx->ipa_wrapper_base +
@@ -1127,6 +1388,7 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
union IpaHwWdiCommonChCmdData_t disable;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
u32 prod_hdl;
+ int i;
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
@@ -1143,6 +1405,28 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
if (result)
return result;
+	/* check that rdy_ring_rp_pa matches rdy_comp_ring_wp_pa on WDI 2.0 */
+ if (ipa_ctx->ipa_wdi2) {
+ for (i = 0; i < IPA_UC_FINISH_MAX; i++) {
+ IPADBG("(%d) rp_value(%u), comp_wp_value(%u)\n",
+ i,
+ *ipa_ctx->uc_ctx.rdy_ring_rp_va,
+ *ipa_ctx->uc_ctx.rdy_comp_ring_wp_va);
+ if (*ipa_ctx->uc_ctx.rdy_ring_rp_va !=
+ *ipa_ctx->uc_ctx.rdy_comp_ring_wp_va) {
+ usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+ IPA_UC_WAII_MAX_SLEEP);
+ } else {
+ break;
+ }
+ }
+		/* In case the IPA uC still hasn't processed all
+		 * pending descriptors, we have to assert.
+		 */
+ if (i == IPA_UC_FINISH_MAX)
+ BUG();
+ }
+
IPADBG("ep=%d\n", clnt_hdl);
ep = &ipa_ctx->ep[clnt_hdl];
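The hunk above makes ipa2_disable_wdi_pipe() wait for the uC to drain the WDI 2.0 ready ring before tearing the pipe down: it polls until the ready-ring read pointer equals the completion-ring write pointer, sleeping between attempts and asserting only if the retry bound is hit. A minimal user-space sketch of that bounded drain-wait follows; the constants, names and the simulated ring state are illustrative stand-ins, not the driver's actual values.

/* Minimal user-space sketch of the bounded drain-wait used in
 * ipa2_disable_wdi_pipe() above: poll until the ready-ring read pointer
 * catches up with the completion-ring write pointer, sleeping between
 * polls and giving up after a fixed number of attempts. Constants and
 * the toy ring state are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define FINISH_MAX     10      /* assumed retry bound */
#define WAIT_SLEEP_US  1000    /* assumed per-iteration sleep */

static bool wait_for_ring_drain(const volatile uint32_t *rp,
				const volatile uint32_t *wp)
{
	int i;

	for (i = 0; i < FINISH_MAX; i++) {
		if (*rp == *wp)
			return true;     /* peer has consumed everything */
		usleep(WAIT_SLEEP_US);   /* give the peer time to drain */
	}
	return false;                    /* caller decides how hard to fail */
}

int main(void)
{
	uint32_t rp = 7, wp = 7;         /* already drained in this toy run */

	printf("drained: %d\n", wait_for_ring_drain(&rp, &wp));
	return 0;
}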
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index c9120ce83da8..1df2bc6b902c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -596,6 +596,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct ipa_ioc_v4_nat_del nat_del;
struct ipa_ioc_rm_dependency rm_depend;
size_t sz;
+ int pre_entry;
IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
@@ -649,11 +650,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
+ pre_entry =
+ ((struct ipa_ioc_nat_dma_cmd *)header)->entries;
pyld_sz =
sizeof(struct ipa_ioc_nat_dma_cmd) +
- ((struct ipa_ioc_nat_dma_cmd *)header)->entries *
- sizeof(struct ipa_ioc_nat_dma_one);
+ pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -664,7 +665,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_nat_dma_cmd *)param)->entries,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
retval = -EFAULT;
break;
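Every ioctl case in this file now follows the same double-fetch guard: snapshot the element count from the user header, size the allocation from that snapshot, and after copying the full payload verify that the embedded count still equals the snapshot, returning -EFAULT if user space changed it between the two copies. A condensed user-space sketch of the pattern is below; the struct, the zero-size entries in the demo and the fake_copy_from_user() helper are illustrative, not IPA types.

/* Sketch of the re-validation pattern the ioctl cases above follow: the
 * entry count is read once from the user header, the payload size is
 * computed from that snapshot, and after the full copy the embedded
 * count is compared against the snapshot so a concurrent user-space
 * writer cannot grow the array past the allocation.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct cmd_hdr { uint32_t entries; /* array of entries follows */ };

static int fake_copy_from_user(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);	/* stands in for copy_from_user() */
	return 0;
}

static int handle_cmd(const void *user_buf, size_t entry_size)
{
	struct cmd_hdr hdr;
	uint32_t pre_entry;
	void *param;
	int ret = 0;

	if (fake_copy_from_user(&hdr, user_buf, sizeof(hdr)))
		return -EFAULT;

	pre_entry = hdr.entries;		/* snapshot the count */
	param = calloc(1, sizeof(hdr) + (size_t)pre_entry * entry_size);
	if (!param)
		return -ENOMEM;

	if (fake_copy_from_user(param, user_buf,
				sizeof(hdr) + (size_t)pre_entry * entry_size)) {
		ret = -EFAULT;
		goto out;
	}

	/* reject the command if the count changed between the two copies */
	if (((struct cmd_hdr *)param)->entries != pre_entry)
		ret = -EFAULT;
out:
	free(param);
	return ret;
}

int main(void)
{
	struct cmd_hdr hdr = { .entries = 2 };

	/* zero-size entries keep the demo self-contained */
	return handle_cmd(&hdr, 0) ? 1 : 0;
}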
@@ -689,10 +698,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_hdr *)header)->num_hdrs;
pyld_sz =
sizeof(struct ipa_ioc_add_hdr) +
- ((struct ipa_ioc_add_hdr *)header)->num_hdrs *
- sizeof(struct ipa_hdr_add);
+ pre_entry * sizeof(struct ipa_hdr_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -702,6 +712,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_hdr *)param)->num_hdrs,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
retval = -EFAULT;
break;
@@ -718,10 +737,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_hdr *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_hdr) +
- ((struct ipa_ioc_del_hdr *)header)->num_hdls *
- sizeof(struct ipa_hdr_del);
+ pre_entry * sizeof(struct ipa_hdr_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -731,6 +751,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_hdr *)param)->num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_del_hdr((struct ipa_ioc_del_hdr *)param)) {
retval = -EFAULT;
break;
@@ -747,10 +776,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_rt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_rt_rule) +
- ((struct ipa_ioc_add_rt_rule *)header)->num_rules *
- sizeof(struct ipa_rt_rule_add);
+ pre_entry * sizeof(struct ipa_rt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -760,6 +790,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_rt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -776,10 +816,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_rt_rule_after) +
- ((struct ipa_ioc_add_rt_rule_after *)header)->num_rules *
- sizeof(struct ipa_rt_rule_add);
+ pre_entry * sizeof(struct ipa_rt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -789,6 +830,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
+ num_rules != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_rt_rule_after *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_rt_rule_after(
(struct ipa_ioc_add_rt_rule_after *)param)) {
@@ -807,10 +858,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_mdfy_rt_rule) +
- ((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules *
- sizeof(struct ipa_rt_rule_mdfy);
+ pre_entry * sizeof(struct ipa_rt_rule_mdfy);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -820,6 +872,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_rt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -836,10 +898,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_rt_rule) +
- ((struct ipa_ioc_del_rt_rule *)header)->num_hdls *
- sizeof(struct ipa_rt_rule_del);
+ pre_entry * sizeof(struct ipa_rt_rule_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -849,6 +912,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
retval = -EFAULT;
break;
@@ -865,10 +937,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_flt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_flt_rule) +
- ((struct ipa_ioc_add_flt_rule *)header)->num_rules *
- sizeof(struct ipa_flt_rule_add);
+ pre_entry * sizeof(struct ipa_flt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -878,6 +951,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_flt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -895,10 +978,12 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_flt_rule_after *)header)->
+ num_rules;
pyld_sz =
sizeof(struct ipa_ioc_add_flt_rule_after) +
- ((struct ipa_ioc_add_flt_rule_after *)header)->num_rules *
- sizeof(struct ipa_flt_rule_add);
+ pre_entry * sizeof(struct ipa_flt_rule_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -908,6 +993,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
+ num_rules != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_flt_rule_after *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_flt_rule_after(
(struct ipa_ioc_add_flt_rule_after *)param)) {
retval = -EFAULT;
@@ -925,10 +1020,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_flt_rule) +
- ((struct ipa_ioc_del_flt_rule *)header)->num_hdls *
- sizeof(struct ipa_flt_rule_del);
+ pre_entry * sizeof(struct ipa_flt_rule_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -938,6 +1034,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_flt_rule *)param)->
+ num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -954,10 +1060,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
pyld_sz =
sizeof(struct ipa_ioc_mdfy_flt_rule) +
- ((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules *
- sizeof(struct ipa_flt_rule_mdfy);
+ pre_entry * sizeof(struct ipa_flt_rule_mdfy);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -967,6 +1074,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_mdfy_flt_rule *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
retval = -EFAULT;
break;
@@ -1080,9 +1197,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_tx_props *)
- header)->num_tx_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_tx_props *)
+ header)->num_tx_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_tx_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1093,6 +1211,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
+ param)->num_tx_props
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_tx_props *)
+ param)->num_tx_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_query_intf_tx_props(
(struct ipa_ioc_query_intf_tx_props *)param)) {
retval = -1;
@@ -1115,9 +1243,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_rx_props *)
- header)->num_rx_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_rx_props *)
+ header)->num_rx_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_rx_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1128,6 +1257,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
+ param)->num_rx_props != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_rx_props *)
+ param)->num_rx_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_query_intf_rx_props(
(struct ipa_ioc_query_intf_rx_props *)param)) {
retval = -1;
@@ -1150,9 +1288,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
-
- pyld_sz = sz + ((struct ipa_ioc_query_intf_ext_props *)
- header)->num_ext_props *
+ pre_entry =
+ ((struct ipa_ioc_query_intf_ext_props *)
+ header)->num_ext_props;
+ pyld_sz = sz + pre_entry *
sizeof(struct ipa_ioc_ext_intf_prop);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
@@ -1163,6 +1302,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
+ param)->num_ext_props != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_query_intf_ext_props *)
+ param)->num_ext_props, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_query_intf_ext_props(
(struct ipa_ioc_query_intf_ext_props *)param)) {
retval = -1;
@@ -1179,8 +1327,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- pyld_sz = sizeof(struct ipa_msg_meta) +
+ pre_entry =
((struct ipa_msg_meta *)header)->msg_len;
+ pyld_sz = sizeof(struct ipa_msg_meta) +
+ pre_entry;
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1190,6 +1340,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_msg_meta *)param)->msg_len
+ != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_msg_meta *)param)->msg_len,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_pull_msg((struct ipa_msg_meta *)param,
(char *)param + sizeof(struct ipa_msg_meta),
((struct ipa_msg_meta *)param)->msg_len) !=
@@ -1306,10 +1465,12 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_add_hdr_proc_ctx *)
+ header)->num_proc_ctxs;
pyld_sz =
sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
- ((struct ipa_ioc_add_hdr_proc_ctx *)header)->num_proc_ctxs *
- sizeof(struct ipa_hdr_proc_ctx_add);
+ pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1319,6 +1480,15 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
+ param)->num_proc_ctxs != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_add_hdr_proc_ctx *)
+ param)->num_proc_ctxs, pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_add_hdr_proc_ctx(
(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
retval = -EFAULT;
@@ -1335,10 +1505,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+ pre_entry =
+ ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
pyld_sz =
sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
- ((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls *
- sizeof(struct ipa_hdr_proc_ctx_del);
+ pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
param = kzalloc(pyld_sz, GFP_KERNEL);
if (!param) {
retval = -ENOMEM;
@@ -1348,6 +1519,16 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
+		/* add a check in case the user-space module is compromised */
+ if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
+ param)->num_hdls != pre_entry)) {
+ IPAERR("current %d pre %d\n",
+ ((struct ipa_ioc_del_hdr_proc_ctx *)param)->
+ num_hdls,
+ pre_entry);
+ retval = -EFAULT;
+ break;
+ }
if (ipa3_del_hdr_proc_ctx(
(struct ipa_ioc_del_hdr_proc_ctx *)param)) {
retval = -EFAULT;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 60dc8b21044a..bfa82ca64499 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -127,10 +127,10 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
if (!host->is_lane_clks_enabled)
return;
- if (host->hba->lanes_per_direction > 1)
+ if (host->tx_l1_sync_clk)
clk_disable_unprepare(host->tx_l1_sync_clk);
clk_disable_unprepare(host->tx_l0_sync_clk);
- if (host->hba->lanes_per_direction > 1)
+ if (host->rx_l1_sync_clk)
clk_disable_unprepare(host->rx_l1_sync_clk);
clk_disable_unprepare(host->rx_l0_sync_clk);
@@ -161,17 +161,14 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
if (err)
goto disable_tx_l0;
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
- host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
+ /* The tx lane1 clk could be muxed, hence keep this optional */
+ if (host->tx_l1_sync_clk)
+ ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+ host->tx_l1_sync_clk);
}
host->is_lane_clks_enabled = true;
goto out;
-disable_rx_l1:
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
@@ -202,8 +199,9 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
if (err)
goto out;
- err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk);
+ /* The tx lane1 clk could be muxed, hence keep this optional */
+ ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+ &host->tx_l1_sync_clk);
}
out:
return err;
@@ -2089,6 +2087,11 @@ static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable)
if (!ufs_qcom_cap_svs2(host))
goto out;
+ if (!((host->hw_ver.major == 0x3) &&
+ (host->hw_ver.minor == 0x0) &&
+ (host->hw_ver.step == 0x0)))
+ goto out;
+
/*
* The link should be put in hibern8 state before
* configuring the PHY to enter/exit SVS2 mode.
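The UFS change above treats tx_lane1_sync_clk as optional: probing no longer fails when the clock is absent (it may be muxed), and the enable/disable paths are guarded on a NULL handle instead of the lane count. A toy sketch of that optional-resource handling follows, using stand-in names rather than the real clk API.

/* Sketch of the "optional resource" handling adopted for tx_lane1_sync_clk
 * above: failure to obtain the optional handle leaves it NULL instead of
 * failing the probe, and every enable call is guarded on NULL.
 * get_optional_clk() is a stand-in, not a real clk API.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_clk { const char *name; };

static struct fake_clk tx_l1 = { "tx_lane1_sync_clk" };

static struct fake_clk *get_optional_clk(int present)
{
	return present ? &tx_l1 : NULL;   /* NULL simply means "not wired" */
}

static void clk_enable_if_present(struct fake_clk *clk)
{
	if (clk)                          /* skip silently when absent */
		printf("enable %s\n", clk->name);
}

int main(void)
{
	struct fake_clk *clk = get_optional_clk(0);

	clk_enable_if_present(clk);       /* no-op: probe still succeeds */
	return 0;
}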
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index f54d9c3f4f3d..20a4a3c7fdf0 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -30,6 +30,7 @@
#include "glink_private.h"
#include "glink_xprt_if.h"
+#define GLINK_CTX_CANARY 0x58544324 /* "$CTX" */
/* Number of internal IPC Logging log pages */
#define NUM_LOG_PAGES 10
#define GLINK_PM_QOS_HOLDOFF_MS 10
@@ -38,6 +39,8 @@
#define GLINK_QOS_DEF_MTU 2048
#define GLINK_KTHREAD_PRIO 1
+
+static rwlock_t magic_lock;
/**
* struct glink_qos_priority_bin - Packet Scheduler's priority bucket
* @max_rate_kBps: Maximum rate supported by the priority bucket.
@@ -308,6 +311,7 @@ struct channel_ctx {
unsigned long req_rate_kBps;
uint32_t tx_intent_cnt;
uint32_t tx_cnt;
+ uint32_t magic_number;
};
static struct glink_core_if core_impl;
@@ -436,6 +440,37 @@ static void glink_core_deinit_xprt_qos_cfg(
#define GLINK_GET_CH_TX_STATE(ctx) \
((ctx)->tx_intent_cnt || (ctx)->tx_cnt)
+static int glink_get_ch_ctx(struct channel_ctx *ctx)
+{
+ unsigned long flags;
+
+ if (!ctx)
+ return -EINVAL;
+ read_lock_irqsave(&magic_lock, flags);
+ if (ctx->magic_number != GLINK_CTX_CANARY) {
+ read_unlock_irqrestore(&magic_lock, flags);
+ return -EINVAL;
+ }
+ rwref_get(&ctx->ch_state_lhb2);
+ read_unlock_irqrestore(&magic_lock, flags);
+ return 0;
+}
+
+static int glink_put_ch_ctx(struct channel_ctx *ctx, bool update_magic)
+{
+ unsigned long flags;
+
+ if (!update_magic) {
+ rwref_put(&ctx->ch_state_lhb2);
+ return 0;
+ }
+ write_lock_irqsave(&magic_lock, flags);
+ ctx->magic_number = 0;
+ rwref_put(&ctx->ch_state_lhb2);
+ write_unlock_irqrestore(&magic_lock, flags);
+ return 0;
+}
+
/**
* glink_ssr() - Clean up locally for SSR by simulating remote close
* @subsystem: The name of the subsystem being restarted
@@ -2583,7 +2618,7 @@ void *glink_open(const struct glink_open_config *cfg)
GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
__func__, ctx);
-
+ ctx->magic_number = GLINK_CTX_CANARY;
return ctx;
}
EXPORT_SYMBOL(glink_open);
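glink_get_ch_ctx()/glink_put_ch_ctx() above validate a caller-supplied handle by checking a magic value under a read lock before taking a channel reference, and poison the magic under the write lock on close so stale handles are rejected rather than dereferenced. A user-space approximation using a pthread rwlock follows; reference counting is simplified to a plain counter and the names are illustrative.

/* User-space sketch of the handle-validation scheme added above: a magic
 * value in the object is checked under a read lock before taking a
 * reference, and the teardown path zeroes the magic under the write lock.
 * Reference counting is reduced to a plain counter here for brevity, so
 * this is not itself safe for concurrent readers.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define CTX_CANARY 0x58544324u            /* "$CTX", as in the patch */

static pthread_rwlock_t magic_lock = PTHREAD_RWLOCK_INITIALIZER;

struct ch_ctx {
	uint32_t magic;
	int refs;
};

static int get_ctx(struct ch_ctx *ctx)
{
	int ret = -1;

	if (!ctx)
		return ret;
	pthread_rwlock_rdlock(&magic_lock);
	if (ctx->magic == CTX_CANARY) {   /* handle still points at a live channel */
		ctx->refs++;
		ret = 0;
	}
	pthread_rwlock_unlock(&magic_lock);
	return ret;
}

static void put_ctx(struct ch_ctx *ctx, int clear_magic)
{
	if (!clear_magic) {
		ctx->refs--;
		return;
	}
	pthread_rwlock_wrlock(&magic_lock);
	ctx->magic = 0;                   /* close path: poison the handle */
	ctx->refs--;
	pthread_rwlock_unlock(&magic_lock);
}

int main(void)
{
	struct ch_ctx ctx = { CTX_CANARY, 0 };

	if (!get_ctx(&ctx))
		put_ctx(&ctx, 1);
	printf("second lookup rejected: %d\n", get_ctx(&ctx) != 0);
	return 0;
}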
@@ -2681,15 +2716,19 @@ int glink_close(void *handle)
unsigned long flags;
bool is_empty = false;
- if (!ctx)
- return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
- if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+ if (ctx->local_open_state == GLINK_CHANNEL_CLOSED) {
+ glink_put_ch_ctx(ctx, false);
return 0;
+ }
if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
/* close already pending */
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -2754,6 +2793,7 @@ relock: xprt_ctx = ctx->transport_ptr;
rwref_put(&ctx->ch_state_lhb2);
rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+ glink_put_ch_ctx(ctx, true);
return ret;
}
EXPORT_SYMBOL(glink_close);
@@ -2812,29 +2852,30 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (!size)
return -EINVAL;
- if (!ctx)
- return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
if (!(vbuf_provider || pbuf_provider)) {
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EINVAL;
+ ret = -EINVAL;
+ goto glink_tx_common_err;
}
if (!ch_is_fully_opened(ctx)) {
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EBUSY;
+ ret = -EBUSY;
+ goto glink_tx_common_err;
}
if (size > GLINK_MAX_PKT_SIZE) {
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EINVAL;
+ ret = -EINVAL;
+ goto glink_tx_common_err;
}
if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto glink_tx_common_err;
}
tracer_pkt_log_event(data, GLINK_CORE_TX);
}
@@ -2846,16 +2887,16 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: R[%u]:%zu Intent not present for lcid\n",
__func__, riid, size);
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto glink_tx_common_err;
}
if (is_atomic && !(ctx->transport_ptr->capabilities &
GCAP_AUTO_QUEUE_RX_INT)) {
GLINK_ERR_CH(ctx,
"%s: Cannot request intent in atomic context\n",
__func__);
- rwref_read_put(&ctx->ch_state_lhb2);
- return -EINVAL;
+ ret = -EINVAL;
+ goto glink_tx_common_err;
}
/* request intent of correct size */
@@ -2865,20 +2906,18 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (ret) {
GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
__func__, ret);
- rwref_read_put(&ctx->ch_state_lhb2);
- return ret;
+ goto glink_tx_common_err;
}
while (ch_pop_remote_rx_intent(ctx, size, &riid,
&intent_size, &cookie)) {
- rwref_get(&ctx->ch_state_lhb2);
rwref_read_put(&ctx->ch_state_lhb2);
if (is_atomic) {
GLINK_ERR_CH(ctx,
"%s Intent of size %zu not ready\n",
__func__, size);
- rwref_put(&ctx->ch_state_lhb2);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto glink_tx_common_err_2;
}
if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
@@ -2886,8 +2925,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: Channel closed while waiting for intent\n",
__func__);
- rwref_put(&ctx->ch_state_lhb2);
- return -EBUSY;
+ ret = -EBUSY;
+ goto glink_tx_common_err_2;
}
/* wait for the remote intent req ack */
@@ -2897,8 +2936,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: Intent request ack with size: %zu not granted for lcid\n",
__func__, size);
- rwref_put(&ctx->ch_state_lhb2);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto glink_tx_common_err_2;
}
if (!ctx->int_req_ack) {
@@ -2906,8 +2945,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
"%s: Intent Request with size: %zu %s",
__func__, size,
"not granted for lcid\n");
- rwref_put(&ctx->ch_state_lhb2);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto glink_tx_common_err_2;
}
/* wait for the rx_intent from remote side */
@@ -2917,13 +2956,12 @@ static int glink_tx_common(void *handle, void *pkt_priv,
GLINK_ERR_CH(ctx,
"%s: Intent request with size: %zu not granted for lcid\n",
__func__, size);
- rwref_put(&ctx->ch_state_lhb2);
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto glink_tx_common_err_2;
}
reinit_completion(&ctx->int_req_complete);
rwref_read_get(&ctx->ch_state_lhb2);
- rwref_put(&ctx->ch_state_lhb2);
}
}
@@ -2943,8 +2981,8 @@ static int glink_tx_common(void *handle, void *pkt_priv,
if (!tx_info) {
GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
- rwref_read_put(&ctx->ch_state_lhb2);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto glink_tx_common_err;
}
rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
INIT_LIST_HEAD(&tx_info->list_done);
@@ -2970,7 +3008,10 @@ static int glink_tx_common(void *handle, void *pkt_priv,
else
xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+glink_tx_common_err:
rwref_read_put(&ctx->ch_state_lhb2);
+glink_tx_common_err_2:
+ glink_put_ch_ctx(ctx, false);
return ret;
}
@@ -3011,13 +3052,15 @@ int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
struct glink_core_rx_intent *intent_ptr;
int ret = 0;
- if (!ctx)
- return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
/* Can only queue rx intents if channel is fully opened */
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -3026,13 +3069,16 @@ int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
GLINK_ERR_CH(ctx,
"%s: Intent pointer allocation failed size[%zu]\n",
__func__, size);
+ glink_put_ch_ctx(ctx, false);
return -ENOMEM;
}
GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
intent_ptr->intent_size);
- if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+ if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+ glink_put_ch_ctx(ctx, false);
return ret;
+ }
/* notify remote side of rx intent */
ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
@@ -3040,7 +3086,7 @@ int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
if (ret)
/* unable to transmit, dequeue intent */
ch_remove_local_rx_intent(ctx, intent_ptr->id);
-
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_queue_rx_intent);
@@ -3059,20 +3105,25 @@ bool glink_rx_intent_exists(void *handle, size_t size)
struct channel_ctx *ctx = (struct channel_ctx *)handle;
struct glink_core_rx_intent *intent;
unsigned long flags;
+ int ret;
if (!ctx || !ch_is_fully_opened(ctx))
return false;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return false;
spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
if (size <= intent->intent_size) {
spin_unlock_irqrestore(
&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ glink_put_ch_ctx(ctx, false);
return true;
}
}
spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
-
+ glink_put_ch_ctx(ctx, false);
return false;
}
EXPORT_SYMBOL(glink_rx_intent_exists);
@@ -3093,11 +3144,15 @@ int glink_rx_done(void *handle, const void *ptr, bool reuse)
uint32_t id;
int ret = 0;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
if (IS_ERR_OR_NULL(liid_ptr)) {
/* invalid pointer */
GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+ glink_put_ch_ctx(ctx, false);
return -EINVAL;
}
@@ -3123,7 +3178,7 @@ int glink_rx_done(void *handle, const void *ptr, bool reuse)
/* send rx done */
ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
ctx->lcid, id, reuse);
-
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_rx_done);
@@ -3171,12 +3226,13 @@ int glink_sigs_set(void *handle, uint32_t sigs)
struct channel_ctx *ctx = (struct channel_ctx *)handle;
int ret;
- if (!ctx)
- return -EINVAL;
-
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -3186,6 +3242,7 @@ int glink_sigs_set(void *handle, uint32_t sigs)
ctx->lcid, ctx->lsigs);
GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_sigs_set);
@@ -3201,17 +3258,22 @@ EXPORT_SYMBOL(glink_sigs_set);
int glink_sigs_local_get(void *handle, uint32_t *sigs)
{
struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
- if (!ctx || !sigs)
+ if (!sigs)
return -EINVAL;
-
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
*sigs = ctx->lsigs;
+ glink_put_ch_ctx(ctx, false);
return 0;
}
EXPORT_SYMBOL(glink_sigs_local_get);
@@ -3227,17 +3289,23 @@ EXPORT_SYMBOL(glink_sigs_local_get);
int glink_sigs_remote_get(void *handle, uint32_t *sigs)
{
struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
- if (!ctx || !sigs)
+ if (!sigs)
return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
*sigs = ctx->rsigs;
+ glink_put_ch_ctx(ctx, false);
return 0;
}
EXPORT_SYMBOL(glink_sigs_remote_get);
@@ -3333,12 +3401,16 @@ int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
int ret;
unsigned long req_rate_kBps;
- if (!ctx || !latency_us || !pkt_size)
+ if (!latency_us || !pkt_size)
return -EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -3348,7 +3420,7 @@ int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
if (ret < 0)
GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
__func__, latency_us, pkt_size);
-
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_qos_latency);
@@ -3366,16 +3438,18 @@ int glink_qos_cancel(void *handle)
struct channel_ctx *ctx = (struct channel_ctx *)handle;
int ret;
- if (!ctx)
- return -EINVAL;
-
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
ret = glink_qos_reset_priority(ctx);
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_qos_cancel);
@@ -3396,12 +3470,13 @@ int glink_qos_start(void *handle)
int ret;
unsigned long flags;
- if (!ctx)
- return -EINVAL;
-
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return -EBUSY;
}
@@ -3410,6 +3485,7 @@ int glink_qos_start(void *handle)
ret = glink_qos_add_ch_tx_intent(ctx);
spin_unlock(&ctx->tx_lists_lock_lhc3);
spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+ glink_put_ch_ctx(ctx, false);
return ret;
}
EXPORT_SYMBOL(glink_qos_start);
@@ -3428,16 +3504,20 @@ EXPORT_SYMBOL(glink_qos_start);
unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
{
struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
- if (!ctx)
- return (unsigned long)-EINVAL;
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return (unsigned long)ret;
if (!ch_is_fully_opened(ctx)) {
GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
__func__);
+ glink_put_ch_ctx(ctx, false);
return (unsigned long)-EBUSY;
}
+ glink_put_ch_ctx(ctx, false);
return ctx->transport_ptr->ops->get_power_vote_ramp_time(
ctx->transport_ptr->ops,
glink_prio_to_power_state(ctx->transport_ptr,
@@ -3521,12 +3601,16 @@ EXPORT_SYMBOL(glink_rpm_mask_rx_interrupt);
int glink_wait_link_down(void *handle)
{
struct channel_ctx *ctx = (struct channel_ctx *)handle;
+ int ret;
- if (!ctx)
- return -EINVAL;
- if (!ctx->transport_ptr)
+ ret = glink_get_ch_ctx(ctx);
+ if (ret)
+ return ret;
+ if (!ctx->transport_ptr) {
+ glink_put_ch_ctx(ctx, false);
return -EOPNOTSUPP;
-
+ }
+ glink_put_ch_ctx(ctx, false);
return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
}
EXPORT_SYMBOL(glink_wait_link_down);
@@ -6019,6 +6103,7 @@ EXPORT_SYMBOL(glink_get_xprt_log_ctx);
static int glink_init(void)
{
log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
+ rwlock_init(&magic_lock);
if (!log_ctx)
GLINK_ERR("%s: unable to create log context\n", __func__);
glink_debugfs_init();
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index aff9683b394f..26b1cad9d5aa 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -208,15 +208,15 @@ enum icnss_debug_quirks {
PDR_ONLY,
};
-#define ICNSS_QUIRKS_DEFAULT ( \
- BIT(SSR_ONLY) \
- )
+#define ICNSS_QUIRKS_DEFAULT 0
unsigned long quirks = ICNSS_QUIRKS_DEFAULT;
module_param(quirks, ulong, 0600);
void *icnss_ipc_log_context;
+#define ICNSS_EVENT_PENDING 2989
+
enum icnss_driver_event_type {
ICNSS_DRIVER_EVENT_SERVER_ARRIVE,
ICNSS_DRIVER_EVENT_SERVER_EXIT,
@@ -370,6 +370,7 @@ static struct icnss_priv {
struct notifier_block get_service_nb;
void *modem_notify_handler;
struct notifier_block modem_ssr_nb;
+ struct wakeup_source ws;
} *penv;
static void icnss_hw_write_reg(void *base, u32 offset, u32 val)
@@ -486,6 +487,7 @@ static int icnss_driver_event_post(enum icnss_driver_event_type type,
event->type = type;
event->data = data;
init_completion(&event->complete);
+ event->ret = ICNSS_EVENT_PENDING;
event->sync = sync;
spin_lock_irqsave(&penv->event_lock, flags);
@@ -494,12 +496,26 @@ static int icnss_driver_event_post(enum icnss_driver_event_type type,
penv->stats.events[type].posted++;
queue_work(penv->event_wq, &penv->event_work);
- if (sync) {
- ret = wait_for_completion_interruptible(&event->complete);
- if (ret == 0)
- ret = event->ret;
- kfree(event);
+
+ if (!sync)
+ return ret;
+
+ ret = wait_for_completion_interruptible(&event->complete);
+
+ icnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+ icnss_driver_event_to_str(type), type, penv->state, ret,
+ event->ret);
+
+ spin_lock_irqsave(&penv->event_lock, flags);
+ if (ret == -ERESTARTSYS && event->ret == ICNSS_EVENT_PENDING) {
+ event->sync = false;
+ spin_unlock_irqrestore(&penv->event_lock, flags);
+ return ret;
}
+ spin_unlock_irqrestore(&penv->event_lock, flags);
+
+ ret = event->ret;
+ kfree(event);
return ret;
}
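The icnss_driver_event_post() change above hands event ownership to the worker when a synchronous waiter is interrupted: if the wait returns -ERESTARTSYS while the event is still pending, the event is flipped to asynchronous under the event lock so the worker frees it and the caller never touches freed memory. A reduced sketch of that decision follows, with a plain mutex and illustrative constants.

/* Sketch of the ownership hand-off added to icnss_driver_event_post():
 * if the sync waiter is interrupted before the worker finishes, the
 * event is flipped to async under the event lock so the worker frees
 * it; otherwise the waiter consumes the result and frees it itself.
 */
#include <pthread.h>
#include <stdlib.h>

#define EVENT_PENDING 2989     /* sentinel meaning "worker has not run yet" */
#define ERESTARTSYS   512      /* kernel-internal value, shown only for the comparison */

struct drv_event {
	int ret;               /* worker result, EVENT_PENDING until it runs */
	int sync;              /* 1: waiter frees, 0: worker frees */
};

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;

/* Decide who owns (and frees) the event after the wait returns. */
static int finish_sync_wait(struct drv_event *event, int wait_ret)
{
	int ret;

	pthread_mutex_lock(&event_lock);
	if (wait_ret == -ERESTARTSYS && event->ret == EVENT_PENDING) {
		event->sync = 0;               /* hand ownership to the worker */
		pthread_mutex_unlock(&event_lock);
		return wait_ret;               /* caller must not free the event */
	}
	pthread_mutex_unlock(&event_lock);

	ret = event->ret;                      /* worker already completed it */
	free(event);
	return ret;
}

int main(void)
{
	struct drv_event *ev = calloc(1, sizeof(*ev));

	ev->ret = 0;            /* pretend the worker already ran successfully */
	ev->sync = 1;
	return finish_sync_wait(ev, 0);
}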
@@ -1039,7 +1055,7 @@ int icnss_hw_reset(struct icnss_priv *priv)
MPM_WCSSAON_CONFIG_FORCE_ACTIVE, 1);
icnss_hw_poll_reg_field(priv->mem_base_va, SR_WCSSAON_SR_LSB_OFFSET,
- SR_WCSSAON_SR_LSB_RETENTION_STATUS, 1, 10,
+ SR_WCSSAON_SR_LSB_RETENTION_STATUS, 1, 100,
ICNSS_HW_REG_RETRY);
for (i = 0; i < ICNSS_HW_REG_RETRY; i++) {
@@ -1546,6 +1562,13 @@ static int wlfw_wlan_mode_send_sync_msg(enum wlfw_driver_mode_enum_v01 mode)
goto out;
}
+	/* During recovery, do not send a mode request for WLAN OFF, as the
+	 * FW is not able to process it.
+	 */
+ if (test_bit(ICNSS_PD_RESTART, &penv->state) &&
+ mode == QMI_WLFW_OFF_V01)
+ return 0;
+
icnss_pr_dbg("Sending Mode request, state: 0x%lx, mode: %d\n",
penv->state, mode);
@@ -1886,6 +1909,9 @@ static int icnss_call_driver_reinit(struct icnss_priv *priv)
if (!priv->ops || !priv->ops->reinit)
goto out;
+ if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+ goto out;
+
icnss_hw_power_on(priv);
ret = priv->ops->reinit(&priv->pdev->dev);
@@ -1916,6 +1942,8 @@ static int icnss_driver_event_fw_ready_ind(void *data)
if (!penv)
return -ENODEV;
+ __pm_stay_awake(&penv->ws);
+
set_bit(ICNSS_FW_READY, &penv->state);
icnss_pr_info("WLAN FW is ready: 0x%lx\n", penv->state);
@@ -1933,7 +1961,10 @@ static int icnss_driver_event_fw_ready_ind(void *data)
else
ret = icnss_call_driver_probe(penv);
+ __pm_relax(&penv->ws);
+
out:
+ __pm_relax(&penv->ws);
return ret;
}
@@ -1941,10 +1972,10 @@ static int icnss_driver_event_register_driver(void *data)
{
int ret = 0;
- if (penv->ops) {
- ret = -EEXIST;
- goto out;
- }
+ if (penv->ops)
+ return -EEXIST;
+
+ __pm_stay_awake(&penv->ws);
penv->ops = data;
@@ -1971,16 +2002,21 @@ static int icnss_driver_event_register_driver(void *data)
set_bit(ICNSS_DRIVER_PROBED, &penv->state);
+ __pm_relax(&penv->ws);
+
return 0;
power_off:
icnss_hw_power_off(penv);
out:
+ __pm_relax(&penv->ws);
return ret;
}
static int icnss_driver_event_unregister_driver(void *data)
{
+ __pm_stay_awake(&penv->ws);
+
if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state)) {
penv->ops = NULL;
goto out;
@@ -1996,6 +2032,7 @@ static int icnss_driver_event_unregister_driver(void *data)
icnss_hw_power_off(penv);
out:
+ __pm_relax(&penv->ws);
return 0;
}
@@ -2012,6 +2049,9 @@ static int icnss_qmi_pd_event_service_down(struct icnss_priv *priv, void *data)
if (!priv->ops || !priv->ops->shutdown)
goto out;
+ if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
+ goto out;
+
priv->ops->shutdown(&priv->pdev->dev);
out:
@@ -2071,11 +2111,15 @@ static void icnss_driver_event_work(struct work_struct *work)
penv->stats.events[event->type].processed++;
+ spin_lock_irqsave(&penv->event_lock, flags);
if (event->sync) {
event->ret = ret;
complete(&event->complete);
- } else
- kfree(event);
+ continue;
+ }
+ spin_unlock_irqrestore(&penv->event_lock, flags);
+
+ kfree(event);
spin_lock_irqsave(&penv->event_lock, flags);
}
@@ -2372,6 +2416,9 @@ int icnss_register_driver(struct icnss_driver_ops *ops)
ret = icnss_driver_event_post(ICNSS_DRIVER_EVENT_REGISTER_DRIVER,
true, ops);
+ if (ret == -ERESTARTSYS)
+ ret = 0;
+
out:
return ret;
}
@@ -2890,7 +2937,7 @@ static int icnss_get_vreg_info(struct device *dev,
reg = devm_regulator_get_optional(dev, vreg_info->name);
- if (IS_ERR(reg) == -EPROBE_DEFER) {
+ if (PTR_ERR(reg) == -EPROBE_DEFER) {
icnss_pr_err("EPROBE_DEFER for regulator: %s\n",
vreg_info->name);
ret = PTR_ERR(reg);
@@ -2910,7 +2957,6 @@ static int icnss_get_vreg_info(struct device *dev,
vreg_info->name, ret);
goto done;
}
-
}
vreg_info->reg = reg;
@@ -3502,6 +3548,8 @@ static int icnss_probe(struct platform_device *pdev)
spin_lock_init(&priv->event_lock);
spin_lock_init(&priv->on_off_lock);
+ wakeup_source_init(&priv->ws, "icnss_ws");
+
priv->event_wq = alloc_workqueue("icnss_driver_event", WQ_UNBOUND, 1);
if (!priv->event_wq) {
icnss_pr_err("Workqueue creation failed\n");
@@ -3563,6 +3611,8 @@ static int icnss_remove(struct platform_device *pdev)
icnss_bw_deinit(penv);
+ wakeup_source_trash(&penv->ws);
+
icnss_hw_power_off(penv);
dev_set_drvdata(&pdev->dev, NULL);
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 908cfb33bf99..3908bc151c2b 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -609,8 +609,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
{
struct acc_dev *dev = fp->private_data;
struct usb_request *req;
- ssize_t r = count;
- unsigned xfer;
+ ssize_t r = count, xfer, len;
int ret = 0;
pr_debug("acc_read(%zu)\n", count);
@@ -623,6 +622,8 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
if (count > BULK_BUFFER_SIZE)
count = BULK_BUFFER_SIZE;
+ len = ALIGN(count, dev->ep_out->maxpacket);
+
/* we will block until we're online */
pr_debug("acc_read: waiting for online\n");
ret = wait_event_interruptible(dev->read_wq, dev->online);
@@ -640,7 +641,7 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
requeue_req:
/* queue a request */
req = dev->rx_req[0];
- req->length = count;
+ req->length = len;
dev->rx_done = 0;
ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
if (ret < 0) {
@@ -936,6 +937,8 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
memset(dev->serial, 0, sizeof(dev->serial));
dev->start_requested = 0;
dev->audio_mode = 0;
+ strlcpy(dev->manufacturer, "Android", ACC_STRING_SIZE);
+ strlcpy(dev->model, "Android", ACC_STRING_SIZE);
}
}
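acc_read() above now queues the OUT request with a length rounded up to the endpoint's wMaxPacketSize while still copying only 'count' bytes to the caller, so a maximally sized packet cannot overrun the request. A small sketch of the rounding follows; the ALIGN macro mirrors the kernel's for power-of-two sizes and the 512-byte packet size is an assumption.

/* Sketch of the receive-length fix in acc_read(): the request queued on
 * the OUT endpoint is sized to a multiple of the endpoint's
 * wMaxPacketSize, while only 'count' bytes are returned to the caller.
 */
#include <stdio.h>
#include <stddef.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t count = 1000;             /* bytes the reader asked for */
	size_t maxpacket = 512;          /* assumed bulk OUT wMaxPacketSize */
	size_t len = ALIGN(count, maxpacket);

	/* queue a 1024-byte request so a full-size packet cannot overflow */
	printf("request length %zu for a %zu-byte read\n", len, count);
	return 0;
}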
@@ -1239,13 +1242,13 @@ static int acc_setup(void)
INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
INIT_WORK(&dev->hid_work, acc_hid_work);
- /* _acc_dev must be set before calling usb_gadget_register_driver */
- _acc_dev = dev;
-
ret = misc_register(&acc_device);
if (ret)
goto err;
+ /* _acc_dev must be set before calling usb_gadget_register_driver */
+ _acc_dev = dev;
+
return 0;
err:
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index eb02cad2a634..37081e5e4a0b 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -2971,28 +2971,6 @@ static int hdmi_tx_get_cable_status(struct platform_device *pdev, u32 vote)
return hpd;
}
-int msm_hdmi_register_audio_codec(struct platform_device *pdev,
- struct msm_ext_disp_audio_codec_ops *ops)
-{
- struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
- int ret = 0;
-
- if (!hdmi_ctrl || !ops) {
- DEV_ERR("%s: invalid input\n", __func__);
- return -EPROBE_DEFER;
- }
-
- ret = msm_ext_disp_register_audio_codec(hdmi_ctrl->ext_pdev, ops);
- if (ret) {
- pr_err("%s: failed to register codec\n", __func__);
- goto end;
- }
-
-end:
- return ret;
-} /* hdmi_tx_audio_register */
-EXPORT_SYMBOL(msm_hdmi_register_audio_codec);
-
static int hdmi_tx_setup_tmds_clk_rate(struct hdmi_tx_ctrl *hdmi_ctrl)
{
u32 rate = 0;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index ed55057e1d7e..abc048866313 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -4315,9 +4315,11 @@ void mdss_mdp_check_ctl_reset_status(struct mdss_mdp_ctl *ctl)
return;
pr_debug("hw ctl reset is set for ctl:%d\n", ctl->num);
- status = mdss_mdp_poll_ctl_reset_status(ctl, 5);
+ /* poll for at least ~1 frame */
+ status = mdss_mdp_poll_ctl_reset_status(ctl, 320);
if (status) {
- pr_err("hw recovery is not complete for ctl:%d\n", ctl->num);
+ pr_err("hw recovery is not complete for ctl:%d status:0x%x\n",
+ ctl->num, status);
MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt", "dbg_bus",
"vbif_dbg_bus", "panic");
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 04e3c09e36d7..9dda467e53cc 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -4429,16 +4429,20 @@ static int __mdss_overlay_src_split_sort(struct msm_fb_data_type *mfd,
__overlay_swap_func);
for (i = 0; i < num_ovs; i++) {
+ if (ovs[i].z_order >= MDSS_MDP_MAX_STAGE) {
+ pr_err("invalid stage:%u\n", ovs[i].z_order);
+ return -EINVAL;
+ }
if (ovs[i].dst_rect.x < left_lm_w) {
if (left_lm_zo_cnt[ovs[i].z_order] == 2) {
- pr_err("more than 2 ov @ stage%d on left lm\n",
+ pr_err("more than 2 ov @ stage%u on left lm\n",
ovs[i].z_order);
return -EINVAL;
}
left_lm_zo_cnt[ovs[i].z_order]++;
} else {
if (right_lm_zo_cnt[ovs[i].z_order] == 2) {
- pr_err("more than 2 ov @ stage%d on right lm\n",
+ pr_err("more than 2 ov @ stage%u on right lm\n",
ovs[i].z_order);
return -EINVAL;
}
diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/video/fbdev/msm/msm_ext_display.c
index 903cab1ac059..e229f52057d4 100644
--- a/drivers/video/fbdev/msm/msm_ext_display.c
+++ b/drivers/video/fbdev/msm/msm_ext_display.c
@@ -636,6 +636,12 @@ end:
return ret;
}
+int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+ struct msm_ext_disp_audio_codec_ops *ops)
+{
+ return msm_ext_disp_register_audio_codec(pdev, ops);
+}
+
int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 54c99d9cb245..873a778d5370 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -133,58 +133,17 @@ int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops);
/*
+ * msm_hdmi_register_audio_codec() - wrapper for hdmi audio codec registration
+ * @pdev: platform device pointer
+ * @ops: audio codec operations
+ */
+int msm_hdmi_register_audio_codec(struct platform_device *pdev,
+ struct msm_ext_disp_audio_codec_ops *ops);
+/*
* msm_ext_disp_register_intf() - display interface registration
* @init_data: data needed to register the display interface
*/
int msm_ext_disp_register_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data);
-/* TODO: remove all the display specific functions below */
-#ifdef CONFIG_FB_MSM_MDSS_DP_PANEL
-int msm_dp_register_audio_codec(struct platform_device *pdev,
- struct msm_ext_disp_audio_codec_ops *ops);
-
-#else
-static inline int msm_dp_register_audio_codec(struct platform_device *pdev,
- struct msm_ext_disp_audio_codec_ops *ops) {
- return 0;
-}
-#endif /* CONFIG_FB_MSM_MDSS_DP_PANEL */
-#ifdef CONFIG_FB_MSM_MDSS_HDMI_PANEL
-/*
- * Register for HDMI cable connect or disconnect notification.
- * @param handler callback handler for notification
- * @return negative value as error otherwise current status of cable
- */
-int register_hdmi_cable_notification(
- struct ext_disp_cable_notify *handler);
-
-/*
- * Un-register for HDMI cable connect or disconnect notification.
- * @param handler callback handler for notification
- * @return negative value as error
- */
-int unregister_hdmi_cable_notification(
- struct ext_disp_cable_notify *handler);
-
-int msm_hdmi_register_audio_codec(struct platform_device *pdev,
- struct msm_ext_disp_audio_codec_ops *ops);
-
-#else
-static inline int register_hdmi_cable_notification(
- struct ext_disp_cable_notify *handler) {
- return 0;
-}
-
-static inline int unregister_hdmi_cable_notification(
- struct ext_disp_cable_notify *handler) {
- return 0;
-}
-
-static inline int msm_hdmi_register_audio_codec(struct platform_device *pdev,
- struct msm_ext_disp_audio_codec_ops *ops) {
- return 0;
-}
-#endif /* CONFIG_FB_MSM_MDSS_HDMI_PANEL */
-
#endif /*_MSM_EXT_DISPLAY_H_*/
diff --git a/include/linux/qpnp/qpnp-haptic.h b/include/linux/qpnp/qpnp-haptic.h
index 92a66e844f94..95e514a513e2 100644
--- a/include/linux/qpnp/qpnp-haptic.h
+++ b/include/linux/qpnp/qpnp-haptic.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,12 +10,13 @@
* GNU General Public License for more details.
*/
#ifndef __QPNP_HAPTIC_H
+#define __QPNP_HAPTIC_H
/* interface for the other module to play different sequences */
#ifdef CONFIG_QPNP_HAPTIC
int qpnp_hap_play_byte(u8 data, bool on);
#else
-int qpnp_hap_play_byte(u8 data, bool on);
+static inline int qpnp_hap_play_byte(u8 data, bool on)
{
return 0;
}
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index e0b0d2b12b88..73b0fbe034fb 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -63,6 +63,9 @@
struct wiphy;
+#define CFG80211_SCAN_BSSID 1
+#define CFG80211_CONNECT_PREV_BSSID 1
+
/*
* wireless hardware capability structures
*/
@@ -1455,6 +1458,7 @@ struct cfg80211_ssid {
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
*/
struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
@@ -1471,6 +1475,7 @@ struct cfg80211_scan_request {
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+ u8 bssid[ETH_ALEN] __aligned(2);
/* internal */
struct wiphy *wiphy;
@@ -1893,6 +1898,7 @@ struct cfg80211_ibss_params {
* @vht_capa_mask: The bits of vht_capa which are to be used.
* @pbss: if set, connect to a PCP instead of AP. Valid for DMG
* networks.
+ * @prev_bssid: previous BSSID, if not %NULL use reassociate frame
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -1916,6 +1922,7 @@ struct cfg80211_connect_params {
struct ieee80211_vht_cap vht_capa;
struct ieee80211_vht_cap vht_capa_mask;
bool pbss;
+ const u8 *prev_bssid;
};
/**
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index b5323800eeb5..c4984741be61 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -322,7 +322,9 @@
* @NL80211_CMD_GET_SCAN: get scan results
* @NL80211_CMD_TRIGGER_SCAN: trigger a new scan with the given parameters
* %NL80211_ATTR_TX_NO_CCK_RATE is used to decide whether to send the
- * probe requests at CCK rate or not.
+ * probe requests at CCK rate or not. %NL80211_ATTR_MAC can be used to
+ * specify a BSSID to scan for; if not included, the wildcard BSSID will
+ * be used.
* @NL80211_CMD_NEW_SCAN_RESULTS: scan notification (as a reply to
* NL80211_CMD_GET_SCAN and on the "scan" multicast group)
* @NL80211_CMD_SCAN_ABORTED: scan was aborted, for unspecified reasons,
diff --git a/include/uapi/media/msm_cam_sensor.h b/include/uapi/media/msm_cam_sensor.h
index 540a96c57e5b..2c7ada5d02cf 100644
--- a/include/uapi/media/msm_cam_sensor.h
+++ b/include/uapi/media/msm_cam_sensor.h
@@ -34,6 +34,9 @@
#define MAX_NUMBER_OF_STEPS 47
#define MAX_REGULATOR 5
+/* msm_flash_query_data_t query types */
+#define FLASH_QUERY_CURRENT 1
+
#define MSM_V4L2_PIX_FMT_META v4l2_fourcc('M', 'E', 'T', 'A') /* META */
#define MSM_V4L2_PIX_FMT_META10 v4l2_fourcc('M', 'E', '1', '0') /* META10 */
#define MSM_V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4')
@@ -531,6 +534,12 @@ struct msm_flash_cfg_data_t {
} cfg;
};
+struct msm_flash_query_data_t {
+ int32_t flags;
+ int32_t query_type;
+ int32_t max_avail_curr;
+};
+
/* sensor init structures and enums */
enum msm_sensor_init_cfg_type_t {
CFG_SINIT_PROBE,
@@ -586,5 +595,8 @@ struct sensor_init_cfg_data {
#define VIDIOC_MSM_OIS_CFG_DOWNLOAD \
_IOWR('V', BASE_VIDIOC_PRIVATE + 14, struct msm_ois_cfg_download_data)
+#define VIDIOC_MSM_FLASH_QUERY_DATA \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct msm_flash_query_data_t)
+
#endif
diff --git a/include/uapi/media/msmb_isp.h b/include/uapi/media/msmb_isp.h
index 9f933dc7e84f..82e0bdd9209d 100644
--- a/include/uapi/media/msmb_isp.h
+++ b/include/uapi/media/msmb_isp.h
@@ -324,7 +324,9 @@ enum msm_vfe_axi_stream_update_type {
UPDATE_STREAM_ADD_BUFQ,
UPDATE_STREAM_REMOVE_BUFQ,
UPDATE_STREAM_SW_FRAME_DROP,
+ UPDATE_STREAM_REQUEST_FRAMES_VER2,
};
+#define UPDATE_STREAM_REQUEST_FRAMES_VER2 UPDATE_STREAM_REQUEST_FRAMES_VER2
enum msm_vfe_iommu_type {
IOMMU_ATTACH,
@@ -347,6 +349,13 @@ struct msm_vfe_axi_stream_cfg_update_info {
struct msm_isp_sw_framskip sw_skip_info;
};
+struct msm_vfe_axi_stream_cfg_update_info_req_frm {
+ uint32_t stream_handle;
+ uint32_t user_stream_id;
+ uint32_t frame_id;
+ uint32_t buf_index;
+};
+
struct msm_vfe_axi_halt_cmd {
uint32_t stop_camif;
uint32_t overflow_detected;
@@ -365,8 +374,15 @@ struct msm_vfe_axi_restart_cmd {
struct msm_vfe_axi_stream_update_cmd {
uint32_t num_streams;
enum msm_vfe_axi_stream_update_type update_type;
- struct msm_vfe_axi_stream_cfg_update_info
+ /*
+ * For backward compatibility, ensure 1st member of any struct
+ * in union below is uint32_t stream_handle.
+ */
+ union {
+ struct msm_vfe_axi_stream_cfg_update_info
update_info[MSM_ISP_STATS_MAX];
+ struct msm_vfe_axi_stream_cfg_update_info_req_frm req_frm_ver2;
+ };
};
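The union added to msm_vfe_axi_stream_update_cmd relies on the rule stated in its comment: every member struct begins with uint32_t stream_handle, so legacy code can read the handle through the old member no matter which variant user space filled in. A self-contained sketch of that layout rule follows, with illustrative field names outside of stream_handle.

/* Sketch of the UAPI-compatibility rule above: every struct placed in the
 * update union starts with a uint32_t stream_handle, so code that only
 * knows the legacy layout can still find the handle.
 */
#include <stdint.h>
#include <stdio.h>

struct legacy_update {
	uint32_t stream_handle;
	uint32_t skip_pattern;
};

struct req_frm_v2 {
	uint32_t stream_handle;     /* must stay first, as the comment requires */
	uint32_t user_stream_id;
	uint32_t frame_id;
	uint32_t buf_index;
};

struct stream_update_cmd {
	uint32_t num_streams;
	uint32_t update_type;
	union {
		struct legacy_update update_info[4];
		struct req_frm_v2 req_frm_ver2;
	};
};

int main(void)
{
	struct stream_update_cmd cmd = { .num_streams = 1, .update_type = 2 };

	cmd.req_frm_ver2.stream_handle = 0xabcd;
	/* a legacy reader still sees the handle through the old member */
	printf("handle via legacy view: 0x%x\n",
	       (unsigned)cmd.update_info[0].stream_handle);
	return 0;
}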
struct msm_vfe_smmu_attach_cmd {
@@ -808,82 +824,133 @@ struct msm_isp_ahb_clk_cfg {
#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('B', 'A', '1', '4') /* 14 GRGR.BGBG.*/
#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.GBGB.*/
+enum msm_isp_ioctl_cmd_code {
+ MSM_VFE_REG_CFG = BASE_VIDIOC_PRIVATE,
+ MSM_ISP_REQUEST_BUF,
+ MSM_ISP_ENQUEUE_BUF,
+ MSM_ISP_RELEASE_BUF,
+ MSM_ISP_REQUEST_STREAM,
+ MSM_ISP_CFG_STREAM,
+ MSM_ISP_RELEASE_STREAM,
+ MSM_ISP_INPUT_CFG,
+ MSM_ISP_SET_SRC_STATE,
+ MSM_ISP_REQUEST_STATS_STREAM,
+ MSM_ISP_CFG_STATS_STREAM,
+ MSM_ISP_RELEASE_STATS_STREAM,
+ MSM_ISP_REG_UPDATE_CMD,
+ MSM_ISP_UPDATE_STREAM,
+ MSM_VFE_REG_LIST_CFG,
+ MSM_ISP_SMMU_ATTACH,
+ MSM_ISP_UPDATE_STATS_STREAM,
+ MSM_ISP_AXI_HALT,
+ MSM_ISP_AXI_RESET,
+ MSM_ISP_AXI_RESTART,
+ MSM_ISP_FETCH_ENG_START,
+ MSM_ISP_DEQUEUE_BUF,
+ MSM_ISP_SET_DUAL_HW_MASTER_SLAVE,
+ MSM_ISP_MAP_BUF_START_FE,
+ MSM_ISP_UNMAP_BUF,
+};
+
#define VIDIOC_MSM_VFE_REG_CFG \
- _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2)
+ _IOWR('V', MSM_VFE_REG_CFG, \
+ struct msm_vfe_cfg_cmd2)
#define VIDIOC_MSM_ISP_REQUEST_BUF \
- _IOWR('V', BASE_VIDIOC_PRIVATE+1, struct msm_isp_buf_request)
+ _IOWR('V', MSM_ISP_REQUEST_BUF, \
+ struct msm_isp_buf_request)
#define VIDIOC_MSM_ISP_ENQUEUE_BUF \
- _IOWR('V', BASE_VIDIOC_PRIVATE+2, struct msm_isp_qbuf_info)
+ _IOWR('V', MSM_ISP_ENQUEUE_BUF, \
+ struct msm_isp_qbuf_info)
#define VIDIOC_MSM_ISP_RELEASE_BUF \
- _IOWR('V', BASE_VIDIOC_PRIVATE+3, struct msm_isp_buf_request)
+ _IOWR('V', MSM_ISP_RELEASE_BUF, \
+ struct msm_isp_buf_request)
#define VIDIOC_MSM_ISP_REQUEST_STREAM \
- _IOWR('V', BASE_VIDIOC_PRIVATE+4, struct msm_vfe_axi_stream_request_cmd)
+ _IOWR('V', MSM_ISP_REQUEST_STREAM, \
+ struct msm_vfe_axi_stream_request_cmd)
#define VIDIOC_MSM_ISP_CFG_STREAM \
- _IOWR('V', BASE_VIDIOC_PRIVATE+5, struct msm_vfe_axi_stream_cfg_cmd)
+ _IOWR('V', MSM_ISP_CFG_STREAM, \
+ struct msm_vfe_axi_stream_cfg_cmd)
#define VIDIOC_MSM_ISP_RELEASE_STREAM \
- _IOWR('V', BASE_VIDIOC_PRIVATE+6, struct msm_vfe_axi_stream_release_cmd)
+ _IOWR('V', MSM_ISP_RELEASE_STREAM, \
+ struct msm_vfe_axi_stream_release_cmd)
#define VIDIOC_MSM_ISP_INPUT_CFG \
- _IOWR('V', BASE_VIDIOC_PRIVATE+7, struct msm_vfe_input_cfg)
+ _IOWR('V', MSM_ISP_INPUT_CFG, \
+ struct msm_vfe_input_cfg)
#define VIDIOC_MSM_ISP_SET_SRC_STATE \
- _IOWR('V', BASE_VIDIOC_PRIVATE+8, struct msm_vfe_axi_src_state)
+ _IOWR('V', MSM_ISP_SET_SRC_STATE, \
+ struct msm_vfe_axi_src_state)
#define VIDIOC_MSM_ISP_REQUEST_STATS_STREAM \
- _IOWR('V', BASE_VIDIOC_PRIVATE+9, \
- struct msm_vfe_stats_stream_request_cmd)
+ _IOWR('V', MSM_ISP_REQUEST_STATS_STREAM, \
+ struct msm_vfe_stats_stream_request_cmd)
#define VIDIOC_MSM_ISP_CFG_STATS_STREAM \
- _IOWR('V', BASE_VIDIOC_PRIVATE+10, struct msm_vfe_stats_stream_cfg_cmd)
+ _IOWR('V', MSM_ISP_CFG_STATS_STREAM, \
+ struct msm_vfe_stats_stream_cfg_cmd)
#define VIDIOC_MSM_ISP_RELEASE_STATS_STREAM \
- _IOWR('V', BASE_VIDIOC_PRIVATE+11, \
- struct msm_vfe_stats_stream_release_cmd)
+ _IOWR('V', MSM_ISP_RELEASE_STATS_STREAM, \
+ struct msm_vfe_stats_stream_release_cmd)
#define VIDIOC_MSM_ISP_REG_UPDATE_CMD \
- _IOWR('V', BASE_VIDIOC_PRIVATE+12, enum msm_vfe_input_src)
+ _IOWR('V', MSM_ISP_REG_UPDATE_CMD, \
+ enum msm_vfe_input_src)
#define VIDIOC_MSM_ISP_UPDATE_STREAM \
- _IOWR('V', BASE_VIDIOC_PRIVATE+13, struct msm_vfe_axi_stream_update_cmd)
+ _IOWR('V', MSM_ISP_UPDATE_STREAM, \
+ struct msm_vfe_axi_stream_update_cmd)
#define VIDIOC_MSM_VFE_REG_LIST_CFG \
- _IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list)
+ _IOWR('V', MSM_VFE_REG_LIST_CFG, \
+ struct msm_vfe_cfg_cmd_list)
#define VIDIOC_MSM_ISP_SMMU_ATTACH \
- _IOWR('V', BASE_VIDIOC_PRIVATE+15, struct msm_vfe_smmu_attach_cmd)
+ _IOWR('V', MSM_ISP_SMMU_ATTACH, \
+ struct msm_vfe_smmu_attach_cmd)
#define VIDIOC_MSM_ISP_UPDATE_STATS_STREAM \
- _IOWR('V', BASE_VIDIOC_PRIVATE+16, struct msm_vfe_axi_stream_update_cmd)
+ _IOWR('V', MSM_ISP_UPDATE_STATS_STREAM, \
+ struct msm_vfe_axi_stream_update_cmd)
#define VIDIOC_MSM_ISP_AXI_HALT \
- _IOWR('V', BASE_VIDIOC_PRIVATE+17, struct msm_vfe_axi_halt_cmd)
+ _IOWR('V', MSM_ISP_AXI_HALT, \
+ struct msm_vfe_axi_halt_cmd)
#define VIDIOC_MSM_ISP_AXI_RESET \
- _IOWR('V', BASE_VIDIOC_PRIVATE+18, struct msm_vfe_axi_reset_cmd)
+ _IOWR('V', MSM_ISP_AXI_RESET, \
+ struct msm_vfe_axi_reset_cmd)
#define VIDIOC_MSM_ISP_AXI_RESTART \
- _IOWR('V', BASE_VIDIOC_PRIVATE+19, struct msm_vfe_axi_restart_cmd)
+ _IOWR('V', MSM_ISP_AXI_RESTART, \
+ struct msm_vfe_axi_restart_cmd)
#define VIDIOC_MSM_ISP_FETCH_ENG_START \
- _IOWR('V', BASE_VIDIOC_PRIVATE+20, struct msm_vfe_fetch_eng_start)
+ _IOWR('V', MSM_ISP_FETCH_ENG_START, \
+ struct msm_vfe_fetch_eng_start)
#define VIDIOC_MSM_ISP_DEQUEUE_BUF \
- _IOWR('V', BASE_VIDIOC_PRIVATE+21, struct msm_isp_qbuf_info)
+ _IOWR('V', MSM_ISP_DEQUEUE_BUF, \
+ struct msm_isp_qbuf_info)
#define VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE \
- _IOWR('V', BASE_VIDIOC_PRIVATE+22, struct msm_isp_set_dual_hw_ms_cmd)
+ _IOWR('V', MSM_ISP_SET_DUAL_HW_MASTER_SLAVE, \
+ struct msm_isp_set_dual_hw_ms_cmd)
#define VIDIOC_MSM_ISP_MAP_BUF_START_FE \
- _IOWR('V', BASE_VIDIOC_PRIVATE+23, struct msm_vfe_fetch_eng_start)
+ _IOWR('V', MSM_ISP_MAP_BUF_START_FE, \
+ struct msm_vfe_fetch_eng_start)
#define VIDIOC_MSM_ISP_UNMAP_BUF \
- _IOWR('V', BASE_VIDIOC_PRIVATE+24, struct msm_isp_unmap_buf_req)
+ _IOWR('V', MSM_ISP_UNMAP_BUF, \
+ struct msm_isp_unmap_buf_req)
#define VIDIOC_MSM_ISP_AHB_CLK_CFG \
_IOWR('V', BASE_VIDIOC_PRIVATE+25, struct msm_isp_ahb_clk_cfg)
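Taken together, the two hunks above keep the UAPI stable: each enumerator in msm_isp_ioctl_cmd_code still evaluates to BASE_VIDIOC_PRIVATE+N, so the encoded ioctl numbers do not change, and because every member of the new union begins with uint32_t stream_handle, the kernel can read the handle before deciding which layout the caller used. A minimal userspace sketch of the new per-frame request path follows; the header name, the UPDATE_STREAM_REQUEST_FRAMES_VER2 enumerator, and the variable names (vfe_fd, handle, user_id, frame, idx) are assumptions for illustration, not taken from this patch.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <media/msmb_isp.h>       /* assumed UAPI header name */

    int request_frame(int vfe_fd, uint32_t handle, uint32_t user_id,
                      uint32_t frame, uint32_t idx)
    {
            struct msm_vfe_axi_stream_update_cmd cmd = {0};

            cmd.num_streams = 1;
            /* Hypothetical update_type value selecting the req_frm_ver2 layout. */
            cmd.update_type = UPDATE_STREAM_REQUEST_FRAMES_VER2;
            cmd.req_frm_ver2.stream_handle = handle;      /* must stay first */
            cmd.req_frm_ver2.user_stream_id = user_id;
            cmd.req_frm_ver2.frame_id = frame;
            cmd.req_frm_ver2.buf_index = idx;

            return ioctl(vfe_fd, VIDIOC_MSM_ISP_UPDATE_STREAM, &cmd);
    }

Because the union aliases the legacy update_info[] array, older callers that still fill update_info[0].stream_handle keep working unchanged.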
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 0551c219c40e..fb42418507ae 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>
+#include <linux/delay.h>
/*
* In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -378,6 +379,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
* values at the cost of a few extra spins.
*/
cpu_relax_lowlatency();
+
+ /*
+ * On ARM systems, we must slow down the waiter's repeated
+ * acquisition of spin_mlock and atomics on the lock count, or
+ * we risk starving out a thread attempting to release the
+ * mutex. The mutex slowpath release must take the spin lock
+ * wait_lock, and that spin lock can share an exclusive monitor
+ * with the other waiter atomics in the mutex data structure,
+ * so we must take care to rate-limit the waiters.
+ */
+ udelay(1);
}
osq_unlock(&lock->osq);
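The udelay(1) is the entire fix: it paces each optimistic spinner so the releasing CPU can win the exclusive monitor that the waiters' atomics keep claiming. The same idea in a self-contained userspace test-and-test-and-set lock looks roughly like this; it is an analogy, not the kernel's mutex code, with demo_lock and usleep(1) standing in for the mutex fields and udelay(1).

    #include <stdatomic.h>
    #include <unistd.h>

    struct demo_lock { atomic_int locked; };

    static void demo_lock_acquire(struct demo_lock *l)
    {
            while (atomic_exchange_explicit(&l->locked, 1,
                                            memory_order_acquire)) {
                    /*
                     * Spin on a plain load and back off ~1us per
                     * iteration, so the owner's release store is not
                     * starved by our traffic on the same cache line /
                     * exclusive monitor.
                     */
                    while (atomic_load_explicit(&l->locked,
                                                memory_order_relaxed))
                            usleep(1);
            }
    }

    static void demo_lock_release(struct demo_lock *l)
    {
            atomic_store_explicit(&l->locked, 0, memory_order_release);
    }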
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
index fae41583d0e9..008d034fcf8f 100644
--- a/net/ipc_router/ipc_router_core.c
+++ b/net/ipc_router/ipc_router_core.c
@@ -2176,7 +2176,6 @@ static void cleanup_rmt_server(struct msm_ipc_router_xprt_info *xprt_info,
{
union rr_control_msg ctl;
- ipc_router_reset_conn(rport_ptr);
memset(&ctl, 0, sizeof(ctl));
ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
ctl.srv.service = server->name.service;
@@ -2207,6 +2206,7 @@ static void cleanup_rmt_ports(struct msm_ipc_router_xprt_info *xprt_info,
server = rport_ptr->server;
rport_ptr->server = NULL;
mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ ipc_router_reset_conn(rport_ptr);
if (server) {
cleanup_rmt_server(xprt_info, rport_ptr,
server);
@@ -2361,13 +2361,13 @@ static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr)
list_for_each_entry_safe(conn_info, tmp_conn_info,
&rport_ptr->conn_info_list, list) {
port_ptr = ipc_router_get_port_ref(conn_info->port_id);
- if (!port_ptr)
- continue;
- mutex_lock(&port_ptr->port_lock_lhc3);
- port_ptr->conn_status = CONNECTION_RESET;
- mutex_unlock(&port_ptr->port_lock_lhc3);
- wake_up(&port_ptr->port_rx_wait_q);
- kref_put(&port_ptr->ref, ipc_router_release_port);
+ if (port_ptr) {
+ mutex_lock(&port_ptr->port_lock_lhc3);
+ port_ptr->conn_status = CONNECTION_RESET;
+ mutex_unlock(&port_ptr->port_lock_lhc3);
+ wake_up(&port_ptr->port_rx_wait_q);
+ kref_put(&port_ptr->ref, ipc_router_release_port);
+ }
list_del(&conn_info->list);
kfree(conn_info);
@@ -2651,6 +2651,7 @@ static int process_rmv_client_msg(struct msm_ipc_router_xprt_info *xprt_info,
server = rport_ptr->server;
rport_ptr->server = NULL;
mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ ipc_router_reset_conn(rport_ptr);
down_write(&server_list_lock_lha2);
if (server)
cleanup_rmt_server(NULL, rport_ptr, server);
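Two behavioral changes are folded into this hunk set: ipc_router_reset_conn() is now called by cleanup_rmt_ports() and process_rmv_client_msg() after rport_lock_lhb2 is dropped, instead of from inside cleanup_rmt_server(), and the reset loop no longer skips past entries whose port lookup fails, so every conn_info node is unlinked and freed exactly once. A hedged sketch of that loop shape is below; the types and helpers (struct conn, lookup_port, notify_reset, put_port) are illustrative, not from ipc_router.

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct port;                                    /* opaque, illustrative */
    struct conn { struct list_head list; u32 port_id; };

    struct port *lookup_port(u32 port_id);          /* hypothetical helpers */
    void notify_reset(struct port *p);
    void put_port(struct port *p);

    static void drain_conn_list(struct list_head *conn_list)
    {
            struct conn *c, *tmp;

            list_for_each_entry_safe(c, tmp, conn_list, list) {
                    struct port *p = lookup_port(c->port_id);

                    if (p) {                /* notification is optional... */
                            notify_reset(p);
                            put_port(p);
                    }
                    list_del(&c->list);     /* ...but the unlink/free is not */
                    kfree(c);
            }
    }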
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 30f54d1fc841..913843530213 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6022,6 +6022,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->no_cck =
nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+ if (info->attrs[NL80211_ATTR_MAC])
+ memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]),
+ ETH_ALEN);
+ else
+ eth_broadcast_addr(request->bssid);
+
request->wdev = wdev;
request->wiphy = &rdev->wiphy;
request->scan_start = jiffies;
@@ -7932,6 +7938,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
connect.mfp = NL80211_MFP_NO;
}
+ if (info->attrs[NL80211_ATTR_PREV_BSSID])
+ connect.prev_bssid =
+ nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
+
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
connect.channel = nl80211_get_valid_chan(
wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]);
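With this change, nl80211_trigger_scan() honors NL80211_ATTR_MAC as the target BSSID of the scan and falls back to the broadcast address when the attribute is absent, and nl80211_connect() passes NL80211_ATTR_PREV_BSSID through for reassociation. A hedged libnl-3 sketch of a userspace scan pinned to one BSSID is shown below; sock, nl80211_id, ifindex and target_bssid are assumed to have been set up elsewhere (genl_connect(), genl_ctrl_resolve(), ...), and none of this code is from the patch.

    #include <errno.h>
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <linux/nl80211.h>
    #include <linux/if_ether.h>

    static int trigger_bssid_scan(struct nl_sock *sock, int nl80211_id,
                                  int ifindex, const unsigned char *target_bssid)
    {
            struct nl_msg *msg = nlmsg_alloc();
            int err;

            if (!msg)
                    return -ENOMEM;
            genlmsg_put(msg, 0, 0, nl80211_id, 0, 0,
                        NL80211_CMD_TRIGGER_SCAN, 0);
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
            /*
             * The kernel copies this MAC into request->bssid; if the
             * attribute is omitted, it now defaults to broadcast.
             */
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, target_bssid);

            err = nl_send_auto(sock, msg);
            nlmsg_free(msg);
            return err;
    }

The scan.c and sme.c hunks below apply the same broadcast default to the wext and SME-internal scan requests, which never carry a target BSSID.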
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 30f967665e84..16c3424507c3 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1293,6 +1293,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (wiphy->bands[i])
creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
+ eth_broadcast_addr(creq->bssid);
+
rdev->scan_req = creq;
err = rdev_scan(rdev, creq);
if (err) {
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index e9be8c3b177b..44f420dfa8e3 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -142,6 +142,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
wdev->conn->params.ssid_len);
request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
+ eth_broadcast_addr(request->bssid);
+
request->wdev = wdev;
request->wiphy = &rdev->wiphy;
request->scan_start = jiffies;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 43edcf8f6d5e..c800ad6dd5dd 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1221,6 +1221,7 @@ TRACE_EVENT(rdev_connect,
__field(bool, privacy)
__field(u32, wpa_versions)
__field(u32, flags)
+ MAC_ENTRY(prev_bssid)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -1232,13 +1233,14 @@ TRACE_EVENT(rdev_connect,
__entry->privacy = sme->privacy;
__entry->wpa_versions = sme->crypto.wpa_versions;
__entry->flags = sme->flags;
+ MAC_ASSIGN(prev_bssid, sme->prev_bssid);
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
- "flags: %u",
+ "flags: %u, previous bssid: " MAC_PR_FMT,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid,
__entry->auth_type, BOOL_TO_STR(__entry->privacy),
- __entry->wpa_versions, __entry->flags)
+ __entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid))
);
TRACE_EVENT(rdev_set_cqm_rssi_config,