author    Lakshmi Narayana Kalavala <lkalaval@codeaurora.org>    2016-02-03 14:50:32 -0800
committer David Keitel <dkeitel@codeaurora.org>                  2016-03-23 20:48:07 -0700
commit    12d7df3314bf94ce93153eb1133cd9b24d46c918 (patch)
tree      072f32476613a2f6d6ae994fccad5fba56b77017 /drivers
parent    649b8715182350d94a752660bb577cd08ae64a53 (diff)
msm: camera: Add all camera drivers
Add all camera drivers by picking them up from
AU_LINUX_ANDROID_LA.HB.1.3.1.06.00.00.187.056 (e70ad0cd)

Signed-off-by: Lakshmi Narayana Kalavala <lkalaval@codeaurora.org>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/media/platform/msm/Kconfig | 40
-rw-r--r-- drivers/media/platform/msm/Makefile | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/Kconfig | 243
-rw-r--r-- drivers/media/platform/msm/camera_v2/Makefile | 24
-rw-r--r-- drivers/media/platform/msm/camera_v2/camera/Makefile | 3
-rw-r--r-- drivers/media/platform/msm/camera_v2/camera/camera.c | 814
-rw-r--r-- drivers/media/platform/msm/camera_v2/camera/camera.h | 23
-rw-r--r-- drivers/media/platform/msm/camera_v2/common/Makefile | 2
-rw-r--r-- drivers/media/platform/msm/camera_v2/common/cam_hw_ops.c | 273
-rw-r--r-- drivers/media/platform/msm/camera_v2/common/cam_hw_ops.h | 37
-rw-r--r-- drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c | 1619
-rw-r--r-- drivers/media/platform/msm/camera_v2/common/cam_smmu_api.h | 166
-rw-r--r-- drivers/media/platform/msm/camera_v2/common/cam_soc_api.c | 724
-rw-r--r-- drivers/media/platform/msm/camera_v2/common/cam_soc_api.h | 289
-rw-r--r-- drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c | 843
-rw-r--r-- drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.h | 93
-rw-r--r-- drivers/media/platform/msm/camera_v2/fd/Makefile | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c | 1356
-rw-r--r-- drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h | 253
-rw-r--r-- drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c | 1626
-rw-r--r-- drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.h | 82
-rw-r--r-- drivers/media/platform/msm/camera_v2/fd/msm_fd_regs.h | 169
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/Makefile | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c | 1462
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h | 209
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp.c | 680
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp.h | 728
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp32.c | 1576
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp32.h | 17
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp40.c | 2451
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp40.h | 17
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp44.c | 2089
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp44.h | 17
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp46.c | 2193
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp46.h | 17
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp47.c | 2418
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp47.h | 17
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 3356
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h | 114
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c | 881
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h | 30
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c | 2293
-rw-r--r-- drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h | 85
-rw-r--r-- drivers/media/platform/msm/camera_v2/ispif/Makefile | 4
-rw-r--r-- drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c | 1734
-rw-r--r-- drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h | 83
-rw-r--r-- drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h | 118
-rw-r--r-- drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h | 99
-rw-r--r-- drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h | 102
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/Makefile | 7
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_common.h | 38
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_core.c | 380
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_core.h | 40
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c | 343
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c | 931
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.h | 142
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw_reg.h | 210
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c | 646
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.h | 41
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.c | 1573
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.h | 142
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_dma/Makefile | 4
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c | 1340
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h | 373
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c | 2116
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.h | 78
-rw-r--r-- drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_regs.h | 122
-rw-r--r-- drivers/media/platform/msm/camera_v2/msm.c | 1236
-rw-r--r-- drivers/media/platform/msm/camera_v2/msm.h | 124
-rw-r--r-- drivers/media/platform/msm/camera_v2/msm_buf_mgr/Makefile | 2
-rw-r--r-- drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c | 667
-rw-r--r-- drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h | 54
-rw-r--r-- drivers/media/platform/msm/camera_v2/msm_sd.h | 90
-rw-r--r-- drivers/media/platform/msm/camera_v2/msm_vb2/Makefile | 3
-rw-r--r-- drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c | 351
-rw-r--r-- drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h | 69
-rw-r--r-- drivers/media/platform/msm/camera_v2/pproc/Makefile | 1
-rw-r--r-- drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c | 4403
-rw-r--r-- drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h | 267
-rw-r--r-- drivers/media/platform/msm/camera_v2/pproc/vpe/Makefile | 3
-rw-r--r-- drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.c | 1683
-rw-r--r-- drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.h | 257
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/Makefile | 8
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/actuator/Makefile | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c | 1974
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h | 110
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/cci/Makefile | 4
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/cci/msm_cam_cci_hwreg.h | 69
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c | 2172
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h | 238
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/Makefile | 4
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_2_0_hwreg.h | 65
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_2_2_hwreg.h | 64
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_0_hwreg.h | 64
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_1_hwreg.h | 64
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_2_hwreg.h | 64
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_1_hwreg.h | 63
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_2_hwreg.h | 63
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_5_hwreg.h | 64
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_6_0_hwreg.h | 63
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c | 1317
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h | 121
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/Makefile | 4
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h | 46
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h | 46
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h | 46
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h | 46
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h | 46
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h | 93
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h | 93
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c | 1505
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h | 169
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/eeprom/Makefile | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c | 1818
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.h | 51
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/flash/Makefile | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c | 1218
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h | 120
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/Makefile | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c | 543
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c | 1562
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.h | 59
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h | 149
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c_mux.c | 186
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c_mux.h | 46
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c | 550
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_spi.c | 836
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_spi.h | 120
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c | 1392
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h | 125
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c | 1376
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.h | 21
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c | 225
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.h | 25
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/ois/Makefile | 5
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c | 774
-rw-r--r-- drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.h | 63
138 files changed, 69194 insertions, 0 deletions
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
new file mode 100644
index 000000000000..af15e78be5d8
--- /dev/null
+++ b/drivers/media/platform/msm/Kconfig
@@ -0,0 +1,40 @@
+#
+# MSM camera configuration
+#
+
+comment "Qualcomm MSM Camera And Video"
+
+menuconfig MSM_CAMERA
+ bool "Qualcomm MSM camera and video capture support"
+ depends on ARCH_MSM && VIDEO_V4L2 && I2C
+ ---help---
+ Say Y here to enable selecting the video adapters for
+ Qualcomm MSM camera and video capture drivers. Enabling this
+ adds support for the camera driver stack, including the sensor,
+ ISP and post-processing drivers for legacy chipsets.
+
+config MSM_CAMERA_DEBUG
+ bool "Qualcomm MSM camera debugging with printk"
+ depends on MSM_CAMERA
+ default n
+ ---help---
+ Enable printk() debug for msm camera
+
+menuconfig MSMB_CAMERA
+ bool "Qualcomm MSM camera and video capture 2.0 support"
+ depends on ARCH_MSM && VIDEO_V4L2 && I2C
+ ---help---
+ Say Y here to enable selecting the video adapters for
+ Qualcomm MSM camera and video capture 2.0. Enabling this
+ adds support for the camera driver stack, including the sensor,
+ ISP and post-processing drivers.
+
+config MSMB_CAMERA_DEBUG
+ bool "Qualcomm MSM camera 2.0 debugging with printk"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable printk() debug for msm camera 2.0
+
+if MSMB_CAMERA
+source "drivers/media/platform/msm/camera_v2/Kconfig"
+endif # MSMB_CAMERA
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
new file mode 100644
index 000000000000..99437b4a2c12
--- /dev/null
+++ b/drivers/media/platform/msm/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the QCOM specific video device drivers
+# based on V4L2.
+#
+obj-$(CONFIG_MSMB_CAMERA) += camera_v2/
diff --git a/drivers/media/platform/msm/camera_v2/Kconfig b/drivers/media/platform/msm/camera_v2/Kconfig
new file mode 100644
index 000000000000..f084d07eb6ca
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/Kconfig
@@ -0,0 +1,243 @@
+config MSM_CAMERA_SENSOR
+ bool "Qualcomm MSM camera sensor support"
+ depends on MSMB_CAMERA
+ select NEW_LEDS
+ select LEDS_CLASS
+ ---help---
+ This flag enables support for the camera sensor.
+ The sensor driver is capable of providing real-time
+ data for camera support. The driver supports the V4L2
+ subdev APIs.
+
+config MSM_CPP
+ bool "Qualcomm MSM Camera Post Processing Engine support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for the Camera Post-processing Engine.
+ The post-processing engine is capable of scaling
+ and cropping images. The driver supports the V4L2 subdev
+ APIs.
+
+config MSM_CCI
+ bool "Qualcomm MSM Camera Control Interface support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for Camera Control Interface driver only
+ for those platforms that have hardware support. This driver
+ is responsible for handling I2C read and write on the I2C
+ bus. It is also responsible for synchronization with
+ GPIO and data frames.
+
+config MSM_CSI20_HEADER
+ bool "Qualcomm MSM CSI 2.0 Header"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for CSI drivers to include 2.0
+ header. This header has register macros, their
+ values and bit masks for the register configuration bits.
+ This config macro is required for targets based on the 8960,
+ 8930 and 8064 platforms.
+
+config MSM_CSI22_HEADER
+ bool "Qualcomm MSM CSI 2.2 Header"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for CSI drivers to include 2.2
+ header. This header has register macros, their
+ values and bit masks for the register configuration bits.
+ This config macro is required for targets based on the 8610
+ platform.
+
+config MSM_CSI30_HEADER
+ bool "Qualcomm MSM CSI 3.0 Header"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for CSI drivers to include 3.0
+ header. This header has register macros, their
+ values and bit masks for the register configuration bits.
+ This config macro is required for targets based on
+ 8064 platforms.
+
+config MSM_CSI31_HEADER
+ bool "Qualcomm MSM CSI 3.1 Header"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for CSI drivers to include 3.1
+ header. This header has register macros, their
+ values and bit masks for the register configuration bits.
+ This config macro is required for targets based on
+ APQ8084 platform.
+
+config MSM_CSIPHY
+ bool "Qualcomm MSM Camera Serial Interface Physical receiver support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for Camera Serial Interface
+ Physical receiver. It deserializes packets and
+ supports detection of packet start and stop
+ signalling.
+
+config MSM_CSID
+ bool "Qualcomm MSM Camera Serial Interface decoder support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for Camera Serial Interface decoder.
+ It supports lane merging and decoding of packets
+ based on cid which is mapped to a virtual channel
+ and datatype.
+
+config MSM_EEPROM
+ bool "Qualcomm MSM Camera ROM Interface for Calibration support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for the ROM interface for calibration.
+ Provides an interface for reading the calibration data,
+ and also provides support for writing data in the case of flash ROM.
+ Currently supports the I2C, CCI and SPI protocols.
+
+config MSM_ISPIF
+ bool "Qualcomm MSM Image Signal Processing interface support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for Image Signal Processing interface module.
+ This module acts as a crossbar between CSID and VFE. Output
+ of any CID of the CSID can be routed to any of the pixel or raw
+ data interfaces in the VFE.
+
+config MSM_ISPIF_V1
+ bool "Qualcomm MSM Image Signal Processing interface support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for Image Signal Processing interface module.
+ This module acts as a crossbar between CSID and VFE. Output
+ of any CID of MSM_CSI22_HEADER can be routed to any of the pixel
+ or raw data interfaces in the VFE.
+
+config MSM_ISPIF_V2
+ bool "Qualcomm MSM Image Signal Processing interface support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enable support for Image Signal Processing interface module.
+ This module acts as a crossbar between CSID and VFE. Output
+ of any CID of the CSID can be routed to any of the pixel
+ or raw data interfaces in the VFE.
+
+config IMX134
+ bool "Sensor IMX134 (BAYER 8M)"
+ depends on MSMB_CAMERA
+ ---help---
+ Sony 8 MP Bayer Sensor with auto focus, uses
+ 4 mipi lanes full resolution @30fps and
+ HFR @60fps and @120fps,
+ Video HDR support.
+
+config IMX132
+ bool "Sensor IMX132 (BAYER 2M)"
+ depends on MSMB_CAMERA
+ ---help---
+ Sony 2 MP Bayer Sensor with auto focus, uses
+ 2 mipi lanes, preview config = 1920 x 1080 at 30 fps,
+ snapshot config = 1920 x 1080 at 30 fps,
+ Video HDR support.
+
+config OV9724
+ bool "Sensor OV9724 (BAYER 2M)"
+ depends on MSMB_CAMERA
+ ---help---
+ OmniVision 2 MP Bayer Sensor, supports 2 mipi lanes,
+ preview and snapshot config at 1280*720 at 30 fps,
+ hfr video at 60, 90 and 120 fps. This sensor driver does
+ not support auto focus.
+
+config OV5648
+ bool "Sensor OV5648 (BAYER 5M)"
+ depends on MSMB_CAMERA
+ ---help---
+ OmniVision 5 MP Bayer Sensor, only use 1 mipi lane,
+ preview set to 1296*972 at 30 fps,
+ snapshot set to 2592*1944 at 12 fps,
+ This sensor driver does not support auto focus.
+
+config GC0339
+ bool "Sensor GC0339 (BAYER .3M)"
+ depends on MSMB_CAMERA
+ ---help---
+ gc0339 is a Galaxycore .3 MP Bayer Sensor.
+ It supports 1 or 2 mipi lanes.
+ Preview and snapshot resolution shall be 640*480 at 30 fps,
+ It does not support auto focus.
+
+config OV8825
+ bool "OmniVision OV8825 (BAYER 8MP)"
+ depends on MSMB_CAMERA
+ ---help---
+ OmniVision 8 MP Bayer Sensor with auto focus. Uses
+ 2 mipi lanes, preview config = 1632*1224 30 fps,
+ snapshot config = 3264 * 2448 at 18 fps.
+ 2 lanes max fps is 18, 4 lanes max fps is 24.
+
+config OV8865
+ bool "OmniVision OV8865 (BAYER 8MP)"
+ depends on MSMB_CAMERA
+ ---help---
+ OmniVision 8 MP Bayer Sensor with auto focus. Uses
+ 4 mipi lanes, preview config = 1632*1224 30 fps,
+ snapshot config = 3264 * 2448 at 30 fps.
+ Max fps is 30fps at 3264 * 2448, 60fps at 1632 * 1224
+
+config s5k4e1
+ bool "Sensor s5k4e1 (BAYER 5MP)"
+ depends on MSMB_CAMERA
+ ---help---
+ Samsung 5 MP Bayer Sensor. It uses 2 mipi lanes,
+ supports 720P preview at 30 fps
+ and QSXGA snapshot at 15 fps.
+ This sensor driver does not support auto focus.
+
+config OV12830
+ bool "OmniVision OV12830 (BAYER 12MP)"
+ depends on MSMB_CAMERA
+ ---help---
+ OmniVision 12.8 MP Bayer Sensor with auto focus. Uses
+ 4 mipi lanes, preview config = 2112 * 1500 at 30 fps,
+ snapshot config = 4224 * 3000 at 15 fps.
+ 2 lanes max fps is 18, 4 lanes max fps is 24.
+
+config MSM_V4L2_VIDEO_OVERLAY_DEVICE
+ tristate "Qualcomm MSM V4l2 video overlay device"
+ ---help---
+ Enables support for the MSM V4L2 video
+ overlay driver. This allows video rendering
+ apps to render overlaid video using Video4Linux2
+ APIs, by using /dev/videoX device
+
+config MSMB_JPEG
+ tristate "Qualcomm MSM Jpeg Encoder Engine support"
+ depends on MSMB_CAMERA && (ARCH_MSM8974 || ARCH_MSM8226 || ARCH_APQ8084 || ARCH_MSM8916 || ARCH_MSM)
+ ---help---
+ Enable support for Jpeg Encoder/Decoder
+ Engine for 8974.
+ This module serves as the common driver
+ for the JPEG 1.0 encoder and decoder.
+
+config MSM_GEMINI
+ tristate "Qualcomm MSM Gemini JPEG engine support"
+ depends on MSMB_CAMERA && (ARCH_MSM7X30 || ARCH_MSM8X60 || ARCH_MSM8960)
+ ---help---
+ Enables support for the Gemini JPEG encoder
+ Engine for 8x60, 7x30 and 8960.
+ This module serves as the driver
+ for JPEG encoding functionality.
+
+config MSM_FD
+ tristate "Qualcomm MSM FD face detection engine support"
+ depends on MSMB_CAMERA
+ ---help---
+ Enables support for the MSM FD face detection engine.
+
+config MSM_JPEGDMA
+ tristate "Qualcomm Technologies Inc. MSM Jpeg dma"
+ depends on MSMB_CAMERA
+ select V4L2_MEM2MEM_DEV
+ ---help---
+ Enable support for Jpeg dma engine.
diff --git a/drivers/media/platform/msm/camera_v2/Makefile b/drivers/media/platform/msm/camera_v2/Makefile
new file mode 100644
index 000000000000..cdb0468ae9e0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/Makefile
@@ -0,0 +1,24 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/codecs
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/isps
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/pproc
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_vb2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/jpeg_10
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/jpeg_dma
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/fd
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+
+obj-$(CONFIG_MSMB_CAMERA) += common/
+obj-$(CONFIG_MSMB_CAMERA) += msm.o
+obj-$(CONFIG_MSMB_CAMERA) += camera/
+obj-$(CONFIG_MSMB_CAMERA) += msm_vb2/
+obj-$(CONFIG_MSMB_CAMERA) += sensor/
+obj-$(CONFIG_MSMB_CAMERA) += pproc/
+obj-$(CONFIG_MSMB_CAMERA) += isp/
+obj-$(CONFIG_MSMB_CAMERA) += ispif/
+obj-$(CONFIG_MSMB_JPEG) += jpeg_10/
+obj-$(CONFIG_MSM_JPEGDMA) += jpeg_dma/
+obj-$(CONFIG_MSMB_CAMERA) += msm_buf_mgr/
+obj-$(CONFIG_MSM_FD) += fd/
diff --git a/drivers/media/platform/msm/camera_v2/camera/Makefile b/drivers/media/platform/msm/camera_v2/camera/Makefile
new file mode 100644
index 000000000000..bd707509d50e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/camera/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_vb2
+obj-$(CONFIG_MSMB_CAMERA) += camera.o
diff --git a/drivers/media/platform/msm/camera_v2/camera/camera.c b/drivers/media/platform/msm/camera_v2/camera/camera.c
new file mode 100644
index 000000000000..70c681084bbc
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/camera/camera.c
@@ -0,0 +1,814 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/atomic.h>
+#include <linux/wait.h>
+#include <linux/videodev2.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+
+#include "camera.h"
+#include "msm.h"
+#include "msm_vb2.h"
+
+#define fh_to_private(__fh) \
+ container_of(__fh, struct camera_v4l2_private, fh)
+
+struct camera_v4l2_private {
+ struct v4l2_fh fh;
+ unsigned int stream_id;
+ unsigned int is_vb2_valid; /*0 if no vb2 buffers on stream, else 1*/
+ struct vb2_queue vb2_q;
+};
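+
+/* camera_v4l2_fh_open() stores &camera_v4l2_private::fh in
+ * filep->private_data, so fh_to_private() recovers the wrapper
+ * struct with container_of().
+ */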
+
+static void camera_pack_event(struct file *filep, int evt_id,
+ int command, int value, struct v4l2_event *event)
+{
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event->u.data[0];
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+
+ /* always MSM_CAMERA_V4L2_EVENT_TYPE */
+ event->type = MSM_CAMERA_V4L2_EVENT_TYPE;
+ event->id = evt_id;
+ event_data->command = command;
+ event_data->session_id = pvdev->vdev->num;
+ event_data->stream_id = sp->stream_id;
+ event_data->arg_value = value;
+}
+
+static int camera_check_event_status(struct v4l2_event *event)
+{
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event->u.data[0];
+
+ if (event_data->status > MSM_CAMERA_ERR_EVT_BASE) {
+ pr_err("%s : event_data status out of bounds\n",
+ __func__);
+ pr_err("%s : Line %d event_data->status 0X%x\n",
+ __func__, __LINE__, event_data->status);
+
+ switch (event_data->status) {
+ case MSM_CAMERA_ERR_CMD_FAIL:
+ case MSM_CAMERA_ERR_MAPPING:
+ return -EFAULT;
+ case MSM_CAMERA_ERR_DEVICE_BUSY:
+ return -EBUSY;
+ default:
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int camera_v4l2_querycap(struct file *filep, void *fh,
+ struct v4l2_capability *cap)
+{
+ int rc;
+ struct v4l2_event event;
+
+ /* can use cap->driver to make differentiation */
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ MSM_CAMERA_PRIV_QUERY_CAP, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+
+ return rc;
+}
+
+static int camera_v4l2_s_crop(struct file *filep, void *fh,
+ const struct v4l2_crop *crop)
+{
+ int rc = 0;
+ struct v4l2_event event;
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_S_CROP, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_g_crop(struct file *filep, void *fh,
+ struct v4l2_crop *crop)
+{
+ int rc = 0;
+ struct v4l2_event event;
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ MSM_CAMERA_PRIV_G_CROP, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_queryctrl(struct file *filep, void *fh,
+ struct v4l2_queryctrl *ctrl)
+{
+ int rc = 0;
+ struct v4l2_event event;
+
+ if (ctrl->type == V4L2_CTRL_TYPE_MENU) {
+
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ ctrl->id, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_g_ctrl(struct file *filep, void *fh,
+ struct v4l2_control *ctrl)
+{
+ int rc = 0;
+ struct v4l2_event event;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+
+ if (ctrl->id >= V4L2_CID_PRIVATE_BASE) {
+ if (ctrl->id == MSM_CAMERA_PRIV_G_SESSION_ID) {
+ ctrl->value = session_id;
+ } else {
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ ctrl->id, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_s_ctrl(struct file *filep, void *fh,
+ struct v4l2_control *ctrl)
+{
+ int rc = 0;
+ struct v4l2_event event;
+ struct msm_v4l2_event_data *event_data;
+ if (ctrl->id >= V4L2_CID_PRIVATE_BASE) {
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM, ctrl->id,
+ ctrl->value, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+ event_data = (struct msm_v4l2_event_data *)event.u.data;
+ ctrl->value = event_data->ret_value;
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_reqbufs(struct file *filep, void *fh,
+ struct v4l2_requestbuffers *req)
+{
+ int ret;
+ struct msm_session *session;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&session->lock_q);
+ ret = vb2_reqbufs(&sp->vb2_q, req);
+ mutex_unlock(&session->lock_q);
+ return ret;
+}
+
+static int camera_v4l2_querybuf(struct file *filep, void *fh,
+ struct v4l2_buffer *pb)
+{
+ return 0;
+}
+
+static int camera_v4l2_qbuf(struct file *filep, void *fh,
+ struct v4l2_buffer *pb)
+{
+ int ret;
+ struct msm_session *session;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&session->lock_q);
+ ret = vb2_qbuf(&sp->vb2_q, pb);
+ mutex_unlock(&session->lock_q);
+ return ret;
+}
+
+static int camera_v4l2_dqbuf(struct file *filep, void *fh,
+ struct v4l2_buffer *pb)
+{
+ int ret;
+ struct msm_session *session;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int session_id = pvdev->vdev->num;
+ session = msm_session_find(session_id);
+ if (WARN_ON(!session))
+ return -EIO;
+ mutex_lock(&session->lock_q);
+ ret = vb2_dqbuf(&sp->vb2_q, pb, filep->f_flags & O_NONBLOCK);
+ mutex_unlock(&session->lock_q);
+ return ret;
+}
+
+static int camera_v4l2_streamon(struct file *filep, void *fh,
+ enum v4l2_buf_type buf_type)
+{
+ struct v4l2_event event;
+ int rc;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ rc = vb2_streamon(&sp->vb2_q, buf_type);
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_STREAM_ON, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ return rc;
+}
+
+static int camera_v4l2_streamoff(struct file *filep, void *fh,
+ enum v4l2_buf_type buf_type)
+{
+ struct v4l2_event event;
+ int rc;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_STREAM_OFF, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ vb2_streamoff(&sp->vb2_q, buf_type);
+ return rc;
+}
+
+static int camera_v4l2_g_fmt_vid_cap_mplane(struct file *filep, void *fh,
+ struct v4l2_format *pfmt)
+{
+ int rc = -EINVAL;
+
+ if (pfmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ struct v4l2_event event;
+
+ camera_pack_event(filep, MSM_CAMERA_GET_PARM,
+ MSM_CAMERA_PRIV_G_FMT, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ }
+
+ return rc;
+}
+
+static int camera_v4l2_s_fmt_vid_cap_mplane(struct file *filep, void *fh,
+ struct v4l2_format *pfmt)
+{
+ int rc = 0;
+ int i = 0;
+ struct v4l2_event event;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+ struct msm_v4l2_format_data *user_fmt;
+
+ if (pfmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+
+ if (WARN_ON(!sp->vb2_q.drv_priv))
+ return -ENOMEM;
+
+ memcpy(sp->vb2_q.drv_priv, pfmt->fmt.raw_data,
+ sizeof(struct msm_v4l2_format_data));
+ user_fmt = (struct msm_v4l2_format_data *)sp->vb2_q.drv_priv;
+
+ pr_debug("%s: num planes :%c\n", __func__,
+ user_fmt->num_planes);
+ /*num_planes need to bound checked, otherwise for loop
+ can execute forever */
+ if (WARN_ON(user_fmt->num_planes > VIDEO_MAX_PLANES))
+ return -EINVAL;
+ for (i = 0; i < user_fmt->num_planes; i++)
+ pr_debug("%s: plane size[%d]\n", __func__,
+ user_fmt->plane_sizes[i]);
+
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_S_FMT, -1, &event);
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ return rc;
+
+ rc = camera_check_event_status(&event);
+ if (rc < 0)
+ return rc;
+
+ sp->is_vb2_valid = 1;
+ }
+
+ return rc;
+
+}
+
+static int camera_v4l2_try_fmt_vid_cap_mplane(struct file *filep, void *fh,
+ struct v4l2_format *pfmt)
+{
+ return 0;
+}
+
+
+static int camera_v4l2_g_parm(struct file *filep, void *fh,
+ struct v4l2_streamparm *a)
+{
+ /* TODO */
+ return 0;
+}
+
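+/* Note: VIDIOC_S_PARM is repurposed here to set up a new stream: it binds
+ * the vb2 queue via msm_create_stream(), posts MSM_CAMERA_PRIV_NEW_STREAM,
+ * and returns the stream id through parm->parm.capture.extendedmode.
+ */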
+static int camera_v4l2_s_parm(struct file *filep, void *fh,
+ struct v4l2_streamparm *parm)
+{
+ int rc = 0;
+ struct v4l2_event event;
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event.u.data[0];
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_NEW_STREAM, -1, &event);
+
+ rc = msm_create_stream(event_data->session_id,
+ event_data->stream_id, &sp->vb2_q);
+ if (rc < 0)
+ return rc;
+
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0)
+ goto error;
+
+ rc = camera_check_event_status(&event);
+ if (rc < 0)
+ goto error;
+
+ /* use stream_id as stream index */
+ parm->parm.capture.extendedmode = sp->stream_id;
+
+ return rc;
+
+error:
+ msm_delete_stream(event_data->session_id,
+ event_data->stream_id);
+ return rc;
+}
+
+static int camera_v4l2_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ int rc = 0;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ rc = v4l2_event_subscribe(&sp->fh, sub, 5, NULL);
+
+ return rc;
+}
+
+static int camera_v4l2_unsubscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ int rc = 0;
+ struct camera_v4l2_private *sp = fh_to_private(fh);
+
+ rc = v4l2_event_unsubscribe(&sp->fh, sub);
+
+ return rc;
+}
+
+static const struct v4l2_ioctl_ops camera_v4l2_ioctl_ops = {
+ .vidioc_querycap = camera_v4l2_querycap,
+ .vidioc_s_crop = camera_v4l2_s_crop,
+ .vidioc_g_crop = camera_v4l2_g_crop,
+ .vidioc_queryctrl = camera_v4l2_queryctrl,
+ .vidioc_g_ctrl = camera_v4l2_g_ctrl,
+ .vidioc_s_ctrl = camera_v4l2_s_ctrl,
+ .vidioc_reqbufs = camera_v4l2_reqbufs,
+ .vidioc_querybuf = camera_v4l2_querybuf,
+ .vidioc_qbuf = camera_v4l2_qbuf,
+ .vidioc_dqbuf = camera_v4l2_dqbuf,
+ .vidioc_streamon = camera_v4l2_streamon,
+ .vidioc_streamoff = camera_v4l2_streamoff,
+ .vidioc_g_fmt_vid_cap_mplane = camera_v4l2_g_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = camera_v4l2_s_fmt_vid_cap_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = camera_v4l2_try_fmt_vid_cap_mplane,
+
+ /* Stream type-dependent parameter ioctls */
+ .vidioc_g_parm = camera_v4l2_g_parm,
+ .vidioc_s_parm = camera_v4l2_s_parm,
+
+ /* event subscribe/unsubscribe */
+ .vidioc_subscribe_event = camera_v4l2_subscribe_event,
+ .vidioc_unsubscribe_event = camera_v4l2_unsubscribe_event,
+};
+
+static int camera_v4l2_fh_open(struct file *filep)
+{
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ struct camera_v4l2_private *sp;
+ unsigned int stream_id;
+
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (!sp) {
+ pr_err("%s : memory not available\n", __func__);
+ return -ENOMEM;
+ }
+
+ filep->private_data = &sp->fh;
+
+ /* pvdev->opened is a bitmask of in-use stream slots; the first
+  * clear bit becomes this handle's stream_id (cleared on close) */
+ stream_id = atomic_read(&pvdev->opened);
+ sp->stream_id = find_first_zero_bit(
+ (const unsigned long *)&stream_id, MSM_CAMERA_STREAM_CNT_BITS);
+ pr_debug("%s: Found stream_id=%d\n", __func__, sp->stream_id);
+
+ v4l2_fh_init(&sp->fh, pvdev->vdev);
+ v4l2_fh_add(&sp->fh);
+
+ return 0;
+}
+
+static int camera_v4l2_fh_release(struct file *filep)
+{
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+
+ if (sp) {
+ v4l2_fh_del(&sp->fh);
+ v4l2_fh_exit(&sp->fh);
+ }
+
+ kzfree(sp);
+ return 0;
+}
+
+static int camera_v4l2_vb2_q_init(struct file *filep)
+{
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+ struct vb2_queue *q = &sp->vb2_q;
+
+ memset(q, 0, sizeof(struct vb2_queue));
+
+ /* free up this buffer when stream is done */
+ q->drv_priv =
+ kzalloc(sizeof(struct msm_v4l2_format_data), GFP_KERNEL);
+ if (!q->drv_priv) {
+ pr_err("%s : memory not available\n", __func__);
+ return -ENOMEM;
+ }
+
+ q->mem_ops = msm_vb2_get_q_mem_ops();
+ q->ops = msm_vb2_get_q_ops();
+
+ /* default queue type */
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_USERPTR;
+ q->io_flags = 0;
+ q->buf_struct_size = sizeof(struct msm_vb2_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ return vb2_queue_init(q);
+}
+
+static void camera_v4l2_vb2_q_release(struct file *filep)
+{
+ struct camera_v4l2_private *sp = filep->private_data;
+
+ kzfree(sp->vb2_q.drv_priv);
+ vb2_queue_release(&sp->vb2_q);
+}
+
+static int camera_v4l2_open(struct file *filep)
+{
+ int rc = 0;
+ struct v4l2_event event;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ unsigned int opn_idx, idx;
+ BUG_ON(!pvdev);
+
+ rc = camera_v4l2_fh_open(filep);
+ if (rc < 0) {
+ pr_err("%s : camera_v4l2_fh_open failed Line %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto fh_open_fail;
+ }
+
+ opn_idx = atomic_read(&pvdev->opened);
+ idx = opn_idx;
+ /* every stream has a vb2 queue */
+ rc = camera_v4l2_vb2_q_init(filep);
+ if (rc < 0) {
+ pr_err("%s : vb2 queue init fails Line %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto vb2_q_fail;
+ }
+
+ if (!atomic_read(&pvdev->opened)) {
+ pm_stay_awake(&pvdev->vdev->dev);
+
+ /* create a new session when first opened */
+ rc = msm_create_session(pvdev->vdev->num, pvdev->vdev);
+ if (rc < 0) {
+ pr_err("%s : session creation failed Line %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto session_fail;
+ }
+
+ rc = msm_create_command_ack_q(pvdev->vdev->num,
+ find_first_zero_bit((const unsigned long *)&opn_idx,
+ MSM_CAMERA_STREAM_CNT_BITS));
+ if (rc < 0) {
+ pr_err("%s : creation of command_ack queue failed\n",
+ __func__);
+ pr_err("%s : Line %d rc %d\n", __func__, __LINE__, rc);
+ goto command_ack_q_fail;
+ }
+
+ camera_pack_event(filep, MSM_CAMERA_NEW_SESSION, 0, -1, &event);
+ rc = msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+ if (rc < 0) {
+ pr_err("%s : posting of NEW_SESSION event failed\n",
+ __func__);
+ pr_err("%s : Line %d rc %d\n", __func__, __LINE__, rc);
+ goto post_fail;
+ }
+
+ rc = camera_check_event_status(&event);
+ if (rc < 0) {
+ pr_err("%s : checking event status fails Line %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto post_fail;
+ }
+ } else {
+ rc = msm_create_command_ack_q(pvdev->vdev->num,
+ find_first_zero_bit((const unsigned long *)&opn_idx,
+ MSM_CAMERA_STREAM_CNT_BITS));
+ if (rc < 0) {
+ pr_err("%s : creation of command_ack queue failed Line %d rc %d\n",
+ __func__, __LINE__, rc);
+ goto stream_fail;
+ }
+ }
+ idx |= (1 << find_first_zero_bit((const unsigned long *)&opn_idx,
+ MSM_CAMERA_STREAM_CNT_BITS));
+ atomic_cmpxchg(&pvdev->opened, opn_idx, idx);
+ return rc;
+
+post_fail:
+ msm_delete_command_ack_q(pvdev->vdev->num, 0);
+command_ack_q_fail:
+ msm_destroy_session(pvdev->vdev->num);
+session_fail:
+ pm_relax(&pvdev->vdev->dev);
+stream_fail:
+ camera_v4l2_vb2_q_release(filep);
+vb2_q_fail:
+ camera_v4l2_fh_release(filep);
+fh_open_fail:
+ return rc;
+}
+
+static unsigned int camera_v4l2_poll(struct file *filep,
+ struct poll_table_struct *wait)
+{
+ int rc = 0;
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+ if (sp->is_vb2_valid == 1)
+ rc = vb2_poll(&sp->vb2_q, filep, wait);
+
+ poll_wait(filep, &sp->fh.wait, wait);
+ if (v4l2_event_pending(&sp->fh))
+ rc |= POLLPRI;
+
+ return rc;
+}
+
+static int camera_v4l2_close(struct file *filep)
+{
+ int rc = 0;
+ struct v4l2_event event;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ struct camera_v4l2_private *sp = fh_to_private(filep->private_data);
+ unsigned int opn_idx, mask;
+ BUG_ON(!pvdev);
+
+ opn_idx = atomic_read(&pvdev->opened);
+ pr_debug("%s: close stream_id=%d\n", __func__, sp->stream_id);
+ mask = (1 << sp->stream_id);
+ opn_idx &= ~mask;
+ atomic_set(&pvdev->opened, opn_idx);
+
+ if (atomic_read(&pvdev->opened) == 0) {
+
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
+ msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+
+ camera_pack_event(filep, MSM_CAMERA_DEL_SESSION, 0, -1, &event);
+ msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+
+ msm_delete_command_ack_q(pvdev->vdev->num, 0);
+
+ /* This should take care of both normal close
+ * and application crashes */
+ camera_v4l2_vb2_q_release(filep);
+ msm_destroy_session(pvdev->vdev->num);
+ pm_relax(&pvdev->vdev->dev);
+ } else {
+ camera_pack_event(filep, MSM_CAMERA_SET_PARM,
+ MSM_CAMERA_PRIV_DEL_STREAM, -1, &event);
+ msm_post_event(&event, MSM_POST_EVT_TIMEOUT);
+
+ msm_delete_command_ack_q(pvdev->vdev->num,
+ sp->stream_id);
+
+ camera_v4l2_vb2_q_release(filep);
+ msm_delete_stream(pvdev->vdev->num, sp->stream_id);
+ }
+
+ camera_v4l2_fh_release(filep);
+
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+long camera_v4l2_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return -ENOIOCTLCMD;
+}
+#endif
+static struct v4l2_file_operations camera_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .open = camera_v4l2_open,
+ .poll = camera_v4l2_poll,
+ .release = camera_v4l2_close,
+ .ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = camera_v4l2_compat_ioctl,
+#endif
+};
+
+int camera_init_v4l2(struct device *dev, unsigned int *session)
+{
+ struct msm_video_device *pvdev;
+ struct v4l2_device *v4l2_dev;
+ int rc = 0;
+
+ pvdev = kzalloc(sizeof(struct msm_video_device),
+ GFP_KERNEL);
+ if (WARN_ON(!pvdev)) {
+ rc = -ENOMEM;
+ goto init_end;
+ }
+
+ pvdev->vdev = video_device_alloc();
+ if (WARN_ON(!pvdev->vdev)) {
+ rc = -ENOMEM;
+ goto video_fail;
+ }
+
+ v4l2_dev = kzalloc(sizeof(struct v4l2_device), GFP_KERNEL);
+ if (WARN_ON(!v4l2_dev)) {
+ rc = -ENOMEM;
+ goto v4l2_fail;
+ }
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ v4l2_dev->mdev = kzalloc(sizeof(struct media_device),
+ GFP_KERNEL);
+ if (!v4l2_dev->mdev) {
+ rc = -ENOMEM;
+ goto mdev_fail;
+ }
+ strlcpy(v4l2_dev->mdev->model, MSM_CAMERA_NAME,
+ sizeof(v4l2_dev->mdev->model));
+
+ v4l2_dev->mdev->dev = dev;
+
+ rc = media_device_register(v4l2_dev->mdev);
+ if (WARN_ON(rc < 0))
+ goto media_fail;
+
+ rc = media_entity_init(&pvdev->vdev->entity, 0, NULL, 0);
+ if (WARN_ON(rc < 0))
+ goto entity_fail;
+ pvdev->vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+ pvdev->vdev->entity.group_id = QCAMERA_VNODE_GROUP_ID;
+#endif
+
+ v4l2_dev->notify = NULL;
+ pvdev->vdev->v4l2_dev = v4l2_dev;
+
+ rc = v4l2_device_register(dev, pvdev->vdev->v4l2_dev);
+ if (WARN_ON(rc < 0))
+ goto register_fail;
+
+ strlcpy(pvdev->vdev->name, "msm-sensor", sizeof(pvdev->vdev->name));
+ pvdev->vdev->release = video_device_release;
+ pvdev->vdev->fops = &camera_v4l2_fops;
+ pvdev->vdev->ioctl_ops = &camera_v4l2_ioctl_ops;
+ pvdev->vdev->minor = -1;
+ pvdev->vdev->vfl_type = VFL_TYPE_GRABBER;
+ rc = video_register_device(pvdev->vdev,
+ VFL_TYPE_GRABBER, -1);
+ if (WARN_ON(rc < 0))
+ goto video_register_fail;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ /* FIXME: How to get rid of this mess? */
+ pvdev->vdev->entity.name = video_device_node_name(pvdev->vdev);
+#endif
+
+ *session = pvdev->vdev->num;
+ atomic_set(&pvdev->opened, 0);
+ video_set_drvdata(pvdev->vdev, pvdev);
+ device_init_wakeup(&pvdev->vdev->dev, 1);
+ goto init_end;
+
+video_register_fail:
+ v4l2_device_unregister(pvdev->vdev->v4l2_dev);
+register_fail:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&pvdev->vdev->entity);
+entity_fail:
+ media_device_unregister(v4l2_dev->mdev);
+media_fail:
+ kzfree(v4l2_dev->mdev);
+mdev_fail:
+#endif
+ kzfree(v4l2_dev);
+v4l2_fail:
+ video_device_release(pvdev->vdev);
+video_fail:
+ kzfree(pvdev);
+init_end:
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v2/camera/camera.h b/drivers/media/platform/msm/camera_v2/camera/camera.h
new file mode 100644
index 000000000000..c83b42631e55
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/camera/camera.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAMERA_H
+#define _CAMERA_H
+
+enum stream_state {
+ START_STREAM = 0,
+ STOP_STREAM,
+};
+
+int camera_init_v4l2(struct device *dev, unsigned int *session);
+
+#endif /*_CAMERA_H */
diff --git a/drivers/media/platform/msm/camera_v2/common/Makefile b/drivers/media/platform/msm/camera_v2/common/Makefile
new file mode 100644
index 000000000000..2e9028480145
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/common/Makefile
@@ -0,0 +1,2 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/
+obj-$(CONFIG_MSMB_CAMERA) += msm_camera_io_util.o cam_smmu_api.o cam_hw_ops.o cam_soc_api.o
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_hw_ops.c b/drivers/media/platform/msm/camera_v2/common/cam_hw_ops.c
new file mode 100644
index 000000000000..e0691a27d856
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/common/cam_hw_ops.c
@@ -0,0 +1,273 @@
+/* Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-AHB %s:%d " fmt, __func__, __LINE__
+#define TRUE 1
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/of_platform.h>
+#include "cam_hw_ops.h"
+
+struct cam_ahb_client {
+ enum cam_ahb_clk_vote vote;
+};
+
+struct cam_bus_vector {
+ const char *name;
+};
+
+struct cam_ahb_client_data {
+ struct msm_bus_scale_pdata *pbus_data;
+ u32 ahb_client;
+ u32 ahb_clk_state;
+ struct msm_bus_vectors *paths;
+ struct msm_bus_paths *usecases;
+ struct cam_bus_vector *vectors;
+ u32 *votes;
+ u32 cnt;
+ u32 probe_done;
+ struct cam_ahb_client clients[CAM_AHB_CLIENT_MAX];
+ struct mutex lock;
+};
+
+/* Note: The mask array defined here should match
+ * the order of strings and number of strings
+ * in dtsi bus-vectors
+ */
+
+static enum cam_ahb_clk_vote mask[] = {
+ CAMERA_AHB_SUSPEND_VOTE,
+ CAMERA_AHB_SVS_VOTE,
+ CAMERA_AHB_NOMINAL_VOTE,
+ CAMERA_AHB_TURBO_VOTE
+};
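+
+/* Illustrative device-tree pairing for the mask[] ordering above. The
+ * property names ("bus-vectors", "qcom,bus-votes") are the ones parsed in
+ * cam_ahb_clk_init() below; the vector names other than "suspend" and the
+ * vote values are hypothetical, board-specific examples:
+ *
+ *   bus-vectors = "suspend", "svs", "nominal", "turbo";
+ *   qcom,bus-votes = <0 150000000 320000000 640000000>;
+ *
+ * Entry i of qcom,bus-votes is the vote applied when mask[i] is the
+ * highest set bit in the OR of all client votes (see cam_config_ahb_clk()).
+ */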
+
+static struct cam_ahb_client_data data;
+
+int get_vector_index(char *name)
+{
+ int i = 0, rc = -1;
+
+ for (i = 0; i < data.cnt; i++) {
+ if (strcmp(name, data.vectors[i].name) == 0)
+ return i;
+ }
+
+ return rc;
+}
+
+int cam_ahb_clk_init(struct platform_device *pdev)
+{
+ int i = 0, cnt = 0, rc = 0, index = 0;
+ struct device_node *of_node;
+
+ if (!pdev) {
+ pr_err("invalid pdev argument\n");
+ return -EINVAL;
+ }
+
+ of_node = pdev->dev.of_node;
+ data.cnt = of_property_count_strings(of_node, "bus-vectors");
+ if (data.cnt == 0) {
+ pr_err("no vectors strings found in device tree, count=%d",
+ data.cnt);
+ return 0;
+ }
+
+ cnt = of_property_count_u32_elems(of_node, "qcom,bus-votes");
+ if (cnt == 0) {
+ pr_err("no vector values found in device tree, count=%d", cnt);
+ return 0;
+ }
+
+ if (data.cnt != cnt) {
+ pr_err("vector mismatch num of strings=%u, num of values %d\n",
+ data.cnt, cnt);
+ return -EINVAL;
+ }
+
+ pr_debug("number of bus vectors: %d\n", data.cnt);
+
+ data.vectors = devm_kzalloc(&pdev->dev,
+ sizeof(struct cam_bus_vector) * cnt,
+ GFP_KERNEL);
+ if (!data.vectors)
+ return -ENOMEM;
+
+ for (i = 0; i < data.cnt; i++) {
+ rc = of_property_read_string_index(of_node, "bus-vectors",
+ i, &(data.vectors[i].name));
+ pr_debug("dbg: names[%d] = %s\n", i, data.vectors[i].name);
+ if (rc < 0) {
+ pr_err("failed\n");
+ rc = -EINVAL;
+ goto err1;
+ }
+ }
+
+ data.paths = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_bus_vectors) * cnt,
+ GFP_KERNEL);
+ if (!data.paths) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+ data.usecases = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_bus_paths) * cnt,
+ GFP_KERNEL);
+ if (!data.usecases) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ data.pbus_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_bus_scale_pdata),
+ GFP_KERNEL);
+ if (!data.pbus_data) {
+ rc = -ENOMEM;
+ goto err3;
+ }
+
+ data.votes = devm_kzalloc(&pdev->dev, sizeof(u32) * cnt,
+ GFP_KERNEL);
+ if (!data.votes) {
+ rc = -ENOMEM;
+ goto err4;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,bus-votes",
+ data.votes, cnt);
+
+ for (i = 0; i < data.cnt; i++) {
+ data.paths[i] = (struct msm_bus_vectors) {
+ MSM_BUS_MASTER_AMPSS_M0,
+ MSM_BUS_SLAVE_CAMERA_CFG,
+ 0,
+ data.votes[i]
+ };
+ data.usecases[i] = (struct msm_bus_paths) {
+ .num_paths = 1,
+ .vectors = &data.paths[i],
+ };
+ pr_debug("dbg: votes[%d] = %u\n", i, data.votes[i]);
+ }
+
+ *data.pbus_data = (struct msm_bus_scale_pdata) {
+ .name = "msm_camera_ahb",
+ .num_usecases = data.cnt,
+ .usecase = data.usecases,
+ };
+
+ data.ahb_client =
+ msm_bus_scale_register_client(data.pbus_data);
+ if (!data.ahb_client) {
+ pr_err("ahb vote registering failed\n");
+ rc = -EINVAL;
+ goto err5;
+ }
+
+ index = get_vector_index("suspend");
+ if (index < 0) {
+ pr_err("svs vector not supported\n");
+ rc = -EINVAL;
+ goto err6;
+ }
+
+ /* vote for the suspend vector at init */
+ msm_bus_scale_client_update_request(data.ahb_client,
+ index);
+ data.ahb_clk_state = CAMERA_AHB_SUSPEND_VOTE;
+ data.probe_done = TRUE;
+ mutex_init(&data.lock);
+
+ pr_debug("dbg, done registering ahb votes\n");
+ pr_debug("dbg, clk state :%u, probe :%d\n",
+ data.ahb_clk_state, data.probe_done);
+ return rc;
+
+err6:
+ msm_bus_scale_unregister_client(data.ahb_client);
+err5:
+ devm_kfree(&pdev->dev, data.votes);
+ data.votes = NULL;
+err4:
+ devm_kfree(&pdev->dev, data.pbus_data);
+ data.pbus_data = NULL;
+err3:
+ devm_kfree(&pdev->dev, data.usecases);
+ data.usecases = NULL;
+err2:
+ devm_kfree(&pdev->dev, data.paths);
+ data.paths = NULL;
+err1:
+ devm_kfree(&pdev->dev, data.vectors);
+ data.vectors = NULL;
+ return rc;
+}
+EXPORT_SYMBOL(cam_ahb_clk_init);
+
+int cam_config_ahb_clk(enum cam_ahb_clk_client id, enum cam_ahb_clk_vote vote)
+{
+ int i = 0, n = 0;
+ u32 final_vote = 0;
+
+ if (data.probe_done != TRUE) {
+ pr_err("ahb init is not done yet\n");
+ return -EINVAL;
+ }
+
+ if (vote > CAMERA_AHB_TURBO_VOTE || id >= CAM_AHB_CLIENT_MAX) {
+ pr_err("err: invalid argument\n");
+ return -EINVAL;
+ }
+
+ pr_debug("dbg: id :%u, vote : %u\n", id, vote);
+ data.clients[id].vote = vote;
+
+ mutex_lock(&data.lock);
+
+ if (vote == data.ahb_clk_state) {
+ pr_debug("dbg: already at desired vote\n");
+ mutex_unlock(&data.lock);
+ return 0;
+ }
+
+ /* oring all the client votes */
+ for (i = 0; i < CAM_AHB_CLIENT_MAX; i++)
+ final_vote |= data.clients[i].vote;
+
+ pr_debug("dbg: final vote : %u\n", final_vote);
+ /* find the max client vote */
+ for (n = data.cnt - 1; n >= 0; n--) {
+ if (!(final_vote & mask[n]))
+ continue;
+ else
+ break;
+ }
+
+ if (n >= 0) {
+ if (mask[n] != data.ahb_clk_state) {
+ msm_bus_scale_client_update_request(data.ahb_client, n);
+ data.ahb_clk_state = mask[n];
+ pr_debug("dbg: state : %u, vote : %d\n",
+ data.ahb_clk_state, n);
+ }
+ } else {
+ pr_err("err: no bus vector found\n");
+ mutex_unlock(&data.lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&data.lock);
+ return 0;
+}
+EXPORT_SYMBOL(cam_config_ahb_clk);
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_hw_ops.h b/drivers/media/platform/msm/camera_v2/common/cam_hw_ops.h
new file mode 100644
index 000000000000..2fe35da6b04c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/common/cam_hw_ops.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+enum cam_ahb_clk_vote {
+ /* need to update the voting requests
+ * according to dtsi entries.
+ */
+ CAMERA_AHB_SUSPEND_VOTE = 0x01,
+ CAMERA_AHB_SVS_VOTE = 0x02,
+ CAMERA_AHB_NOMINAL_VOTE = 0x04,
+ CAMERA_AHB_TURBO_VOTE = 0x08,
+};
+
+enum cam_ahb_clk_client {
+ CAM_AHB_CLIENT_CSIPHY,
+ CAM_AHB_CLIENT_CSID,
+ CAM_AHB_CLIENT_CCI,
+ CAM_AHB_CLIENT_ISPIF,
+ CAM_AHB_CLIENT_VFE,
+ CAM_AHB_CLIENT_CPP,
+ CAM_AHB_CLIENT_FD,
+ CAM_AHB_CLIENT_JPEG,
+ CAM_AHB_CLIENT_MAX
+};
+
+int cam_config_ahb_clk(enum cam_ahb_clk_client id,
+ enum cam_ahb_clk_vote vote);
+int cam_ahb_clk_init(struct platform_device *pdev);
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
new file mode 100644
index 000000000000..6fdb5d92c838
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
@@ -0,0 +1,1619 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "CAM-SMMU %s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-attrs.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/qcom_iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include "cam_smmu_api.h"
+
+#define SCRATCH_ALLOC_START SZ_128K
+#define SCRATCH_ALLOC_END SZ_256M
+#define VA_SPACE_END SZ_2G
+#define IOMMU_INVALID_DIR -1
+#define BYTE_SIZE 8
+#define COOKIE_NUM_BYTE 2
+#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE)
+#define COOKIE_MASK ((1<<COOKIE_SIZE)-1)
+#define HANDLE_INIT (-1)
+#define CAM_SMMU_CB_MAX 2
+
+#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
+#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
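+
+/* Worked example with the 16-bit cookie defined above:
+ * GET_SMMU_HDL(1, 0x0042) = (1 << 16) | 0x0042 = 0x10042, i.e. the
+ * context-bank table index sits in the upper 16 bits and the cookie in
+ * the lower 16 bits; GET_SMMU_TABLE_IDX(0x10042) recovers index 1.
+ */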
+
+#ifdef CONFIG_CAM_SMMU_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+enum cam_protection_type {
+ CAM_PROT_INVALID,
+ CAM_NON_SECURE,
+ CAM_SECURE,
+ CAM_PROT_MAX,
+};
+
+enum cam_iommu_type {
+ CAM_SMMU_INVALID,
+ CAM_QSMMU,
+ CAM_ARM_SMMU,
+ CAM_SMMU_MAX,
+};
+
+enum cam_smmu_buf_state {
+ CAM_SMMU_BUFF_EXIST,
+ CAM_SMMU_BUFF_NOT_EXIST
+};
+
+enum cam_smmu_init_dir {
+ CAM_SMMU_TABLE_INIT,
+ CAM_SMMU_TABLE_DEINIT,
+};
+
+struct scratch_mapping {
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+};
+
+struct cam_context_bank_info {
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+ dma_addr_t va_start;
+ size_t va_len;
+ const char *name;
+ bool is_secure;
+ uint8_t scratch_buf_support;
+ struct scratch_mapping scratch_map;
+ struct list_head smmu_buf_list;
+ struct mutex lock;
+ int handle;
+ enum cam_smmu_ops_param state;
+ int (*handler[CAM_SMMU_CB_MAX])(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*);
+ void *token[CAM_SMMU_CB_MAX];
+ int cb_count;
+};
+
+struct cam_iommu_cb_set {
+ struct cam_context_bank_info *cb_info;
+ u32 cb_num;
+ u32 cb_init_count;
+};
+
+static struct of_device_id msm_cam_smmu_dt_match[] = {
+ { .compatible = "qcom,msm-cam-smmu", },
+ { .compatible = "qcom,msm-cam-smmu-cb", },
+ { .compatible = "qcom,qsmmu-cam-cb", },
+ {}
+};
+
+struct cam_dma_buff_info {
+ struct dma_buf *buf;
+ struct dma_buf_attachment *attach;
+ struct sg_table *table;
+ enum dma_data_direction dir;
+ int iommu_dir;
+ int ref_count;
+ dma_addr_t paddr;
+ struct list_head list;
+ int ion_fd;
+ size_t len;
+ size_t phys_len;
+};
+
+static struct cam_iommu_cb_set iommu_cb_set;
+
+static enum dma_data_direction cam_smmu_translate_dir(
+ enum cam_smmu_map_dir dir);
+
+static int cam_smmu_check_handle_unique(int hdl);
+
+static int cam_smmu_create_iommu_handle(int idx);
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+ int *hdl);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+ int ion_fd);
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+ dma_addr_t base, size_t size,
+ int order);
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+ size_t size,
+ dma_addr_t *iova);
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+ dma_addr_t addr, size_t size);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+ dma_addr_t virt_addr);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+ enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+ size_t virt_len,
+ size_t phys_len,
+ unsigned int iommu_dir,
+ dma_addr_t *virt_addr);
+static int cam_smmu_unmap_buf_and_remove_from_list(
+ struct cam_dma_buff_info *mapping_info, int idx);
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx);
+
+static void cam_smmu_clean_buffer_list(int idx);
+
+static void cam_smmu_print_list(int idx);
+
+static void cam_smmu_print_table(void);
+
+static int cam_smmu_probe(struct platform_device *pdev);
+
+static void cam_smmu_print_list(int idx)
+{
+ struct cam_dma_buff_info *mapping;
+
+ pr_err("index = %d ", idx);
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ pr_err("ion_fd = %d, paddr= 0x%p, len = %u\n",
+ mapping->ion_fd, (void *)mapping->paddr,
+ (unsigned int)mapping->len);
+ }
+}
+
+static void cam_smmu_print_table(void)
+{
+ int i;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ pr_err("i= %d, handle= %d, name_addr=%p\n", i,
+ (int)iommu_cb_set.cb_info[i].handle,
+ (void *)iommu_cb_set.cb_info[i].name);
+ pr_err("dev = %p ", iommu_cb_set.cb_info[i].dev);
+ }
+}
+
+
+int cam_smmu_query_vaddr_in_range(int handle,
+ unsigned long fault_addr, unsigned long *start_addr,
+ unsigned long *end_addr, int *fd)
+{
+ int idx, rc = -EINVAL;
+ struct cam_dma_buff_info *mapping;
+ unsigned long sa, ea;
+
+ if (!start_addr || !end_addr || !fd) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ sa = (unsigned long)mapping->paddr;
+ ea = (unsigned long)mapping->paddr + mapping->len;
+
+ if (sa <= fault_addr && fault_addr < ea) {
+ *start_addr = sa;
+ *end_addr = ea;
+ *fd = mapping->ion_fd;
+ rc = 0;
+ break;
+ }
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_query_vaddr_in_range);
+
+static void cam_smmu_check_vaddr_in_range(int idx, void *vaddr)
+{
+ struct cam_dma_buff_info *mapping;
+ unsigned long start_addr, end_addr, current_addr;
+
+ current_addr = (unsigned long)vaddr;
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ start_addr = (unsigned long)mapping->paddr;
+ end_addr = (unsigned long)mapping->paddr + mapping->len;
+
+ if (start_addr <= current_addr && current_addr < end_addr) {
+ pr_err("Error: va %p is valid: range:%p-%p, fd = %d cb: %s\n",
+ vaddr, (void *)start_addr, (void *)end_addr,
+ mapping->ion_fd,
+ iommu_cb_set.cb_info[idx].name);
+ return;
+ } else {
+ CDBG("va %p is not in this range: %p-%p, fd = %d\n",
+ vaddr, (void *)start_addr, (void *)end_addr,
+ mapping->ion_fd);
+ }
+ }
+ pr_err("Cannot find vaddr:%p in SMMU. %s uses invalid virtual address\n",
+ vaddr, iommu_cb_set.cb_info[idx].name);
+ return;
+}
+
+void cam_smmu_reg_client_page_fault_handler(int handle,
+ int (*client_page_fault_handler)(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*), void *token)
+{
+ int idx, i = 0;
+
+ if (!token) {
+ pr_err("Error: token is NULL\n");
+ return;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+
+ if (client_page_fault_handler) {
+ if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
+			pr_err("%s: cannot register more handlers\n",
+ iommu_cb_set.cb_info[idx].name);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+ }
+ iommu_cb_set.cb_info[idx].cb_count++;
+ for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == NULL) {
+ iommu_cb_set.cb_info[idx].token[i] = token;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ client_page_fault_handler;
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+ if (iommu_cb_set.cb_info[idx].token[i] == token) {
+ iommu_cb_set.cb_info[idx].token[i] = NULL;
+ iommu_cb_set.cb_info[idx].handler[i] =
+ NULL;
+ iommu_cb_set.cb_info[idx].cb_count--;
+ break;
+ }
+ }
+ if (i == CAM_SMMU_CB_MAX)
+ pr_err("Error: hdl %x no matching tokens: %s\n",
+ handle, iommu_cb_set.cb_info[idx].name);
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return;
+}
+
+static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova,
+ int flags, void *token)
+{
+ char *cb_name;
+ int idx, rc = -ENOSYS, j = 0;
+
+ if (!token) {
+ pr_err("Error: token is NULL\n");
+ return -ENOSYS;
+ }
+
+ cb_name = (char *)token;
+	/* check whether it is in the table */
+ for (idx = 0; idx < iommu_cb_set.cb_num; idx++) {
+ if (!strcmp(iommu_cb_set.cb_info[idx].name, cb_name))
+ break;
+ }
+
+ if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: index is not valid, index = %d, token = %s\n",
+ idx, cb_name);
+ return rc;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ cam_smmu_check_vaddr_in_range(idx, (void *)iova);
+ for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+ if ((iommu_cb_set.cb_info[idx].handler[j])) {
+ rc = iommu_cb_set.cb_info[idx].handler[j](
+ domain, dev, iova, flags,
+ iommu_cb_set.cb_info[idx].token[j]);
+ }
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+
+static int cam_smmu_translate_dir_to_iommu_dir(
+ enum cam_smmu_map_dir dir)
+{
+ switch (dir) {
+ case CAM_SMMU_MAP_READ:
+ return IOMMU_READ;
+ case CAM_SMMU_MAP_WRITE:
+ return IOMMU_WRITE;
+ case CAM_SMMU_MAP_RW:
+ return IOMMU_READ|IOMMU_WRITE;
+ case CAM_SMMU_MAP_INVALID:
+ default:
+ pr_err("Error: Direction is invalid. dir = %d\n", dir);
+ break;
+ };
+ return IOMMU_INVALID_DIR;
+}
+
+static enum dma_data_direction cam_smmu_translate_dir(
+ enum cam_smmu_map_dir dir)
+{
+ switch (dir) {
+ case CAM_SMMU_MAP_READ:
+ return DMA_FROM_DEVICE;
+ case CAM_SMMU_MAP_WRITE:
+ return DMA_TO_DEVICE;
+ case CAM_SMMU_MAP_RW:
+ return DMA_BIDIRECTIONAL;
+ case CAM_SMMU_MAP_INVALID:
+ default:
+ pr_err("Error: Direction is invalid. dir = %d\n", (int)dir);
+ break;
+ }
+ return DMA_NONE;
+}
+
+void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
+{
+ unsigned int i;
+ int j = 0;
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ iommu_cb_set.cb_info[i].handle = HANDLE_INIT;
+ INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list);
+ iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
+ iommu_cb_set.cb_info[i].dev = NULL;
+ iommu_cb_set.cb_info[i].cb_count = 0;
+ for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+ iommu_cb_set.cb_info[i].token[j] = NULL;
+ iommu_cb_set.cb_info[i].handler[j] = NULL;
+ }
+ if (ops == CAM_SMMU_TABLE_INIT)
+ mutex_init(&iommu_cb_set.cb_info[i].lock);
+ else
+ mutex_destroy(&iommu_cb_set.cb_info[i].lock);
+ }
+}
+
+static int cam_smmu_check_handle_unique(int hdl)
+{
+ int i;
+
+ if (hdl == HANDLE_INIT) {
+ CDBG("iommu handle is init number. Need to try again\n");
+ return 1;
+ }
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT)
+ continue;
+
+ if (iommu_cb_set.cb_info[i].handle == hdl) {
+ CDBG("iommu handle %d conflicts\n", (int)hdl);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * use low 2 bytes for handle cookie
+ */
+static int cam_smmu_create_iommu_handle(int idx)
+{
+ int rand, hdl = 0;
+ get_random_bytes(&rand, COOKIE_NUM_BYTE);
+ hdl = GET_SMMU_HDL(idx, rand);
+ CDBG("create handle value = %x\n", (int)hdl);
+ return hdl;
+}
+
+static int cam_smmu_attach_device(int idx)
+{
+ int rc;
+ struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+ /* attach the mapping to device */
+ rc = arm_iommu_attach_device(cb->dev, cb->mapping);
+ if (rc < 0) {
+ pr_err("Error: ARM IOMMU attach failed. ret = %d\n", rc);
+ return -ENODEV;
+ }
+ return rc;
+}
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+ int *hdl)
+{
+ int i;
+ int handle;
+
+ /* create handle and add in the iommu hardware table */
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
+ mutex_lock(&iommu_cb_set.cb_info[i].lock);
+ if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
+ pr_err("Error: %s already got handle 0x%x\n",
+ name,
+ iommu_cb_set.cb_info[i].handle);
+ mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+ return -EINVAL;
+ }
+
+ /* make sure handle is unique */
+ do {
+ handle = cam_smmu_create_iommu_handle(i);
+ } while (cam_smmu_check_handle_unique(handle));
+
+ /* put handle in the table */
+ iommu_cb_set.cb_info[i].handle = handle;
+ iommu_cb_set.cb_info[i].cb_count = 0;
+ *hdl = handle;
+ CDBG("%s creates handle 0x%x\n", name, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+ return 0;
+ }
+ }
+
+ /* if i == iommu_cb_set.cb_num */
+	pr_err("Error: Cannot find name %s or all handles exist!\n",
+ name);
+ cam_smmu_print_table();
+ return -EINVAL;
+}
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+ dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ int err = 0;
+
+ if (!count) {
+ err = -EINVAL;
+		pr_err("Error: wrong size passed, page count can't be zero\n");
+ goto bail;
+ }
+
+ scratch_map->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!scratch_map->bitmap) {
+ err = -ENOMEM;
+ goto bail;
+ }
+
+ scratch_map->base = base;
+ scratch_map->bits = BITS_PER_BYTE * bitmap_size;
+ scratch_map->order = order;
+
+bail:
+ return err;
+}
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+ size_t size,
+ dma_addr_t *iova)
+{
+ int rc = 0;
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ /* Transparently, add a guard page to the total count of pages
+ * to be allocated */
+ count++;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+
+ if (start > mapping->bits)
+ rc = -ENOMEM;
+
+ bitmap_set(mapping->bitmap, start, count);
+
+ *iova = mapping->base + (start << (mapping->order + PAGE_SHIFT));
+ return rc;
+}
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (!addr) {
+ pr_err("Error: Invalid address\n");
+ return -EINVAL;
+ }
+
+ if (start + count > mapping->bits) {
+ pr_err("Error: Invalid page bits in scratch map\n");
+ return -EINVAL;
+ }
+
+ /* Transparently, add a guard page to the total count of pages
+ * to be freed */
+ count++;
+
+ bitmap_clear(mapping->bitmap, start, count);
+
+ return 0;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+ dma_addr_t virt_addr)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->paddr == virt_addr) {
+ CDBG("Found virtual address %lx\n",
+ (unsigned long)virt_addr);
+ return mapping;
+ }
+ }
+
+ pr_err("Error: Cannot find virtual address %lx by index %d\n",
+ (unsigned long)virt_addr, idx);
+ return NULL;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+ int ion_fd)
+{
+ struct cam_dma_buff_info *mapping;
+
+ list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->ion_fd == ion_fd) {
+			CDBG("Found ion_fd %d\n", ion_fd);
+ return mapping;
+ }
+ }
+
+ pr_err("Error: Cannot find fd %d by index %d\n",
+ ion_fd, idx);
+ return NULL;
+}
+
+static void cam_smmu_clean_buffer_list(int idx)
+{
+ int ret;
+ struct cam_dma_buff_info *mapping_info, *temp;
+
+ list_for_each_entry_safe(mapping_info, temp,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+ CDBG("Free mapping address %p, i = %d, fd = %d\n",
+ (void *)mapping_info->paddr, idx,
+ mapping_info->ion_fd);
+
+ if (mapping_info->ion_fd == 0xDEADBEEF)
+ /* Clean up scratch buffers */
+ ret = cam_smmu_free_scratch_buffer_remove_from_list(
+ mapping_info, idx);
+ else
+ /* Clean up regular mapped buffers */
+ ret = cam_smmu_unmap_buf_and_remove_from_list(
+ mapping_info,
+ idx);
+
+ if (ret < 0) {
+ pr_err("Buffer delete failed: idx = %d\n", idx);
+ pr_err("Buffer delete failed: addr = %lx, fd = %d\n",
+ (unsigned long)mapping_info->paddr,
+ mapping_info->ion_fd);
+ /*
+ * Ignore this error and continue to delete other
+ * buffers in the list
+ */
+ continue;
+ }
+ }
+}
+
+static int cam_smmu_attach(int idx)
+{
+ int ret;
+
+ if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+ ret = 0;
+ } else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+ ret = cam_smmu_attach_device(idx);
+ if (ret < 0) {
+ pr_err("Error: ATTACH fail\n");
+ return -ENODEV;
+ }
+ iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
+ ret = 0;
+ } else {
+		pr_err("Error: Not in attach/detach state\n");
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+ enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr)
+{
+ int rc = -1;
+ struct cam_dma_buff_info *mapping_info;
+ struct dma_buf *buf = NULL;
+ struct dma_buf_attachment *attach = NULL;
+ struct sg_table *table = NULL;
+
+ /* allocate memory for each buffer information */
+ buf = dma_buf_get(ion_fd);
+ if (IS_ERR_OR_NULL(buf)) {
+ rc = PTR_ERR(buf);
+ pr_err("Error: dma get buf failed. fd = %d\n", ion_fd);
+ goto err_out;
+ }
+
+ attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
+ if (IS_ERR_OR_NULL(attach)) {
+ rc = PTR_ERR(attach);
+ pr_err("Error: dma buf attach failed\n");
+ goto err_put;
+ }
+
+ table = dma_buf_map_attachment(attach, dma_dir);
+ if (IS_ERR_OR_NULL(table)) {
+ rc = PTR_ERR(table);
+ pr_err("Error: dma buf map attachment failed\n");
+ goto err_detach;
+ }
+
+ rc = msm_dma_map_sg_lazy(iommu_cb_set.cb_info[idx].dev, table->sgl,
+ table->nents, dma_dir, buf);
+ if (!rc) {
+ pr_err("Error: msm_dma_map_sg_lazy failed\n");
+ goto err_unmap_sg;
+ }
+
+ if (table->sgl) {
+ CDBG("DMA buf: %p, device: %p, attach: %p, table: %p\n",
+ (void *)buf,
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)attach, (void *)table);
+ CDBG("table sgl: %p, rc: %d, dma_address: 0x%x\n",
+ (void *)table->sgl, rc,
+ (unsigned int)table->sgl->dma_address);
+ } else {
+ rc = -EINVAL;
+ pr_err("Error: table sgl is null\n");
+ goto err_unmap_sg;
+ }
+
+ /* fill up mapping_info */
+ mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+ if (!mapping_info) {
+		pr_err("Error: Not enough space!\n");
+ rc = -ENOSPC;
+ goto err_unmap_sg;
+ }
+ mapping_info->ion_fd = ion_fd;
+ mapping_info->buf = buf;
+ mapping_info->attach = attach;
+ mapping_info->table = table;
+ mapping_info->paddr = sg_dma_address(table->sgl);
+ mapping_info->len = (size_t)sg_dma_len(table->sgl);
+ mapping_info->dir = dma_dir;
+ mapping_info->ref_count = 1;
+
+ /* return paddr and len to client */
+ *paddr_ptr = sg_dma_address(table->sgl);
+ *len_ptr = (size_t)sg_dma_len(table->sgl);
+
+ if (!paddr_ptr) {
+ pr_err("Error: Space Allocation failed!\n");
+ rc = -ENOSPC;
+ goto err_unmap_sg;
+ }
+ CDBG("ion_fd = %d, dev = %p, paddr= %p, len = %u\n", ion_fd,
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+ /* add to the list */
+ list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+ return 0;
+
+err_unmap_sg:
+ dma_buf_unmap_attachment(attach, table, dma_dir);
+err_detach:
+ dma_buf_detach(buf, attach);
+err_put:
+ dma_buf_put(buf);
+err_out:
+ return rc;
+}
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx)
+{
+ if ((!mapping_info->buf) || (!mapping_info->table) ||
+ (!mapping_info->attach)) {
+ pr_err("Error: Invalid params dev = %p, table = %p",
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)mapping_info->table);
+ pr_err("Error:dma_buf = %p, attach = %p\n",
+ (void *)mapping_info->buf,
+ (void *)mapping_info->attach);
+ return -EINVAL;
+ }
+
+ /* iommu buffer clean up */
+ msm_dma_unmap_sg(iommu_cb_set.cb_info[idx].dev,
+ mapping_info->table->sgl, mapping_info->table->nents,
+ mapping_info->dir, mapping_info->buf);
+ dma_buf_unmap_attachment(mapping_info->attach,
+ mapping_info->table, mapping_info->dir);
+ dma_buf_detach(mapping_info->buf, mapping_info->attach);
+ dma_buf_put(mapping_info->buf);
+ mapping_info->buf = NULL;
+
+ list_del_init(&mapping_info->list);
+
+ /* free one buffer */
+ kfree(mapping_info);
+ return 0;
+}
+
+static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
+ int ion_fd, dma_addr_t *paddr_ptr,
+ size_t *len_ptr)
+{
+ struct cam_dma_buff_info *mapping;
+ list_for_each_entry(mapping,
+ &iommu_cb_set.cb_info[idx].smmu_buf_list,
+ list) {
+ if (mapping->ion_fd == ion_fd) {
+ mapping->ref_count++;
+ *paddr_ptr = mapping->paddr;
+ *len_ptr = mapping->len;
+ return CAM_SMMU_BUFF_EXIST;
+ }
+ }
+ return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+int cam_smmu_get_handle(char *identifier, int *handle_ptr)
+{
+ int ret = 0;
+
+ if (!identifier) {
+		pr_err("Error: iommu hardware name is NULL\n");
+ return -EFAULT;
+ }
+
+ if (!handle_ptr) {
+ pr_err("Error: handle pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ /* create and put handle in the table */
+ ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
+ if (ret < 0) {
+ pr_err("Error: %s get handle fail\n", identifier);
+ return ret;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(cam_smmu_get_handle);
+
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param ops)
+{
+ int ret = 0, idx;
+
+ CDBG("E: ops = %d\n", ops);
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ switch (ops) {
+ case CAM_SMMU_ATTACH: {
+ ret = cam_smmu_attach(idx);
+ break;
+ }
+ case CAM_SMMU_DETACH: {
+ ret = 0;
+ break;
+ }
+ case CAM_SMMU_VOTE:
+ case CAM_SMMU_DEVOTE:
+ default:
+		pr_err("Error: unsupported ops %d for idx = %d\n", ops, idx);
+ ret = -EINVAL;
+ }
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return ret;
+}
+EXPORT_SYMBOL(cam_smmu_ops);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+ size_t virt_len,
+ size_t phys_len,
+ unsigned int iommu_dir,
+ dma_addr_t *virt_addr)
+{
+ unsigned long nents = virt_len / phys_len;
+ struct cam_dma_buff_info *mapping_info = NULL;
+ size_t unmapped;
+ dma_addr_t iova = 0;
+ struct scatterlist *sg;
+ int i = 0;
+ int rc;
+ struct iommu_domain *domain = NULL;
+ struct page *page;
+ struct sg_table *table = NULL;
+
+ CDBG("%s: nents = %lu, idx = %d, virt_len = %zx\n",
+ __func__, nents, idx, virt_len);
+ CDBG("%s: phys_len = %zx, iommu_dir = %d, virt_addr = %p\n",
+ __func__, phys_len, iommu_dir, virt_addr);
+
+ /* This table will go inside the 'mapping' structure
+ * where it will be held until put_scratch_buffer is called
+ */
+ table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+ if (!table) {
+ rc = -ENOMEM;
+ goto err_table_alloc;
+ }
+
+ rc = sg_alloc_table(table, nents, GFP_KERNEL);
+ if (rc < 0) {
+ rc = -EINVAL;
+ goto err_sg_alloc;
+ }
+
+ page = alloc_pages(GFP_KERNEL, get_order(phys_len));
+ if (!page) {
+ rc = -ENOMEM;
+ goto err_page_alloc;
+ }
+
+ /* Now we create the sg list */
+ for_each_sg(table->sgl, sg, table->nents, i)
+ sg_set_page(sg, page, phys_len, 0);
+
+
+ /* Get the domain from within our cb_set struct and map it*/
+ domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+ rc = cam_smmu_alloc_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+ virt_len, &iova);
+
+ if (rc < 0) {
+ pr_err("Could not find valid iova for scratch buffer");
+ goto err_iommu_map;
+ }
+
+ if (iommu_map_sg(domain,
+ iova,
+ table->sgl,
+ table->nents,
+ iommu_dir) != virt_len) {
+ pr_err("iommu_map_sg() failed");
+ goto err_iommu_map;
+ }
+
+ /* Now update our mapping information within the cb_set struct */
+ mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+ if (!mapping_info) {
+ rc = -ENOMEM;
+ goto err_mapping_info;
+ }
+
+ mapping_info->ion_fd = 0xDEADBEEF;
+ mapping_info->buf = NULL;
+ mapping_info->attach = NULL;
+ mapping_info->table = table;
+ mapping_info->paddr = iova;
+ mapping_info->len = virt_len;
+ mapping_info->iommu_dir = iommu_dir;
+ mapping_info->ref_count = 1;
+ mapping_info->phys_len = phys_len;
+
+ CDBG("%s: paddr = %p, len = %zx, phys_len = %zx",
+ __func__, (void *)mapping_info->paddr,
+ mapping_info->len, mapping_info->phys_len);
+
+ list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+ *virt_addr = (dma_addr_t)iova;
+
+ CDBG("%s: mapped virtual address = %lx\n", __func__,
+ (unsigned long)*virt_addr);
+ return 0;
+
+err_mapping_info:
+ unmapped = iommu_unmap(domain, iova, virt_len);
+ if (unmapped != virt_len)
+ pr_err("Unmapped only %zx instead of %zx", unmapped, virt_len);
+err_iommu_map:
+ __free_pages(sg_page(table->sgl), get_order(phys_len));
+err_page_alloc:
+ sg_free_table(table);
+err_sg_alloc:
+ kfree(table);
+err_table_alloc:
+ return rc;
+}
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+ struct cam_dma_buff_info *mapping_info,
+ int idx)
+{
+ int rc = 0;
+ size_t unmapped;
+ struct iommu_domain *domain =
+ iommu_cb_set.cb_info[idx].mapping->domain;
+ struct scratch_mapping *scratch_map =
+ &iommu_cb_set.cb_info[idx].scratch_map;
+
+ if (!mapping_info->table) {
+ pr_err("Error: Invalid params: dev = %p, table = %p, ",
+ (void *)iommu_cb_set.cb_info[idx].dev,
+ (void *)mapping_info->table);
+ return -EINVAL;
+ }
+
+ /* Clean up the mapping_info struct from the list */
+ unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
+ if (unmapped != mapping_info->len)
+ pr_err("Unmapped only %zx instead of %zx",
+ unmapped, mapping_info->len);
+
+ rc = cam_smmu_free_scratch_va(scratch_map,
+ mapping_info->paddr,
+ mapping_info->len);
+ if (rc < 0) {
+ pr_err("Error: Invalid iova while freeing scratch buffer\n");
+ rc = -EINVAL;
+ }
+
+ __free_pages(sg_page(mapping_info->table->sgl),
+ get_order(mapping_info->phys_len));
+ sg_free_table(mapping_info->table);
+ kfree(mapping_info->table);
+ list_del_init(&mapping_info->list);
+
+ kfree(mapping_info);
+ mapping_info = NULL;
+
+ return rc;
+}
+
+int cam_smmu_get_phy_addr_scratch(int handle,
+ enum cam_smmu_map_dir dir,
+ dma_addr_t *paddr_ptr,
+ size_t virt_len,
+ size_t phys_len)
+{
+ int idx, rc;
+ unsigned int iommu_dir;
+
+ if (!paddr_ptr || !virt_len || !phys_len) {
+ pr_err("Error: Input pointer or lengths invalid\n");
+ return -EINVAL;
+ }
+
+ if (virt_len < phys_len) {
+		pr_err("Error: virt_len is less than phys_len\n");
+ return -EINVAL;
+ }
+
+ iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
+ if (iommu_dir == IOMMU_INVALID_DIR) {
+ pr_err("Error: translate direction failed. dir = %d\n", dir);
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+ pr_err("Error: Context bank does not support scratch bufs\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ CDBG("%s: smmu handle = %x, idx = %d, dir = %d\n",
+ __func__, handle, idx, dir);
+ CDBG("%s: virt_len = %zx, phys_len = %zx\n",
+		__func__, virt_len, phys_len);
+
+ if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		pr_err("Error: Device %s should call SMMU attach before mapping buffers\n",
+ iommu_cb_set.cb_info[idx].name);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
+ pr_err("Requested scratch buffer length not page aligned");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (!IS_ALIGNED(virt_len, phys_len)) {
+ pr_err("Requested virtual length not aligned with physical length");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ rc = cam_smmu_alloc_scratch_buffer_add_to_list(idx,
+ virt_len,
+ phys_len,
+ iommu_dir,
+ paddr_ptr);
+ if (rc < 0) {
+ pr_err("Error: mapping or add list fail\n");
+ goto error;
+ }
+
+error:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+
+int cam_smmu_put_phy_addr_scratch(int handle,
+ dma_addr_t paddr)
+{
+ int idx;
+ int rc = -1;
+ struct cam_dma_buff_info *mapping_info;
+
+ /* find index in the iommu_cb_set.cb_info */
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto handle_err;
+ }
+
+ if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+ pr_err("Error: Context bank does not support scratch buffers");
+ rc = -EINVAL;
+ goto handle_err;
+ }
+
+ /* Based on virtual address and index, we can find mapping info
+ * of the scratch buffer
+ */
+ mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
+ if (!mapping_info) {
+ pr_err("Error: Invalid params\n");
+ rc = -EINVAL;
+ goto handle_err;
+ }
+
+ /* unmapping one buffer from device */
+ rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
+ if (rc < 0) {
+ pr_err("Error: unmap or remove list fail\n");
+ goto handle_err;
+ }
+
+handle_err:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+
+int cam_smmu_get_phy_addr(int handle, int ion_fd,
+ enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+ size_t *len_ptr)
+{
+ int idx, rc;
+ enum dma_data_direction dma_dir;
+ enum cam_smmu_buf_state buf_state;
+
+ if (!paddr_ptr || !len_ptr) {
+ pr_err("Error: Input pointers are invalid\n");
+ return -EINVAL;
+ }
+ /* clean the content from clients */
+ *paddr_ptr = (dma_addr_t)NULL;
+ *len_ptr = (size_t)0;
+
+ dma_dir = cam_smmu_translate_dir(dir);
+ if (dma_dir == DMA_NONE) {
+ pr_err("Error: translate direction failed. dir = %d\n", dir);
+ return -EINVAL;
+ }
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		pr_err("Error: Device %s should call SMMU attach before mapping buffers\n",
+ iommu_cb_set.cb_info[idx].name);
+ rc = -EINVAL;
+ goto get_addr_end;
+ }
+
+ buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
+ if (buf_state == CAM_SMMU_BUFF_EXIST) {
+ CDBG("ion_fd:%d already in the list, give same addr back",
+ ion_fd);
+ rc = 0;
+ goto get_addr_end;
+ }
+ rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+ paddr_ptr, len_ptr);
+ if (rc < 0) {
+ pr_err("Error: mapping or add list fail\n");
+ goto get_addr_end;
+ }
+
+get_addr_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_phy_addr);
+
+int cam_smmu_put_phy_addr(int handle, int ion_fd)
+{
+ int idx, rc;
+ struct cam_dma_buff_info *mapping_info;
+
+ /* find index in the iommu_cb_set.cb_info */
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ rc = -EINVAL;
+ goto put_addr_end;
+ }
+
+ /* based on ion fd and index, we can find mapping info of buffer */
+ mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+ if (!mapping_info) {
+ pr_err("Error: Invalid params! idx = %d, fd = %d\n",
+ idx, ion_fd);
+ rc = -EINVAL;
+ goto put_addr_end;
+ }
+
+ mapping_info->ref_count--;
+ if (mapping_info->ref_count > 0) {
+ CDBG("There are still %u buffer(s) with same fd %d",
+ mapping_info->ref_count, mapping_info->ion_fd);
+ rc = 0;
+ goto put_addr_end;
+ }
+
+ /* unmapping one buffer from device */
+ rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+ if (rc < 0) {
+ pr_err("Error: unmap or remove list fail\n");
+ goto put_addr_end;
+ }
+
+put_addr_end:
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return rc;
+}
+EXPORT_SYMBOL(cam_smmu_put_phy_addr);
+
+int cam_smmu_destroy_handle(int handle)
+{
+ int idx;
+
+ idx = GET_SMMU_TABLE_IDX(handle);
+ if (handle == HANDLE_INIT || idx < 0 || idx >= iommu_cb_set.cb_num) {
+ pr_err("Error: handle or index invalid. idx = %d hdl = %x\n",
+ idx, handle);
+ return -EINVAL;
+ }
+
+ mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+ if (iommu_cb_set.cb_info[idx].handle != handle) {
+ pr_err("Error: hdl is not valid, table_hdl = %x, hdl = %x\n",
+ iommu_cb_set.cb_info[idx].handle, handle);
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return -EINVAL;
+ }
+
+ if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
+ pr_err("Client %s buffer list is not clean!\n",
+ iommu_cb_set.cb_info[idx].name);
+ cam_smmu_print_list(idx);
+ cam_smmu_clean_buffer_list(idx);
+ }
+
+ iommu_cb_set.cb_info[idx].cb_count = 0;
+ iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+ mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+ return 0;
+}
+EXPORT_SYMBOL(cam_smmu_destroy_handle);
+
+/* This function can only be called after the SMMU driver has probed */
+int cam_smmu_get_num_of_clients(void)
+{
+ return iommu_cb_set.cb_num;
+}
+
+static void cam_smmu_release_cb(struct platform_device *pdev)
+{
+ int i = 0;
+
+ for (i = 0; i < iommu_cb_set.cb_num; i++) {
+ arm_iommu_detach_device(iommu_cb_set.cb_info[i].dev);
+ arm_iommu_release_mapping(iommu_cb_set.cb_info[i].mapping);
+ }
+
+ devm_kfree(&pdev->dev, iommu_cb_set.cb_info);
+ iommu_cb_set.cb_num = 0;
+}
+
+static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
+ struct device *dev)
+{
+ int rc = 0;
+ int disable_htw = 1;
+
+ if (!cb || !dev) {
+ pr_err("Error: invalid input params\n");
+ return -EINVAL;
+ }
+
+ cb->dev = dev;
+ /* Reserve 256M if scratch buffer support is desired
+ * and initialize the scratch mapping structure
+ */
+ if (cb->scratch_buf_support) {
+ cb->va_start = SCRATCH_ALLOC_END;
+ cb->va_len = VA_SPACE_END - SCRATCH_ALLOC_END;
+
+ rc = cam_smmu_init_scratch_map(&cb->scratch_map,
+ SCRATCH_ALLOC_START,
+ SCRATCH_ALLOC_END - SCRATCH_ALLOC_START,
+ 0);
+ if (rc < 0) {
+ pr_err("Error: failed to create scratch map\n");
+ rc = -ENODEV;
+ goto end;
+ }
+ } else {
+ cb->va_start = SZ_128K;
+ cb->va_len = VA_SPACE_END - SZ_128K;
+ }
+
+ /* create a virtual mapping */
+ cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+ cb->va_start, cb->va_len);
+ if (IS_ERR(cb->mapping)) {
+ pr_err("Error: create mapping Failed\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ /*
+ * Set the domain attributes
+ * disable L2 redirect since it decreases
+ * performance
+ */
+ if (iommu_domain_set_attr(cb->mapping->domain,
+ DOMAIN_ATTR_COHERENT_HTW_DISABLE,
+ &disable_htw)) {
+ pr_err("Error: couldn't disable coherent HTW\n");
+ rc = -ENODEV;
+ goto err_set_attr;
+ }
+ return 0;
+err_set_attr:
+ arm_iommu_release_mapping(cb->mapping);
+end:
+ return rc;
+}
+
+static int cam_alloc_smmu_context_banks(struct device *dev)
+{
+ struct device_node *domains_child_node = NULL;
+ if (!dev) {
+ pr_err("Error: Invalid device\n");
+ return -ENODEV;
+ }
+
+ iommu_cb_set.cb_num = 0;
+
+	/* traverse through all the child nodes and increment the cb count */
+ for_each_child_of_node(dev->of_node, domains_child_node) {
+ if (of_device_is_compatible(domains_child_node,
+ "qcom,msm-cam-smmu-cb"))
+ iommu_cb_set.cb_num++;
+
+ if (of_device_is_compatible(domains_child_node,
+ "qcom,qsmmu-cam-cb"))
+ iommu_cb_set.cb_num++;
+ }
+
+ if (iommu_cb_set.cb_num == 0) {
+ pr_err("Error: no context banks present\n");
+ return -ENOENT;
+ }
+
+ /* allocate memory for the context banks */
+ iommu_cb_set.cb_info = devm_kzalloc(dev,
+ iommu_cb_set.cb_num * sizeof(struct cam_context_bank_info),
+ GFP_KERNEL);
+
+ if (!iommu_cb_set.cb_info) {
+ pr_err("Error: cannot allocate context banks\n");
+ return -ENOMEM;
+ }
+
+ cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
+ iommu_cb_set.cb_init_count = 0;
+
+ CDBG("no of context banks :%d\n", iommu_cb_set.cb_num);
+ return 0;
+}
+
+static int cam_populate_smmu_context_banks(struct device *dev,
+ enum cam_iommu_type type)
+{
+ int rc = 0;
+ struct cam_context_bank_info *cb;
+ struct device *ctx;
+
+ if (!dev) {
+ pr_err("Error: Invalid device\n");
+ return -ENODEV;
+ }
+
+ /* check the bounds */
+ if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
+		pr_err("Error: populating more context banks than allocated\n");
+ rc = -EBADHANDLE;
+ goto cb_init_fail;
+ }
+
+ /* read the context bank from cb set */
+ cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count];
+
+ /* set the name of the context bank */
+ rc = of_property_read_string(dev->of_node, "label", &cb->name);
+ if (rc) {
+ pr_err("Error: failed to read label from sub device\n");
+ goto cb_init_fail;
+ }
+
+ /* Check if context bank supports scratch buffers */
+ if (of_property_read_bool(dev->of_node, "qcom,scratch-buf-support"))
+ cb->scratch_buf_support = 1;
+ else
+ cb->scratch_buf_support = 0;
+
+ /* set the secure/non secure domain type */
+ if (of_property_read_bool(dev->of_node, "qcom,secure-context"))
+ cb->is_secure = CAM_SECURE;
+ else
+ cb->is_secure = CAM_NON_SECURE;
+
+ CDBG("cb->name :%s, cb->is_secure :%d, cb->scratch_support :%d\n",
+ cb->name, cb->is_secure, cb->scratch_buf_support);
+
+ /* set up the iommu mapping for the context bank */
+ if (type == CAM_QSMMU) {
+ ctx = msm_iommu_get_ctx(cb->name);
+ if (IS_ERR_OR_NULL(ctx)) {
+ rc = PTR_ERR(ctx);
+ pr_err("Invalid pointer of ctx : %s rc = %d\n",
+ cb->name, rc);
+ return -EINVAL;
+ }
+ CDBG("getting QSMMU ctx : %s\n", cb->name);
+ } else {
+ ctx = dev;
+ CDBG("getting Arm SMMU ctx : %s\n", cb->name);
+ }
+ rc = cam_smmu_setup_cb(cb, ctx);
+ if (rc < 0)
+ pr_err("Error: failed to setup cb : %s\n", cb->name);
+
+ iommu_set_fault_handler(cb->mapping->domain,
+ cam_smmu_iommu_fault_handler,
+ (void *)cb->name);
+
+ /* increment count to next bank */
+ iommu_cb_set.cb_init_count++;
+
+ CDBG("X: cb init count :%d\n", iommu_cb_set.cb_init_count);
+ return rc;
+
+cb_init_fail:
+ iommu_cb_set.cb_info = NULL;
+ return rc;
+}
+
+static int cam_smmu_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct device *dev = &pdev->dev;
+
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
+ rc = cam_alloc_smmu_context_banks(dev);
+ if (rc < 0) {
+ pr_err("Error: allocating context banks\n");
+ return -ENOMEM;
+ }
+ }
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
+ rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
+ if (rc < 0) {
+ pr_err("Error: populating context banks\n");
+ return -ENOMEM;
+ }
+ return rc;
+ }
+ if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
+ rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
+ if (rc < 0) {
+ pr_err("Error: populating context banks\n");
+ return -ENOMEM;
+ }
+ return rc;
+ }
+
+	/* probe through all the subdevices */
+ rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
+ NULL, &pdev->dev);
+ if (rc < 0)
+ pr_err("Error: populating devices\n");
+ return rc;
+}
+
+static int cam_smmu_remove(struct platform_device *pdev)
+{
+ /* release all the context banks and memory allocated */
+ cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cam-smmu"))
+ cam_smmu_release_cb(pdev);
+ return 0;
+}
+
+static struct platform_driver cam_smmu_driver = {
+ .probe = cam_smmu_probe,
+ .remove = cam_smmu_remove,
+ .driver = {
+ .name = "msm_cam_smmu",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cam_smmu_dt_match,
+ },
+};
+
+static int __init cam_smmu_init_module(void)
+{
+ return platform_driver_register(&cam_smmu_driver);
+}
+
+static void __exit cam_smmu_exit_module(void)
+{
+ platform_driver_unregister(&cam_smmu_driver);
+}
+
+module_init(cam_smmu_init_module);
+module_exit(cam_smmu_exit_module);
+MODULE_DESCRIPTION("MSM Camera SMMU driver");
+MODULE_LICENSE("GPL v2");
+
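A minimal usage sketch of the API added above, assuming a context bank labelled "vfe" in the device tree and an ION fd supplied by the client; both values, and the cam_smmu_usage_sketch name, are placeholders rather than anything defined by this patch:

#include "cam_smmu_api.h"

static int cam_smmu_usage_sketch(int ion_fd)
{
	char name[] = "vfe";	/* placeholder: the context bank's DT label */
	int handle, rc;
	dma_addr_t dev_addr;
	size_t len;

	/* Look up the context bank and get a handle for it */
	rc = cam_smmu_get_handle(name, &handle);
	if (rc < 0)
		return rc;

	/* Attach must happen before any buffer mapping is requested */
	rc = cam_smmu_ops(handle, CAM_SMMU_ATTACH);
	if (rc < 0)
		goto destroy;

	/* Map the ION buffer; dev_addr and len come back for HW programming */
	rc = cam_smmu_get_phy_addr(handle, ion_fd, CAM_SMMU_MAP_RW,
				   &dev_addr, &len);
	if (rc < 0)
		goto detach;

	/* ... program dev_addr and len into the camera hardware ... */

	cam_smmu_put_phy_addr(handle, ion_fd);
detach:
	cam_smmu_ops(handle, CAM_SMMU_DETACH);
destroy:
	cam_smmu_destroy_handle(handle);
	return rc;
}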
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.h b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.h
new file mode 100644
index 000000000000..f9c3a836dafe
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.h
@@ -0,0 +1,166 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_SMMU_API_H_
+#define _CAM_SMMU_API_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-attrs.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+
+/*
+ * Enum for possible CAM SMMU operations
+ */
+
+enum cam_smmu_ops_param {
+ CAM_SMMU_ATTACH,
+ CAM_SMMU_DETACH,
+ CAM_SMMU_VOTE,
+ CAM_SMMU_DEVOTE,
+ CAM_SMMU_OPS_INVALID
+};
+
+enum cam_smmu_map_dir {
+ CAM_SMMU_MAP_READ,
+ CAM_SMMU_MAP_WRITE,
+ CAM_SMMU_MAP_RW,
+ CAM_SMMU_MAP_INVALID
+};
+
+/**
+ * @param identifier: Unique identifier to be used by clients which they
+ * should get from device tree. CAM SMMU driver will
+ * not enforce how this string is obtained and will
+ * only validate this against the list of permitted
+ * identifiers
+ * @param handle_ptr: Based on the identifier, the CAM SMMU driver will
+ *                     fill in the handle pointed to by handle_ptr.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_handle(char *identifier, int *handle_ptr);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param op : Operation to be performed. Can be either CAM_SMMU_ATTACH
+ * or CAM_SMMU_DETACH
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ * @param dir: Mapping direction, which will translate to DMA_BIDIRECTIONAL,
+ *             DMA_TO_DEVICE or DMA_FROM_DEVICE.
+ * @param dma_addr: Pointer to the device address where the mapped address
+ *                  will be returned.
+ * @param len_ptr: Length of the mapped buffer, returned by the CAM SMMU
+ *                 driver.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_phy_addr(int handle,
+ int ion_fd, enum cam_smmu_map_dir dir,
+ dma_addr_t *dma_addr, size_t *len_ptr);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_phy_addr(int handle, int ion_fd);
+
+/**
+ * @brief : Allocates a scratch buffer
+ *
+ * This function allocates a scratch virtual buffer of length virt_len in the
+ * device virtual address space mapped to phys_len physically contiguous bytes
+ * in that device's SMMU.
+ *
+ * virt_len and phys_len are expected to be aligned to PAGE_SIZE and with each
+ * other, otherwise -EINVAL is returned.
+ *
+ * -EINVAL will be returned if virt_len is less than phys_len.
+ *
+ * A phys_len that is too large may also cause the allocation to fail if that
+ * much physically contiguous memory is not available.
+ *
+ * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param dir : Direction of mapping which will translate to IOMMU_READ
+ * IOMMU_WRITE or a bit mask of both.
+ * @param paddr_ptr: Device virtual address that the client device will be
+ * able to read from/write to
+ * @param virt_len : Virtual length of the scratch buffer
+ * @param phys_len : Physical length of the scratch buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_get_phy_addr_scratch(int handle,
+ enum cam_smmu_map_dir dir,
+ dma_addr_t *paddr_ptr,
+ size_t virt_len,
+ size_t phys_len);
+
+/**
+ * @brief : Frees a scratch buffer
+ *
+ * This function frees a scratch buffer and releases the corresponding SMMU
+ * mappings.
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param paddr : Device virtual address of the client's scratch buffer that
+ *                will be freed.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int cam_smmu_put_phy_addr_scratch(int handle,
+ dma_addr_t paddr);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_destroy_handle(int handle);
+
+/**
+ * @return Number of clients. Zero in case of error.
+ */
+int cam_smmu_get_num_of_clients(void);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @return Index of SMMU client. Negative in case of error.
+ */
+int cam_smmu_find_index_by_handle(int hdl);
+
+/**
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param client_page_fault_handler: Callback invoked when an IOMMU page
+ *                                   fault occurs. Pass NULL to unregister.
+ * @param token: Opaque client token passed back to the handler when it is
+ *               invoked.
+ */
+void cam_smmu_reg_client_page_fault_handler(int handle,
+ int (*client_page_fault_handler)(struct iommu_domain *,
+ struct device *, unsigned long,
+ int, void*), void *token);
+
+#endif /* _CAM_SMMU_API_H_ */
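A second sketch for the scratch buffer and page fault paths declared in this header, assuming the context bank advertises qcom,scratch-buf-support and has already been attached; my_fault_cb, cam_smmu_scratch_sketch and the SZ_1M/SZ_4K lengths are illustrative only:

#include <linux/sizes.h>
#include "cam_smmu_api.h"

/* Illustrative fault callback matching the registration prototype above */
static int my_fault_cb(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags, void *token)
{
	pr_err("camera page fault at iova 0x%lx\n", iova);
	return -ENOSYS;
}

static int cam_smmu_scratch_sketch(int handle, void *client_token)
{
	dma_addr_t scratch_addr;
	int rc;

	/* Register for page faults on this context bank */
	cam_smmu_reg_client_page_fault_handler(handle, my_fault_cb,
					       client_token);

	/* 1 MB of device VA backed by a single 4 KB physical allocation */
	rc = cam_smmu_get_phy_addr_scratch(handle, CAM_SMMU_MAP_RW,
					   &scratch_addr, SZ_1M, SZ_4K);
	if (rc < 0)
		goto unreg;

	/* ... hand scratch_addr to the hardware ... */

	cam_smmu_put_phy_addr_scratch(handle, scratch_addr);
unreg:
	/* A NULL handler unregisters the matching token */
	cam_smmu_reg_client_page_fault_handler(handle, NULL, client_token);
	return rc;
}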
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
new file mode 100644
index 000000000000..b8adb3cd80df
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
@@ -0,0 +1,724 @@
+/* Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "CAM-SOC %s:%d " fmt, __func__, __LINE__
+#define NO_SET_RATE -1
+#define INIT_RATE -2
+
+#ifdef CONFIG_CAM_SOC_API_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/msm-bus.h>
+#include "cam_soc_api.h"
+
+struct msm_cam_bus_pscale_data {
+ struct msm_bus_scale_pdata *pdata;
+ uint32_t bus_client;
+ uint32_t num_usecases;
+ uint32_t num_paths;
+ unsigned int vector_index;
+ bool dyn_vote;
+};
+
+struct msm_cam_bus_pscale_data g_cv[CAM_AHB_CLIENT_MAX];
+
+
+/* Get all clocks from DT */
+int msm_camera_get_clk_info(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr,
+ size_t *num_clk)
+{
+ int rc = 0;
+ size_t cnt, tmp;
+	uint32_t *rates;
+	int i = 0;
+ const char *clk_ctl = NULL;
+ bool clock_cntl_support = false;
+ struct device_node *of_node;
+
+ if (!pdev || !clk_info || !num_clk)
+ return -EINVAL;
+
+ of_node = pdev->dev.of_node;
+
+ cnt = of_property_count_strings(of_node, "clock-names");
+ if (cnt <= 0) {
+		pr_err("err: No clocks found in DT, count=%zu\n", cnt);
+ return -EINVAL;
+ }
+
+ tmp = of_property_count_u32_elems(of_node, "qcom,clock-rates");
+ if (tmp <= 0) {
+		pr_err("err: No clock rates found in DT, count=%zu\n", tmp);
+ return -EINVAL;
+ }
+
+ if (cnt != tmp) {
+ pr_err("err: clk name/rates mismatch, strings=%zu, rates=%zu\n",
+ cnt, tmp);
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node, "qcom,clock-cntl-support")) {
+ tmp = of_property_count_strings(of_node,
+ "qcom,clock-control");
+ if (tmp <= 0) {
+ pr_err("err: control strings not found in DT count=%zu",
+ tmp);
+ return -EINVAL;
+ }
+ if (cnt != tmp) {
+ pr_err("err: controls mismatch, strings=%zu, ctl=%zu\n",
+ cnt, tmp);
+ return -EINVAL;
+ }
+ clock_cntl_support = true;
+ }
+
+ *num_clk = cnt;
+
+ *clk_info = devm_kcalloc(&pdev->dev, cnt,
+ sizeof(struct msm_cam_clk_info), GFP_KERNEL);
+ if (!*clk_info)
+ return -ENOMEM;
+
+ *clk_ptr = devm_kcalloc(&pdev->dev, cnt, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!*clk_ptr) {
+ rc = -ENOMEM;
+ goto err1;
+ }
+
+	rates = devm_kcalloc(&pdev->dev, cnt, sizeof(uint32_t), GFP_KERNEL);
+ if (!rates) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+ rates, cnt);
+ if (rc < 0) {
+ pr_err("err: failed reading clock rates\n");
+ rc = -EINVAL;
+ goto err3;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ rc = of_property_read_string_index(of_node, "clock-names",
+ i, &((*clk_info)[i].clk_name));
+ if (rc < 0) {
+ pr_err("%s reading clock-name failed index %d\n",
+ __func__, i);
+ rc = -EINVAL;
+ goto err3;
+ }
+
+ CDBG("dbg: clk-name[%d] = %s\n", i, (*clk_info)[i].clk_name);
+ if (clock_cntl_support) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,clock-control", i, &clk_ctl);
+ if (rc < 0) {
+ pr_err("%s reading clock-control failed index %d\n",
+ __func__, i);
+ rc = -EINVAL;
+ goto err3;
+ }
+
+ if (!strcmp(clk_ctl, "NO_SET_RATE"))
+ (*clk_info)[i].clk_rate = NO_SET_RATE;
+ else if (!strcmp(clk_ctl, "INIT_RATE"))
+ (*clk_info)[i].clk_rate = INIT_RATE;
+ else if (!strcmp(clk_ctl, "SET_RATE"))
+ (*clk_info)[i].clk_rate = rates[i];
+ else {
+ pr_err("%s: error: clock control has invalid value\n",
+ __func__);
+ rc = -EBUSY;
+ goto err3;
+ }
+ } else
+ (*clk_info)[i].clk_rate =
+ (rates[i] == 0) ? (long)-1 : rates[i];
+
+ CDBG("dbg: clk-rate[%d] = rate: %ld\n",
+ i, (*clk_info)[i].clk_rate);
+
+ (*clk_ptr)[i] =
+ devm_clk_get(&pdev->dev, (*clk_info)[i].clk_name);
+ if (IS_ERR((*clk_ptr)[i])) {
+ rc = PTR_ERR((*clk_ptr)[i]);
+ goto err4;
+ }
+ CDBG("clk ptr[%d] :%p\n", i, (*clk_ptr)[i]);
+ }
+
+ devm_kfree(&pdev->dev, rates);
+ return rc;
+
+err4:
+ for (--i; i >= 0; i--)
+ devm_clk_put(&pdev->dev, (*clk_ptr)[i]);
+err3:
+ devm_kfree(&pdev->dev, rates);
+err2:
+ devm_kfree(&pdev->dev, *clk_ptr);
+err1:
+ devm_kfree(&pdev->dev, *clk_info);
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_get_clk_info);
+
+/* Enable/Disable all clocks */
+int msm_camera_clk_enable(struct device *dev,
+ struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr, int num_clk, int enable)
+{
+ int i;
+ int rc = 0;
+ long clk_rate;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ CDBG("enable %s\n", clk_info[i].clk_name);
+ if (clk_info[i].clk_rate > 0) {
+ clk_rate = clk_round_rate(clk_ptr[i],
+ clk_info[i].clk_rate);
+ if (clk_rate < 0) {
+ pr_err("%s round failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ rc = clk_set_rate(clk_ptr[i],
+ clk_rate);
+ if (rc < 0) {
+ pr_err("%s set failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+
+ } else if (clk_info[i].clk_rate == INIT_RATE) {
+ clk_rate = clk_get_rate(clk_ptr[i]);
+ if (clk_rate == 0) {
+ clk_rate =
+ clk_round_rate(clk_ptr[i], 0);
+ if (clk_rate < 0) {
+ pr_err("%s round rate failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ rc = clk_set_rate(clk_ptr[i],
+ clk_rate);
+ if (rc < 0) {
+ pr_err("%s set rate failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ }
+ }
+ rc = clk_prepare_enable(clk_ptr[i]);
+ if (rc < 0) {
+ pr_err("%s enable failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_enable_err;
+ }
+ if (clk_info[i].delay > 20) {
+ msleep(clk_info[i].delay);
+ } else if (clk_info[i].delay) {
+ usleep_range(clk_info[i].delay * 1000,
+ (clk_info[i].delay * 1000) + 1000);
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_ptr[i] != NULL) {
+ CDBG("%s disable %s\n", __func__,
+ clk_info[i].clk_name);
+ clk_disable_unprepare(clk_ptr[i]);
+ }
+ }
+ }
+ return rc;
+
+cam_clk_enable_err:
+cam_clk_set_err:
+ for (i--; i >= 0; i--) {
+ if (clk_ptr[i] != NULL)
+ clk_disable_unprepare(clk_ptr[i]);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_clk_enable);
+
+/* Set rate on a specific clock */
+int msm_camera_clk_set_rate(struct device *dev,
+ struct clk *clk,
+ long clk_rate)
+{
+ int rc = 0;
+
+ if (!dev || !clk)
+ return -EINVAL;
+
+ clk_rate = clk_round_rate(clk, clk_rate);
+ if (clk_rate < 0) {
+ pr_err("round rate failed:\n");
+ return -EINVAL;
+ }
+
+ rc = clk_set_rate(clk, clk_rate);
+ if (rc < 0) {
+ pr_err("set rate failed:\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_clk_set_rate);
+
+/* release memory allocated for clocks */
+int msm_camera_put_clk_info(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr, int cnt)
+{
+ int i;
+
+ for (i = cnt - 1; i >= 0; i--) {
+		if ((*clk_ptr)[i] != NULL)
+ devm_clk_put(&pdev->dev, (*clk_ptr)[i]);
+
+ CDBG("clk ptr[%d] :%p\n", i, (*clk_ptr)[i]);
+ }
+ devm_kfree(&pdev->dev, *clk_info);
+ devm_kfree(&pdev->dev, *clk_ptr);
+ *clk_info = NULL;
+ *clk_ptr = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_put_clk_info);
+
+/* Get regulators from DT */
+int msm_camera_get_regulator_info(struct platform_device *pdev,
+ struct regulator ***vdd,
+ int *num_reg)
+{
+ uint32_t cnt;
+ int i, rc;
+ struct device_node *of_node;
+ const char *name;
+ char prop_name[32];
+
+ if (!pdev || !vdd || !num_reg)
+ return -EINVAL;
+
+ of_node = pdev->dev.of_node;
+
+ if (!of_get_property(of_node, "qcom,vdd-names", NULL)) {
+ pr_err("err: Regulators property not found\n");
+ return -EINVAL;
+ }
+
+ cnt = of_property_count_strings(of_node, "qcom,vdd-names");
+ if (cnt <= 0) {
+ pr_err("err: no regulators found in device tree, count=%d",
+ cnt);
+ return -EINVAL;
+ }
+
+ *num_reg = cnt;
+ (*vdd) = devm_kcalloc(&pdev->dev, cnt, sizeof(struct regulator *),
+ GFP_KERNEL);
+ if (!*vdd)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,vdd-names", i, &name);
+ if (rc < 0) {
+ pr_err("Fail to fetch regulators: %d\n", i);
+ rc = -EINVAL;
+ goto err1;
+ }
+
+ CDBG("regulator-names[%d] = %s\n", i, name);
+
+ snprintf(prop_name, 32, "%s-supply", name);
+
+ if (of_get_property(of_node, prop_name, NULL)) {
+ (*vdd)[i] = devm_regulator_get(&pdev->dev, name);
+ if (IS_ERR((*vdd)[i])) {
+ rc = -EINVAL;
+ pr_err("Fail to get regulator :%d\n", i);
+ goto err1;
+ }
+ } else {
+ pr_err("Regulator phandle not found :%s\n", name);
+ goto err1;
+ }
+ CDBG("vdd ptr[%d] :%p\n", i, (*vdd)[i]);
+ }
+
+ return 0;
+
+err1:
+ for (--i; i >= 0; i--)
+ devm_regulator_put((*vdd)[i]);
+ devm_kfree(&pdev->dev, *vdd);
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_get_regulator_info);
+
+
+/* Enable/Disable regulators */
+int msm_camera_regulator_enable(struct regulator **vdd,
+ int cnt, int enable)
+{
+ int i;
+ int rc;
+
+ CDBG("cnt : %d, enable : %d\n", cnt, enable);
+ if (!vdd) {
+ pr_err("Invalid Params");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (enable) {
+ rc = regulator_enable(vdd[i]);
+ if (rc < 0) {
+ pr_err("regulator enable failed %d\n", i);
+ goto error;
+ }
+ } else {
+ rc = regulator_disable(vdd[i]);
+ if (rc < 0)
+ pr_err("regulator disable failed %d\n", i);
+ }
+ }
+
+ return 0;
+error:
+	for (--i; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL(vdd[i]))
+ regulator_disable(vdd[i]);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_regulator_enable);
+
+/* Put regulators */
+void msm_camera_put_regulators(struct platform_device *pdev,
+ struct regulator ***vdd,
+ int cnt)
+{
+ int i;
+
+ if (!*vdd) {
+ pr_err("Invalid params\n");
+ return;
+ }
+
+ for (i = cnt - 1; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL((*vdd)[i]))
+ devm_regulator_put((*vdd)[i]);
+ CDBG("vdd ptr[%d] :%p\n", i, (*vdd)[i]);
+ }
+
+ devm_kfree(&pdev->dev, *vdd);
+ *vdd = NULL;
+}
+EXPORT_SYMBOL(msm_camera_put_regulators);
+
+struct resource *msm_camera_get_irq(struct platform_device *pdev,
+ char *irq_name)
+{
+ if (!pdev || !irq_name) {
+ pr_err("Invalid Params\n");
+ return NULL;
+ }
+
+ CDBG("Get irq for %s\n", irq_name);
+ return platform_get_resource_byname(pdev, IORESOURCE_IRQ, irq_name);
+}
+EXPORT_SYMBOL(msm_camera_get_irq);
+
+int msm_camera_register_irq(struct platform_device *pdev,
+ struct resource *irq, irq_handler_t handler,
+ char *irq_name, void *dev_id)
+{
+ int rc = 0;
+
+ if (!pdev || !irq || !handler || !irq_name || !dev_id) {
+ pr_err("Invalid Params\n");
+ return -EINVAL;
+ }
+
+ rc = devm_request_irq(&pdev->dev, irq->start, handler,
+ IRQF_TRIGGER_RISING, irq_name, dev_id);
+ if (rc < 0) {
+ pr_err("irq request fail\n");
+ rc = -EINVAL;
+ }
+
+ CDBG("Registered irq for %s[resource - %p]\n", irq_name, irq);
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_camera_register_irq);
+
+int msm_camera_enable_irq(struct resource *irq, int enable)
+{
+ if (!irq) {
+ pr_err("Invalid Params\n");
+ return -EINVAL;
+ }
+
+ CDBG("irq Enable %d\n", enable);
+ if (enable)
+ enable_irq(irq->start);
+ else
+ disable_irq(irq->start);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_enable_irq);
+
+int msm_camera_unregister_irq(struct platform_device *pdev,
+ struct resource *irq, void *dev_id)
+{
+
+ if (!pdev || !irq || !dev_id) {
+ pr_err("Invalid Params\n");
+ return -EINVAL;
+ }
+
+ CDBG("Un Registering irq for [resource - %p]\n", irq);
+ devm_free_irq(&pdev->dev, irq->start, dev_id);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_unregister_irq);
+
+void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
+ char *device_name)
+{
+ struct resource *mem;
+ void *base;
+
+ if (!pdev || !device_name) {
+ pr_err("Invalid Params\n");
+ return NULL;
+ }
+
+ CDBG("device name :%s\n", device_name);
+ mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, device_name);
+ if (!mem) {
+ pr_err("err: mem resource %s not found\n", device_name);
+ return NULL;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
+ device_name)) {
+ pr_err("err: no valid mem region for device:%s\n", device_name);
+ return NULL;
+ }
+
+ base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ if (!base) {
+ devm_release_mem_region(&pdev->dev, mem->start,
+ resource_size(mem));
+ pr_err("err: ioremap failed: %s\n", device_name);
+ return NULL;
+ }
+
+ CDBG("base : %p\n", base);
+ return base;
+}
+EXPORT_SYMBOL(msm_camera_get_reg_base);
+
+int msm_camera_put_reg_base(struct platform_device *pdev, void __iomem *base,
+ char *device_name)
+{
+ struct resource *mem;
+
+ if (!pdev || !base || !device_name) {
+ pr_err("Invalid Params\n");
+ return -EINVAL;
+ }
+
+ CDBG("device name :%s\n", device_name);
+ mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, device_name);
+ if (!mem) {
+ pr_err("err: mem resource %s not found\n", device_name);
+ return -EINVAL;
+ }
+
+ devm_iounmap(&pdev->dev, base);
+ devm_release_mem_region(&pdev->dev, mem->start, resource_size(mem));
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_put_reg_base);
+
+/* Register the bus client */
+uint32_t msm_camera_register_bus_client(struct platform_device *pdev,
+ enum cam_ahb_clk_client id)
+{
+ int rc = 0;
+ uint32_t bus_client, num_usecases, num_paths;
+ struct msm_bus_scale_pdata *pdata;
+ struct device_node *of_node;
+
+ CDBG("Register client ID: %d\n", id);
+
+ if (id >= CAM_AHB_CLIENT_MAX || !pdev) {
+ pr_err("Invalid Params");
+ return -EINVAL;
+ }
+
+ of_node = pdev->dev.of_node;
+
+ if (!g_cv[id].pdata) {
+ rc = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
+ &num_usecases);
+ if (rc) {
+ pr_err("num-usecases not found\n");
+ return -EINVAL;
+ }
+ rc = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
+ &num_paths);
+ if (rc) {
+			pr_err("num-paths not found\n");
+ return -EINVAL;
+ }
+
+ if (num_paths != 1) {
+			pr_err("Invalid number of paths, expected 1\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_bool(of_node,
+ "qcom,msm-bus-vector-dyn-vote")) {
+ if (num_usecases != 2) {
+				pr_err("Invalid number of vectors, expected 2\n");
+ return -EINVAL;
+ }
+ g_cv[id].dyn_vote = true;
+ }
+
+ pdata = msm_bus_cl_get_pdata(pdev);
+ if (!pdata) {
+ pr_err("failed get_pdata client_id :%d\n", id);
+ return -EINVAL;
+ }
+ bus_client = msm_bus_scale_register_client(pdata);
+ if (!bus_client) {
+ pr_err("Unable to register bus client :%d\n", id);
+ return -EINVAL;
+ }
+ } else {
+ pr_err("vector already setup client_id : %d\n", id);
+ return -EINVAL;
+ }
+
+ g_cv[id].pdata = pdata;
+ g_cv[id].bus_client = bus_client;
+ g_cv[id].vector_index = 0;
+ g_cv[id].num_usecases = num_usecases;
+ g_cv[id].num_paths = num_paths;
+
+ CDBG("Exit Client ID: %d\n", id);
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_register_bus_client);
+
+/* Update the bus bandwidth */
+uint32_t msm_camera_update_bus_bw(int id, uint64_t ab, uint64_t ib)
+{
+ struct msm_bus_paths *path;
+ struct msm_bus_scale_pdata *pdata;
+ int idx = 0;
+
+ if (id >= CAM_AHB_CLIENT_MAX) {
+ pr_err("Invalid Params");
+ return -EINVAL;
+ }
+ if (g_cv[id].num_usecases != 2 ||
+ g_cv[id].num_paths != 1 ||
+ g_cv[id].dyn_vote != true) {
+ pr_err("dynamic update not allowed\n");
+ return -EINVAL;
+ }
+
+
+ idx = g_cv[id].vector_index;
+ pdata = g_cv[id].pdata;
+
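+	/*
+	 * Dynamic voting ping-pongs between the two usecase vectors: the
+	 * new ab/ib request is written into the currently inactive vector
+	 * before the vote is switched to it.
+	 */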
+ idx = 1 - idx;
+ path = &(pdata->usecase[idx]);
+ path->vectors[0].ab = ab;
+ path->vectors[0].ib = ib;
+
+	CDBG("Update client ID : %d [ab : %llx, ib : %llx], vector idx :%d\n",
+ id, ab, ib, idx);
+ msm_bus_scale_client_update_request(g_cv[id].bus_client, idx);
+ g_cv[id].vector_index = idx;
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_update_bus_bw);
+
+/* Update the bus vector */
+uint32_t msm_camera_update_bus_vector(enum cam_ahb_clk_client id,
+ int vector_index)
+{
+ if (id >= CAM_AHB_CLIENT_MAX || g_cv[id].dyn_vote == true) {
+ pr_err("Invalid Params");
+ return -EINVAL;
+ }
+
+	if (vector_index < 0 || vector_index >= g_cv[id].num_usecases) {
+ pr_err("Invalid Params");
+ return -EINVAL;
+ }
+
+ msm_bus_scale_client_update_request(g_cv[id].bus_client,
+ vector_index);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_update_bus_vector);
+
+/* Unregister the bus client */
+uint32_t msm_camera_unregister_bus_client(enum cam_ahb_clk_client id)
+{
+ if (id >= CAM_AHB_CLIENT_MAX) {
+ pr_err("Invalid Params");
+ return -EINVAL;
+ }
+
+ CDBG("UnRegister client ID: %d\n", id);
+
+ msm_bus_scale_unregister_client(g_cv[id].bus_client);
+ msm_bus_cl_clear_pdata(g_cv[id].pdata);
+ memset(&g_cv[id], 0, sizeof(struct msm_cam_bus_pscale_data));
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_camera_unregister_bus_client);
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.h b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.h
new file mode 100644
index 000000000000..a806b5f74f4a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.h
@@ -0,0 +1,289 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _CAM_SOC_API_H_
+#define _CAM_SOC_API_H_
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <soc/qcom/camera2.h>
+#include "cam_hw_ops.h"
+
+/**
+ * @brief : Gets clock information from dtsi
+ *
+ * This function extracts the clocks information for a specific
+ * platform device
+ *
+ * @param pdev : Platform device to get clocks information
+ * @param clk_info : Pointer to populate clock information array
+ * @param clk_ptr : Pointer to populate clock resource pointers
+ * @param num_clk: Pointer to populate the number of clocks
+ * extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_get_clk_info(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr,
+ size_t *num_clk);
+/**
+ * @brief : Puts clock information
+ *
+ * This function releases the memory allocated for the clocks
+ *
+ * @param pdev : Pointer to platform device
+ * @param clk_info : Pointer to release the allocated memory
+ * @param clk_ptr : Pointer to release the clock resources
+ * @param cnt : Number of clk resources
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_put_clk_info(struct platform_device *pdev,
+ struct msm_cam_clk_info **clk_info,
+ struct clk ***clk_ptr, int cnt);
+/**
+ * @brief : Enable clocks
+ *
+ * This function enables the clocks for a specified device
+ *
+ * @param dev : Device whose clocks are enabled/disabled
+ * @param clk_info : Pointer to the clock information array
+ * @param clk_ptr : Pointer to the array of clock resources
+ * @param num_clk : Number of clocks in the array
+ * @param enable : Flag to specify enable/disable
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_clk_enable(struct device *dev,
+ struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr,
+ int num_clk,
+ int enable);
+/**
+ * @brief : Set clock rate
+ *
+ * This function sets the rate for a specified clock
+ *
+ * @param dev : Device to get clocks information
+ * @param clk : Pointer to clock to set rate
+ * @param clk_rate : Rate to be set
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_clk_set_rate(struct device *dev,
+ struct clk *clk,
+ long clk_rate);
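+
+/*
+ * Illustrative usage sketch only (not part of this API; "my_pdev" is a
+ * hypothetical platform device and error handling is elided): a probe
+ * path is expected to chain the clock helpers roughly as follows:
+ *
+ *	struct msm_cam_clk_info *clk_info;
+ *	struct clk **clks;
+ *	size_t num_clk;
+ *	int rc;
+ *
+ *	rc = msm_camera_get_clk_info(my_pdev, &clk_info, &clks, &num_clk);
+ *	if (!rc)
+ *		rc = msm_camera_clk_enable(&my_pdev->dev, clk_info, clks,
+ *			num_clk, 1);
+ */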
+
+/**
+ * @brief : Gets regulator info
+ *
+ * This function extracts the regulator information for a specific
+ * platform device
+ *
+ * @param pdev : Platform device to get regulator information
+ * @param vdd: Pointer to populate the regulator handles
+ * @param num_reg: Pointer to populate the number of regulators
+ * extracted from dtsi
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int msm_camera_get_regulator_info(struct platform_device *pdev,
+	struct regulator ***vdd, int *num_reg);
+/**
+ * @brief : Enable/Disable the regulators
+ *
+ * This function enables/disables the regulators for a specific
+ * platform device
+ *
+ * @param vdd: Pointer to list of regulators
+ * @param cnt: Number of regulators to enable/disable
+ * @param enable: Flag to specify enable/disable
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_regulator_enable(struct regulator **vdd,
+ int cnt, int enable);
+
+/**
+ * @brief : Release the regulators
+ *
+ * This function releases the regulator resources.
+ *
+ * @param pdev: Pointer to platform device
+ * @param vdd: Pointer to list of regulators
+ * @param cnt: Number of regulators to release
+ */
+
+void msm_camera_put_regulators(struct platform_device *pdev,
+ struct regulator ***vdd,
+ int cnt);
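+
+/*
+ * Illustrative usage sketch only ("my_pdev" is hypothetical, error handling
+ * elided): get the regulators once at probe, then toggle and release them:
+ *
+ *	struct regulator **vdd;
+ *	int num_reg, rc;
+ *
+ *	rc = msm_camera_get_regulator_info(my_pdev, &vdd, &num_reg);
+ *	if (!rc)
+ *		rc = msm_camera_regulator_enable(vdd, num_reg, 1);
+ *	msm_camera_regulator_enable(vdd, num_reg, 0);
+ *	msm_camera_put_regulators(my_pdev, &vdd, num_reg);
+ */
+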
+/**
+ * @brief : Get the IRQ resource
+ *
+ * This function gets the irq resource from dtsi for a specific
+ * platform device
+ *
+ * @param pdev : Platform device to get IRQ
+ * @param irq_name: Name of the IRQ resource to get from DTSI
+ *
+ * @return Pointer to resource if success else null
+ */
+
+struct resource *msm_camera_get_irq(struct platform_device *pdev,
+ char *irq_name);
+/**
+ * @brief : Register the IRQ
+ *
+ * This function registers the irq resource for specified hardware
+ *
+ * @param pdev : Platform device to register IRQ resource
+ * @param irq : IRQ resource
+ * @param handler : IRQ handler
+ * @param irq_name: Name of the IRQ
+ * @param dev : Token of the device
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_register_irq(struct platform_device *pdev,
+ struct resource *irq,
+ irq_handler_t handler,
+ char *irq_name,
+ void *dev);
+
+/**
+ * @brief : Enable/Disable the IRQ
+ *
+ * This function enables or disables a specific IRQ
+ *
+ * @param irq : IRQ resource
+ * @param flag : flag to enable/disable
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_enable_irq(struct resource *irq, int flag);
+
+/**
+ * @brief : UnRegister the IRQ
+ *
+ * This function Unregisters/Frees the irq resource
+ *
+ * @param pdev : Pointer to platform device
+ * @param irq : IRQ resource
+ * @param dev_id : Token of the device
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_unregister_irq(struct platform_device *pdev,
+ struct resource *irq, void *dev_id);
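+
+/*
+ * Illustrative usage sketch only ("my_isr", "my_irq" and "my_drv" are
+ * hypothetical names, error handling elided):
+ *
+ *	struct resource *irq;
+ *
+ *	irq = msm_camera_get_irq(my_pdev, "my_irq");
+ *	if (irq)
+ *		rc = msm_camera_register_irq(my_pdev, irq, my_isr,
+ *			"my_irq", my_drv);
+ *	msm_camera_enable_irq(irq, 0);
+ *	msm_camera_unregister_irq(my_pdev, irq, my_drv);
+ */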
+
+/**
+ * @brief : Gets device register base
+ *
+ * This function extracts the device's register base from the dtsi
+ * for the specified platform device
+ *
+ * @param pdev : Platform device to get the register base for
+ * @param device_name : Name of the device to fetch the register base
+ *
+ * @return Pointer to the ioremapped register base if success else null
+ */
+
+void __iomem *msm_camera_get_reg_base(struct platform_device *pdev,
+ char *device_name);
+
+/**
+ * @brief : Puts device register base
+ *
+ * This function releases the memory region for the specified
+ * resource
+ *
+ * @param pdev : Pointer to platform device
+ * @param base : Pointer to base to unmap
+ * @param device_name : Device name
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+int msm_camera_put_reg_base(struct platform_device *pdev, void __iomem *base,
+ char *device_name);
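+
+/*
+ * Illustrative usage sketch only (the "vfe" resource name and the register
+ * offset are hypothetical examples):
+ *
+ *	void __iomem *base;
+ *
+ *	base = msm_camera_get_reg_base(my_pdev, "vfe");
+ *	if (base) {
+ *		msm_camera_io_w(0x1, base + 0x10);
+ *		msm_camera_put_reg_base(my_pdev, base, "vfe");
+ *	}
+ */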
+
+/**
+ * @brief : Register the bus client
+ *
+ * This function registers the bus client
+ *
+ * @param pdev : Pointer to platform device
+ * @param id : client identifier
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+uint32_t msm_camera_register_bus_client(struct platform_device *pdev,
+ enum cam_ahb_clk_client id);
+
+/**
+ * @brief : Update bus vector
+ *
+ * This function votes for the specified vector to the bus
+ *
+ * @param id : client identifier
+ * @param vector_index : vector index to register
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+uint32_t msm_camera_update_bus_vector(enum cam_ahb_clk_client id,
+ int vector_index);
+
+/**
+ * @brief : Update the bus bandwidth
+ *
+ * This function updates the bandwidth for the specific client
+ *
+ * @param id : client identifier
+ * @param ab : Absolute bandwidth
+ * @param ib : Instantaneous bandwidth
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+uint32_t msm_camera_update_bus_bw(int id, uint64_t ab, uint64_t ib);
+
+/**
+ * @brief : UnRegister the bus client
+ *
+ * This function unregisters the bus client
+ *
+ * @param id : client identifier
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+
+uint32_t msm_camera_unregister_bus_client(enum cam_ahb_clk_client id);
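+
+/*
+ * Illustrative usage sketch only (bandwidth numbers are arbitrary); a client
+ * that uses the dynamic-vote vectors might do:
+ *
+ *	rc = msm_camera_register_bus_client(my_pdev, CAM_AHB_CLIENT_FD);
+ *	if (!rc)
+ *		msm_camera_update_bus_bw(CAM_AHB_CLIENT_FD, 100000000ULL,
+ *			200000000ULL);
+ *	msm_camera_unregister_bus_client(CAM_AHB_CLIENT_FD);
+ */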
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
new file mode 100644
index 000000000000..f978f97d7895
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
@@ -0,0 +1,843 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <soc/qcom/camera2.h>
+#include <linux/msm-bus.h>
+#include "msm_camera_io_util.h"
+
+#define BUFF_SIZE_128 128
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+void msm_camera_io_w(u32 data, void __iomem *addr)
+{
+ CDBG("%s: 0x%p %08x\n", __func__, (addr), (data));
+ writel_relaxed((data), (addr));
+}
+
+/*
+ * This API is to write a block of data
+ * to the same address.
+ */
+int32_t msm_camera_io_w_block(const u32 *addr, void __iomem *base,
+ u32 len)
+{
+ int i;
+
+ if (!addr || !len || !base)
+ return -EINVAL;
+
+ for (i = 0; i < len; i++) {
+ CDBG("%s: len =%d val=%x base =%p\n", __func__,
+ len, addr[i], base);
+ writel_relaxed(addr[i], base);
+ }
+ return 0;
+}
+
+/*
+ * This API is to write a block of registers laid out as a
+ * two-dimensional table of register offset and data pairs
+ * (see the illustrative layout after the function).
+ */
+int32_t msm_camera_io_w_reg_block(const u32 *addr, void __iomem *base,
+ u32 len)
+{
+ int i;
+
+ if (!addr || !len || !base)
+ return -EINVAL;
+
+ for (i = 0; i < len; i = i + 2) {
+ CDBG("%s: len =%d val=%x base =%p reg=%x\n", __func__,
+ len, addr[i + 1], base, addr[i]);
+ writel_relaxed(addr[i + 1], base + addr[i]);
+ }
+ return 0;
+}
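+
+/*
+ * Illustrative layout for the table above (offsets and values are
+ * arbitrary examples):
+ *
+ *	static const u32 init_regs[] = {
+ *		0x0010, 0x00000001,
+ *		0x0014, 0x0000ff00,
+ *	};
+ *
+ *	msm_camera_io_w_reg_block(init_regs, base, ARRAY_SIZE(init_regs));
+ */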
+
+void msm_camera_io_w_mb(u32 data, void __iomem *addr)
+{
+ CDBG("%s: 0x%p %08x\n", __func__, (addr), (data));
+ /* ensure write is done */
+ wmb();
+ writel_relaxed((data), (addr));
+ /* ensure write is done */
+ wmb();
+}
+
+int32_t msm_camera_io_w_mb_block(const u32 *addr, void __iomem *base, u32 len)
+{
+ int i;
+
+ if (!addr || !len || !base)
+ return -EINVAL;
+
+ for (i = 0; i < len; i++) {
+ /* ensure write is done */
+ wmb();
+ CDBG("%s: len =%d val=%x base =%p\n", __func__,
+ len, addr[i], base);
+ writel_relaxed(addr[i], base);
+ }
+ /* ensure last write is done */
+ wmb();
+ return 0;
+}
+
+u32 msm_camera_io_r(void __iomem *addr)
+{
+ uint32_t data = readl_relaxed(addr);
+
+ CDBG("%s: 0x%p %08x\n", __func__, (addr), (data));
+ return data;
+}
+
+u32 msm_camera_io_r_mb(void __iomem *addr)
+{
+ uint32_t data;
+ /* ensure read is done */
+ rmb();
+ data = readl_relaxed(addr);
+ /* ensure read is done */
+ rmb();
+ CDBG("%s: 0x%p %08x\n", __func__, (addr), (data));
+ return data;
+}
+
+void msm_camera_io_memcpy_toio(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len)
+{
+ int i;
+ u32 *d = (u32 *) dest_addr;
+ u32 *s = (u32 *) src_addr;
+
+ for (i = 0; i < len; i++)
+ writel_relaxed(*s++, d++);
+}
+
+int32_t msm_camera_io_poll_value(void __iomem *addr, u32 wait_data, u32 retry,
+ unsigned long min_usecs, unsigned long max_usecs)
+{
+ uint32_t tmp, cnt = 0;
+ int32_t rc = 0;
+
+ if (!addr)
+ return -EINVAL;
+
+ tmp = msm_camera_io_r(addr);
+ while ((tmp != wait_data) && (cnt++ < retry)) {
+ if (min_usecs > 0 && max_usecs > 0)
+ usleep_range(min_usecs, max_usecs);
+ tmp = msm_camera_io_r(addr);
+ }
+ if (cnt > retry) {
+ pr_debug("Poll failed by value\n");
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+int32_t msm_camera_io_poll_value_wmask(void __iomem *addr, u32 wait_data,
+ u32 bmask, u32 retry, unsigned long min_usecs, unsigned long max_usecs)
+{
+ uint32_t tmp, cnt = 0;
+ int32_t rc = 0;
+
+ if (!addr)
+ return -EINVAL;
+
+ tmp = msm_camera_io_r(addr);
+ while (((tmp & bmask) != wait_data) && (cnt++ < retry)) {
+ if (min_usecs > 0 && max_usecs > 0)
+ usleep_range(min_usecs, max_usecs);
+ tmp = msm_camera_io_r(addr);
+ }
+ if (cnt > retry) {
+ pr_debug("Poll failed with mask\n");
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+void msm_camera_io_dump(void __iomem *addr, int size, int enable)
+{
+ char line_str[128], *p_str;
+ int i;
+ u32 *p = (u32 *) addr;
+ u32 data;
+
+ CDBG("%s: addr=%p size=%d\n", __func__, addr, size);
+
+ if (!p || (size <= 0) || !enable)
+ return;
+
+ line_str[0] = '\0';
+ p_str = line_str;
+ for (i = 0; i < size/4; i++) {
+ if (i % 4 == 0) {
+#ifdef CONFIG_COMPAT
+ snprintf(p_str, 20, "%016lx: ", (unsigned long) p);
+ p_str += 18;
+#else
+ snprintf(p_str, 12, "%08lx: ", (unsigned long) p);
+ p_str += 10;
+#endif
+ }
+ data = readl_relaxed(p++);
+ snprintf(p_str, 12, "%08x ", data);
+ p_str += 9;
+ if ((i + 1) % 4 == 0) {
+ pr_err("%s\n", line_str);
+ line_str[0] = '\0';
+ p_str = line_str;
+ }
+ }
+ if (line_str[0] != '\0')
+ pr_err("%s\n", line_str);
+}
+
+void msm_camera_io_dump_wstring_base(void __iomem *addr,
+ struct msm_cam_dump_string_info *dump_data,
+ int size)
+{
+ int i, u = sizeof(struct msm_cam_dump_string_info);
+
+ pr_debug("%s: addr=%p data=%p size=%d u=%d, cnt=%d\n", __func__,
+ addr, dump_data, size, u,
+ (size/u));
+
+ if (!addr || (size <= 0) || !dump_data) {
+ pr_err("%s: addr=%p data=%p size=%d\n", __func__,
+ addr, dump_data, size);
+ return;
+ }
+ for (i = 0; i < (size / u); i++)
+ pr_debug("%s 0x%x\n", (dump_data + i)->print,
+ readl_relaxed((dump_data + i)->offset + addr));
+}
+
+void msm_camera_io_memcpy(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len)
+{
+ CDBG("%s: %p %p %d\n", __func__, dest_addr, src_addr, len);
+ msm_camera_io_memcpy_toio(dest_addr, src_addr, len / 4);
+}
+
+void msm_camera_io_memcpy_mb(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len)
+{
+ int i;
+ u32 *d = (u32 *) dest_addr;
+ u32 *s = (u32 *) src_addr;
+	/* Generic helper for callers that need register writes
+	 * with memory barriers.
+	 */
+ wmb();
+ for (i = 0; i < (len / 4); i++) {
+ msm_camera_io_w(*s++, d++);
+ /* ensure write is done after every iteration */
+ wmb();
+ }
+}
+
+int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
+ struct msm_cam_clk_info *clk_src_info, int num_clk)
+{
+ int i;
+ int rc = 0;
+ struct clk *mux_clk = NULL;
+ struct clk *src_clk = NULL;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_src_info[i].clk_name) {
+ mux_clk = clk_get(dev, clk_info[i].clk_name);
+ if (IS_ERR(mux_clk)) {
+ pr_err("%s get failed\n",
+ clk_info[i].clk_name);
+ continue;
+ }
+ src_clk = clk_get(dev, clk_src_info[i].clk_name);
+ if (IS_ERR(src_clk)) {
+ pr_err("%s get failed\n",
+ clk_src_info[i].clk_name);
+ continue;
+ }
+ clk_set_parent(mux_clk, src_clk);
+ }
+ }
+ return rc;
+}
+
+int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr, int num_clk, int enable)
+{
+ int i;
+ int rc = 0;
+ long clk_rate;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ CDBG("%s enable %s\n", __func__, clk_info[i].clk_name);
+ clk_ptr[i] = clk_get(dev, clk_info[i].clk_name);
+ if (IS_ERR(clk_ptr[i])) {
+ pr_err("%s get failed\n", clk_info[i].clk_name);
+ rc = PTR_ERR(clk_ptr[i]);
+ goto cam_clk_get_err;
+ }
+ if (clk_info[i].clk_rate > 0) {
+ clk_rate = clk_round_rate(clk_ptr[i],
+ clk_info[i].clk_rate);
+				if (clk_rate < 0) {
+					pr_err("%s round failed\n",
+						   clk_info[i].clk_name);
+					rc = clk_rate;
+					goto cam_clk_set_err;
+				}
+ rc = clk_set_rate(clk_ptr[i],
+ clk_rate);
+ if (rc < 0) {
+ pr_err("%s set failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+
+ } else if (clk_info[i].clk_rate == INIT_RATE) {
+ clk_rate = clk_get_rate(clk_ptr[i]);
+ if (clk_rate == 0) {
+ clk_rate =
+ clk_round_rate(clk_ptr[i], 0);
+					if (clk_rate < 0) {
+						pr_err("%s round rate failed\n",
+							clk_info[i].clk_name);
+						rc = clk_rate;
+						goto cam_clk_set_err;
+					}
+ rc = clk_set_rate(clk_ptr[i],
+ clk_rate);
+ if (rc < 0) {
+ pr_err("%s set rate failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_set_err;
+ }
+ }
+ }
+ rc = clk_prepare(clk_ptr[i]);
+ if (rc < 0) {
+ pr_err("%s prepare failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_prepare_err;
+ }
+
+ rc = clk_enable(clk_ptr[i]);
+ if (rc < 0) {
+ pr_err("%s enable failed\n",
+ clk_info[i].clk_name);
+ goto cam_clk_enable_err;
+ }
+ if (clk_info[i].delay > 20) {
+ msleep(clk_info[i].delay);
+ } else if (clk_info[i].delay) {
+ usleep_range(clk_info[i].delay * 1000,
+ (clk_info[i].delay * 1000) + 1000);
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_ptr[i] != NULL) {
+ CDBG("%s disable %s\n", __func__,
+ clk_info[i].clk_name);
+ clk_disable(clk_ptr[i]);
+ clk_unprepare(clk_ptr[i]);
+ clk_put(clk_ptr[i]);
+ }
+ }
+ }
+ return rc;
+
+
+cam_clk_enable_err:
+ clk_unprepare(clk_ptr[i]);
+cam_clk_prepare_err:
+cam_clk_set_err:
+ clk_put(clk_ptr[i]);
+cam_clk_get_err:
+ for (i--; i >= 0; i--) {
+ if (clk_ptr[i] != NULL) {
+ clk_disable(clk_ptr[i]);
+ clk_unprepare(clk_ptr[i]);
+ clk_put(clk_ptr[i]);
+ }
+ }
+ return rc;
+}
+
+int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+ int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+ int num_vreg_seq, struct regulator **reg_ptr, int config)
+{
+ int i = 0, j = 0;
+ int rc = 0;
+ struct camera_vreg_t *curr_vreg;
+
+ if (num_vreg_seq > num_vreg) {
+ pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!num_vreg_seq)
+ num_vreg_seq = num_vreg;
+
+ if (config) {
+ for (i = 0; i < num_vreg_seq; i++) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ curr_vreg = &cam_vreg[j];
+ reg_ptr[j] = regulator_get(dev,
+ curr_vreg->reg_name);
+ if (IS_ERR(reg_ptr[j])) {
+ pr_err("%s: %s get failed\n",
+ __func__,
+ curr_vreg->reg_name);
+ reg_ptr[j] = NULL;
+ goto vreg_get_fail;
+ }
+ if (regulator_count_voltages(reg_ptr[j]) > 0) {
+ rc = regulator_set_voltage(
+ reg_ptr[j],
+ curr_vreg->min_voltage,
+ curr_vreg->max_voltage);
+ if (rc < 0) {
+ pr_err("%s: %s set voltage failed\n",
+ __func__,
+ curr_vreg->reg_name);
+ goto vreg_set_voltage_fail;
+ }
+ if (curr_vreg->op_mode >= 0) {
+ rc = regulator_set_optimum_mode(
+ reg_ptr[j],
+ curr_vreg->op_mode);
+ if (rc < 0) {
+ pr_err(
+ "%s:%s set optimum mode fail\n",
+ __func__,
+ curr_vreg->reg_name);
+ goto vreg_set_opt_mode_fail;
+ }
+ }
+ }
+ }
+ } else {
+ for (i = num_vreg_seq-1; i >= 0; i--) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ curr_vreg = &cam_vreg[j];
+ if (reg_ptr[j]) {
+ if (regulator_count_voltages(reg_ptr[j]) > 0) {
+ if (curr_vreg->op_mode >= 0) {
+ regulator_set_optimum_mode(
+ reg_ptr[j], 0);
+ }
+ regulator_set_voltage(
+ reg_ptr[j], 0, curr_vreg->
+ max_voltage);
+ }
+ regulator_put(reg_ptr[j]);
+ reg_ptr[j] = NULL;
+ }
+ }
+ }
+ return 0;
+
+vreg_unconfig:
+	if (regulator_count_voltages(reg_ptr[j]) > 0)
+		regulator_set_optimum_mode(reg_ptr[j], 0);
+
+vreg_set_opt_mode_fail:
+	if (regulator_count_voltages(reg_ptr[j]) > 0)
+		regulator_set_voltage(reg_ptr[j], 0,
+			curr_vreg->max_voltage);
+
+vreg_set_voltage_fail:
+ regulator_put(reg_ptr[j]);
+ reg_ptr[j] = NULL;
+
+vreg_get_fail:
+ for (i--; i >= 0; i--) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ curr_vreg = &cam_vreg[j];
+ goto vreg_unconfig;
+ }
+ return -ENODEV;
+}
+
+int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+ int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+ int num_vreg_seq, struct regulator **reg_ptr, int enable)
+{
+ int i = 0, j = 0, rc = 0;
+
+ if (num_vreg_seq > num_vreg) {
+ pr_err("%s:%d vreg sequence invalid\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (!num_vreg_seq)
+ num_vreg_seq = num_vreg;
+
+ if (enable) {
+ for (i = 0; i < num_vreg_seq; i++) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ if (IS_ERR(reg_ptr[j])) {
+ pr_err("%s: %s null regulator\n",
+ __func__, cam_vreg[j].reg_name);
+ goto disable_vreg;
+ }
+ rc = regulator_enable(reg_ptr[j]);
+ if (rc < 0) {
+ pr_err("%s: %s enable failed\n",
+ __func__, cam_vreg[j].reg_name);
+ goto disable_vreg;
+ }
+ if (cam_vreg[j].delay > 20)
+ msleep(cam_vreg[j].delay);
+ else if (cam_vreg[j].delay)
+ usleep_range(cam_vreg[j].delay * 1000,
+ (cam_vreg[j].delay * 1000) + 1000);
+ }
+ } else {
+ for (i = num_vreg_seq-1; i >= 0; i--) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ regulator_disable(reg_ptr[j]);
+ if (cam_vreg[j].delay > 20)
+ msleep(cam_vreg[j].delay);
+ else if (cam_vreg[j].delay)
+ usleep_range(cam_vreg[j].delay * 1000,
+ (cam_vreg[j].delay * 1000) + 1000);
+ }
+ }
+ return rc;
+disable_vreg:
+ for (i--; i >= 0; i--) {
+ if (vreg_seq) {
+ j = vreg_seq[i];
+ if (j >= num_vreg)
+ continue;
+ } else
+ j = i;
+ regulator_disable(reg_ptr[j]);
+ if (cam_vreg[j].delay > 20)
+ msleep(cam_vreg[j].delay);
+ else if (cam_vreg[j].delay)
+ usleep_range(cam_vreg[j].delay * 1000,
+ (cam_vreg[j].delay * 1000) + 1000);
+ }
+ return rc;
+}
+
+void msm_camera_bus_scale_cfg(uint32_t bus_perf_client,
+ enum msm_bus_perf_setting perf_setting)
+{
+ int rc = 0;
+
+ if (!bus_perf_client) {
+ pr_err("%s: Bus Client NOT Registered!!!\n", __func__);
+ return;
+ }
+
+ switch (perf_setting) {
+ case S_EXIT:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 1);
+ msm_bus_scale_unregister_client(bus_perf_client);
+ break;
+ case S_PREVIEW:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 1);
+ break;
+ case S_VIDEO:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 2);
+ break;
+ case S_CAPTURE:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 3);
+ break;
+ case S_ZSL:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 4);
+ break;
+ case S_LIVESHOT:
+ rc = msm_bus_scale_client_update_request(bus_perf_client, 5);
+ break;
+ case S_DEFAULT:
+ break;
+ default:
+ pr_err("%s: INVALID CASE\n", __func__);
+ }
+}
+
+int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
+ uint8_t gpio_tbl_size, int gpio_en)
+{
+ int rc = 0, i;
+
+ if (gpio_en) {
+ for (i = 0; i < gpio_tbl_size; i++) {
+ gpio_set_value_cansleep(gpio_tbl[i].gpio,
+ gpio_tbl[i].flags);
+ usleep_range(gpio_tbl[i].delay,
+ gpio_tbl[i].delay + 1000);
+ }
+ } else {
+ for (i = gpio_tbl_size - 1; i >= 0; i--) {
+ if (gpio_tbl[i].flags)
+ gpio_set_value_cansleep(gpio_tbl[i].gpio,
+ GPIOF_OUT_INIT_LOW);
+ }
+ }
+ return rc;
+}
+
+int msm_camera_config_single_vreg(struct device *dev,
+ struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config)
+{
+ int rc = 0;
+ const char *vreg_name = NULL;
+
+ if (!dev || !cam_vreg || !reg_ptr) {
+ pr_err("%s: get failed NULL parameter\n", __func__);
+ goto vreg_get_fail;
+ }
+ if (cam_vreg->type == VREG_TYPE_CUSTOM) {
+ if (cam_vreg->custom_vreg_name == NULL) {
+ pr_err("%s : can't find sub reg name",
+ __func__);
+ goto vreg_get_fail;
+ }
+ vreg_name = cam_vreg->custom_vreg_name;
+ } else {
+ if (cam_vreg->reg_name == NULL) {
+ pr_err("%s : can't find reg name", __func__);
+ goto vreg_get_fail;
+ }
+ vreg_name = cam_vreg->reg_name;
+ }
+
+ if (config) {
+ CDBG("%s enable %s\n", __func__, vreg_name);
+ *reg_ptr = regulator_get(dev, vreg_name);
+ if (IS_ERR(*reg_ptr)) {
+ pr_err("%s: %s get failed\n", __func__, vreg_name);
+ *reg_ptr = NULL;
+ goto vreg_get_fail;
+ }
+ if (regulator_count_voltages(*reg_ptr) > 0) {
+ CDBG("%s: voltage min=%d, max=%d\n",
+ __func__, cam_vreg->min_voltage,
+ cam_vreg->max_voltage);
+ rc = regulator_set_voltage(
+ *reg_ptr, cam_vreg->min_voltage,
+ cam_vreg->max_voltage);
+ if (rc < 0) {
+ pr_err("%s: %s set voltage failed\n",
+ __func__, vreg_name);
+ goto vreg_set_voltage_fail;
+ }
+ if (cam_vreg->op_mode >= 0) {
+ rc = regulator_set_optimum_mode(*reg_ptr,
+ cam_vreg->op_mode);
+ if (rc < 0) {
+ pr_err(
+ "%s: %s set optimum mode failed\n",
+ __func__, vreg_name);
+ goto vreg_set_opt_mode_fail;
+ }
+ }
+ }
+ rc = regulator_enable(*reg_ptr);
+ if (rc < 0) {
+ pr_err("%s: %s regulator_enable failed\n", __func__,
+ vreg_name);
+ goto vreg_unconfig;
+ }
+ } else {
+ CDBG("%s disable %s\n", __func__, vreg_name);
+ if (*reg_ptr) {
+ CDBG("%s disable %s\n", __func__, vreg_name);
+ regulator_disable(*reg_ptr);
+ if (regulator_count_voltages(*reg_ptr) > 0) {
+ if (cam_vreg->op_mode >= 0)
+ regulator_set_optimum_mode(*reg_ptr, 0);
+ regulator_set_voltage(
+ *reg_ptr, 0, cam_vreg->max_voltage);
+ }
+ regulator_put(*reg_ptr);
+ *reg_ptr = NULL;
+ } else {
+ pr_err("%s can't disable %s\n", __func__, vreg_name);
+ }
+ }
+ return 0;
+
+vreg_unconfig:
+	if (regulator_count_voltages(*reg_ptr) > 0)
+		regulator_set_optimum_mode(*reg_ptr, 0);
+
+vreg_set_opt_mode_fail:
+	if (regulator_count_voltages(*reg_ptr) > 0)
+		regulator_set_voltage(*reg_ptr, 0, cam_vreg->max_voltage);
+
+vreg_set_voltage_fail:
+ regulator_put(*reg_ptr);
+ *reg_ptr = NULL;
+
+vreg_get_fail:
+ return -ENODEV;
+}
+
+int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
+ int gpio_en)
+{
+ int rc = 0, i = 0, err = 0;
+
+ if (!gpio_tbl || !size) {
+ pr_err("%s:%d invalid gpio_tbl %p / size %d\n", __func__,
+ __LINE__, gpio_tbl, size);
+ return -EINVAL;
+ }
+ for (i = 0; i < size; i++) {
+ CDBG("%s:%d i %d, gpio %d dir %ld\n", __func__, __LINE__, i,
+ gpio_tbl[i].gpio, gpio_tbl[i].flags);
+ }
+ if (gpio_en) {
+ for (i = 0; i < size; i++) {
+ err = gpio_request_one(gpio_tbl[i].gpio,
+ gpio_tbl[i].flags, gpio_tbl[i].label);
+ if (err) {
+				/*
+				 * If a GPIO request fails, continue to
+				 * apply the remaining gpios and output an
+				 * error message for driver bring-up debug.
+				 */
+ pr_err("%s:%d gpio %d:%s request fails\n",
+ __func__, __LINE__,
+ gpio_tbl[i].gpio, gpio_tbl[i].label);
+ }
+ }
+ } else {
+ gpio_free_array(gpio_tbl, size);
+ }
+ return rc;
+}
+
+/*
+ * msm_camera_get_dt_reg_settings - Get dt reg settings from device-tree.
+ * @of_node: Pointer to device of_node from dev.
+ * @dt_prop_name: String of the property to search in of_node from dev.
+ * @reg_s: Double pointer; the settings array is allocated by this function
+ *         and filled.
+ * @size: Pointer to fill the length of the available entries.
+ */
+int msm_camera_get_dt_reg_settings(struct device_node *of_node,
+ const char *dt_prop_name, uint32_t **reg_s,
+ unsigned int *size)
+{
+ int ret;
+ unsigned int cnt;
+
+ if (!of_node || !dt_prop_name || !size || !reg_s) {
+ pr_err("%s: Error invalid args %p:%p:%p:%p\n",
+ __func__, size, reg_s, of_node, dt_prop_name);
+ return -EINVAL;
+ }
+ if (!of_get_property(of_node, dt_prop_name, &cnt)) {
+ pr_debug("Missing dt reg settings for %s\n", dt_prop_name);
+ return -ENOENT;
+ }
+
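+	/*
+	 * of_get_property() returned the property length in bytes above;
+	 * each setting is an <offset value> pair of u32 cells (8 bytes),
+	 * and cnt is converted to a count of u32 words below.
+	 */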
+ if (!cnt || (cnt % 8)) {
+ pr_err("%s: Error invalid number of entries cnt=%d\n",
+ __func__, cnt);
+ return -EINVAL;
+ }
+ cnt /= 4;
+ if (cnt != 0) {
+ *reg_s = kcalloc(cnt, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!*reg_s)
+ return -ENOMEM;
+ ret = of_property_read_u32_array(of_node,
+ dt_prop_name,
+ *reg_s,
+ cnt);
+ if (ret < 0) {
+ pr_err("%s: No dt reg info read for %s ret=%d\n",
+ __func__, dt_prop_name, ret);
+ kfree(*reg_s);
+ return -ENOENT;
+ }
+ *size = cnt;
+ } else {
+ pr_err("%s: Error invalid entries\n", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
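+
+/*
+ * Illustrative devicetree usage (the property name and values below are
+ * hypothetical examples only):
+ *
+ *	qcom,example-reg-init = <0x0010 0x1
+ *				 0x0014 0xff00>;
+ *
+ *	rc = msm_camera_get_dt_reg_settings(of_node, "qcom,example-reg-init",
+ *		&reg_s, &size);
+ *	if (!rc)
+ *		rc = msm_camera_hw_write_dt_reg_settings(base, reg_s, size);
+ */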
+
+/*
+ * msm_camera_put_dt_reg_settings - Free dt reg settings memory.
+ * @reg_s: Double pointer to the settings allocated by
+ *         msm_camera_get_dt_reg_settings; freed and set to NULL.
+ * @size: Pointer to the length, reset to 0.
+ */
+void msm_camera_put_dt_reg_settings(uint32_t **reg_s,
+ unsigned int *size)
+{
+ kfree(*reg_s);
+ *reg_s = NULL;
+ *size = 0;
+}
+
+int msm_camera_hw_write_dt_reg_settings(void __iomem *base,
+ uint32_t *reg_s,
+ unsigned int size)
+{
+ int32_t rc = 0;
+
+ if (!reg_s || !base || !size) {
+ pr_err("%s: Error invalid args\n", __func__);
+ return -EINVAL;
+ }
+ rc = msm_camera_io_w_reg_block((const u32 *) reg_s,
+ base, size);
+ if (rc < 0)
+ pr_err("%s: Failed dt reg setting write\n", __func__);
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.h b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.h
new file mode 100644
index 000000000000..04c8822330f0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAMERA_IO_UTIL_H
+#define __MSM_CAMERA_IO_UTIL_H
+
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <soc/qcom/camera2.h>
+#include <media/msm_cam_sensor.h>
+#include <media/v4l2-ioctl.h>
+
+#define NO_SET_RATE -1
+#define INIT_RATE -2
+
+struct msm_gpio_set_tbl {
+ unsigned gpio;
+ unsigned long flags;
+ uint32_t delay;
+};
+
+struct msm_cam_dump_string_info {
+ const char *print;
+ uint32_t offset;
+};
+
+void msm_camera_io_w(u32 data, void __iomem *addr);
+void msm_camera_io_w_mb(u32 data, void __iomem *addr);
+u32 msm_camera_io_r(void __iomem *addr);
+u32 msm_camera_io_r_mb(void __iomem *addr);
+void msm_camera_io_dump(void __iomem *addr, int size, int enable);
+void msm_camera_io_memcpy(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len);
+void msm_camera_io_memcpy_mb(void __iomem *dest_addr,
+ void __iomem *src_addr, u32 len);
+int msm_cam_clk_sel_src(struct device *dev, struct msm_cam_clk_info *clk_info,
+ struct msm_cam_clk_info *clk_src_info, int num_clk);
+int msm_cam_clk_enable(struct device *dev, struct msm_cam_clk_info *clk_info,
+ struct clk **clk_ptr, int num_clk, int enable);
+
+int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+ int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+ int num_vreg_seq, struct regulator **reg_ptr, int config);
+int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
+ int num_vreg, enum msm_camera_vreg_name_t *vreg_seq,
+ int num_vreg_seq, struct regulator **reg_ptr, int enable);
+
+void msm_camera_bus_scale_cfg(uint32_t bus_perf_client,
+ enum msm_bus_perf_setting perf_setting);
+
+int msm_camera_set_gpio_table(struct msm_gpio_set_tbl *gpio_tbl,
+ uint8_t gpio_tbl_size, int gpio_en);
+
+void msm_camera_config_single_gpio(uint16_t gpio, unsigned long flags,
+ int gpio_en);
+
+int msm_camera_config_single_vreg(struct device *dev,
+ struct camera_vreg_t *cam_vreg, struct regulator **reg_ptr, int config);
+
+int msm_camera_request_gpio_table(struct gpio *gpio_tbl, uint8_t size,
+ int gpio_en);
+void msm_camera_io_dump_wstring_base(void __iomem *addr,
+ struct msm_cam_dump_string_info *dump_data,
+ int size);
+int32_t msm_camera_io_poll_value_wmask(void __iomem *addr, u32 wait_data,
+ u32 bmask, u32 retry, unsigned long min_usecs,
+ unsigned long max_usecs);
+int32_t msm_camera_io_poll_value(void __iomem *addr, u32 wait_data, u32 retry,
+ unsigned long min_usecs, unsigned long max_usecs);
+int32_t msm_camera_io_w_block(const u32 *addr, void __iomem *base, u32 len);
+int32_t msm_camera_io_w_reg_block(const u32 *addr, void __iomem *base, u32 len);
+int32_t msm_camera_io_w_mb_block(const u32 *addr, void __iomem *base, u32 len);
+int msm_camera_get_dt_reg_settings(struct device_node *of_node,
+ const char *dt_prop_name, uint32_t **reg_s,
+ unsigned int *size);
+void msm_camera_put_dt_reg_settings(uint32_t **reg_s,
+ unsigned int *size);
+int msm_camera_hw_write_dt_reg_settings(void __iomem *base,
+ uint32_t *reg_s,
+ unsigned int size);
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/fd/Makefile b/drivers/media/platform/msm/camera_v2/fd/Makefile
new file mode 100644
index 000000000000..82b37a73bfa3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/fd/Makefile
@@ -0,0 +1,5 @@
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+
+obj-$(CONFIG_MSM_FD) += msm_fd_dev.o msm_fd_hw.o
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
new file mode 100644
index 000000000000..20da61f81622
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
@@ -0,0 +1,1356 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-core.h>
+
+#include "msm_fd_dev.h"
+#include "msm_fd_hw.h"
+#include "msm_fd_regs.h"
+#include "cam_hw_ops.h"
+
+#define MSM_FD_DRV_NAME "msm_fd"
+
+#define MSM_FD_WORD_SIZE_BYTES 4
+
+/* Face detection thresholds definitions */
+#define MSM_FD_DEF_THRESHOLD 5
+#define MSM_FD_MAX_THRESHOLD_VALUE 9
+
+/* Face angle lookup table */
+#define MSM_FD_DEF_ANGLE_IDX 2
+static int msm_fd_angle[] = {45, 135, 359};
+
+/* Face direction lookup table */
+#define MSM_FD_DEF_DIR_IDX 0
+static int msm_fd_dir[] = {0, 90, 270, 180};
+
+/* Minimum face size lookup table */
+#define MSM_FD_DEF_MIN_SIZE_IDX 0
+static int msm_fd_min_size[] = {20, 25, 32, 40};
+
+/* Face detection size lookup table */
+static struct msm_fd_size fd_size[] = {
+ {
+ .width = 320,
+ .height = 240,
+ .reg_val = MSM_FD_IMAGE_SIZE_QVGA,
+ .work_size = (13120 * MSM_FD_WORD_SIZE_BYTES),
+ },
+ {
+ .width = 427,
+ .height = 240,
+ .reg_val = MSM_FD_IMAGE_SIZE_WQVGA,
+ .work_size = (17744 * MSM_FD_WORD_SIZE_BYTES),
+ },
+ {
+ .width = 640,
+ .height = 480,
+ .reg_val = MSM_FD_IMAGE_SIZE_VGA,
+ .work_size = (52624 * MSM_FD_WORD_SIZE_BYTES),
+ },
+ {
+ .width = 854,
+ .height = 480,
+ .reg_val = MSM_FD_IMAGE_SIZE_WVGA,
+ .work_size = (70560 * MSM_FD_WORD_SIZE_BYTES),
+ },
+};
+
+/*
+ * msm_fd_ctx_from_fh - Get fd context from v4l2 fh.
+ * @fh: Pointer to v4l2 fh.
+ */
+static inline struct fd_ctx *msm_fd_ctx_from_fh(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct fd_ctx, fh);
+}
+
+/*
+ * msm_fd_get_format_index - Get format index from v4l2 format.
+ * @f: Pointer to v4l2 format struct.
+ */
+static int msm_fd_get_format_index(struct v4l2_format *f)
+{
+ int index;
+
+ for (index = 0; index < ARRAY_SIZE(fd_size); index++) {
+ if (f->fmt.pix.width <= fd_size[index].width &&
+ f->fmt.pix.height <= fd_size[index].height)
+ return index;
+ }
+ return index - 1;
+}
+
+/*
+ * msm_fd_get_idx_from_value - Get array index from value.
+ * @value: Value for which index is needed.
+ * @array: Array in which index is searched for.
+ * @array_size: Array size.
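+ *
+ * Example (illustrative): with array = {0, 90, 270, 180}, a value of 100
+ * maps to index 1, the nearest entry (90).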
+ */
+static int msm_fd_get_idx_from_value(int value, int *array, int array_size)
+{
+ int index;
+ int i;
+
+ index = 0;
+ for (i = 1; i < array_size; i++) {
+ if (value == array[i]) {
+ index = i;
+ break;
+ }
+ if (abs(value - array[i]) < abs(value - array[index]))
+ index = i;
+ }
+ return index;
+}
+
+/*
+ * msm_fd_fill_format_from_index - Fill v4l2 format struct from size index.
+ * @f: Pointer of v4l2 struct which will be filled.
+ * @index: Size index (Format will be filled based on this index).
+ */
+static int msm_fd_fill_format_from_index(struct v4l2_format *f, int index)
+{
+ f->fmt.pix.width = fd_size[index].width;
+ f->fmt.pix.height = fd_size[index].height;
+ f->fmt.pix.pixelformat = V4L2_PIX_FMT_GREY;
+ if (f->fmt.pix.bytesperline < f->fmt.pix.width)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline, 16);
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/*
+ * msm_fd_fill_format_from_ctx - Fill v4l2 format struct from fd context.
+ * @f: Pointer of v4l2 struct which will be filled.
+ * @c: Pointer to fd context.
+ */
+static int msm_fd_fill_format_from_ctx(struct v4l2_format *f, struct fd_ctx *c)
+{
+ if (NULL == c->format.size)
+ return -EINVAL;
+
+ f->fmt.pix.width = c->format.size->width;
+ f->fmt.pix.height = c->format.size->height;
+ f->fmt.pix.pixelformat = c->format.pixelformat;
+ f->fmt.pix.bytesperline = c->format.bytesperline;
+ f->fmt.pix.sizeimage = c->format.sizeimage;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+/*
+ * msm_fd_queue_setup - vb2_ops queue_setup callback.
+ * @q: Pointer to vb2 queue struct.
+ * @fmt: Pointer to v4l2 format struct (NULL is valid argument).
+ * @num_buffers: Pointer of number of buffers requested.
+ * @num_planes: Pointer to number of planes requested.
+ * @sizes: Array containing sizes of planes.
+ * @alloc_ctxs: Array of allocated contexts for each plane.
+ */
+static int msm_fd_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct fd_ctx *ctx = vb2_get_drv_priv(q);
+
+ *num_planes = 1;
+
+ if (NULL == fmt)
+ sizes[0] = ctx->format.sizeimage;
+ else
+ sizes[0] = fmt->fmt.pix.sizeimage;
+
+ alloc_ctxs[0] = &ctx->mem_pool;
+
+ return 0;
+}
+
+/*
+ * msm_fd_buf_init - vb2_ops buf_init callback.
+ * @vb: Pointer to vb2 buffer struct.
+ */
+int msm_fd_buf_init(struct vb2_buffer *vb)
+{
+ struct msm_fd_buffer *fd_buffer =
+ (struct msm_fd_buffer *)vb;
+
+ INIT_LIST_HEAD(&fd_buffer->list);
+ atomic_set(&fd_buffer->active, 0);
+
+ return 0;
+}
+
+/*
+ * msm_fd_buf_queue - vb2_ops buf_queue callback.
+ * @vb: Pointer to vb2 buffer struct.
+ */
+static void msm_fd_buf_queue(struct vb2_buffer *vb)
+{
+ struct fd_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct msm_fd_buffer *fd_buffer =
+ (struct msm_fd_buffer *)vb;
+
+ fd_buffer->format = ctx->format;
+ fd_buffer->settings = ctx->settings;
+ fd_buffer->work_addr = ctx->work_buf.addr;
+ msm_fd_hw_add_buffer(ctx->fd_device, fd_buffer);
+
+ if (vb->vb2_queue->streaming)
+ msm_fd_hw_schedule_and_start(ctx->fd_device);
+
+ return;
+}
+
+/*
+ * msm_fd_start_streaming - vb2_ops start_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ * @count: Number of buffer queued before stream on call.
+ */
+static int msm_fd_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct fd_ctx *ctx = vb2_get_drv_priv(q);
+ int ret;
+
+ if (ctx->work_buf.fd == -1) {
+ dev_err(ctx->fd_device->dev, "Missing working buffer\n");
+ return -EINVAL;
+ }
+
+ ret = msm_fd_hw_get(ctx->fd_device, ctx->settings.speed);
+ if (ret < 0) {
+ dev_err(ctx->fd_device->dev, "Can not acquire fd hw\n");
+ goto out;
+ }
+
+ ret = msm_fd_hw_schedule_and_start(ctx->fd_device);
+ if (ret < 0)
+ dev_err(ctx->fd_device->dev, "Can not start fd hw\n");
+
+out:
+ return ret;
+}
+
+/*
+ * msm_fd_stop_streaming - vb2_ops stop_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ */
+static void msm_fd_stop_streaming(struct vb2_queue *q)
+{
+ struct fd_ctx *ctx = vb2_get_drv_priv(q);
+
+ msm_fd_hw_remove_buffers_from_queue(ctx->fd_device, q);
+ msm_fd_hw_put(ctx->fd_device);
+}
+
+/* Videobuf2 queue callbacks. */
+static struct vb2_ops msm_fd_vb2_q_ops = {
+ .queue_setup = msm_fd_queue_setup,
+ .buf_init = msm_fd_buf_init,
+ .buf_queue = msm_fd_buf_queue,
+ .start_streaming = msm_fd_start_streaming,
+ .stop_streaming = msm_fd_stop_streaming,
+};
+
+/*
+ * msm_fd_get_userptr - Map and get buffer handler for user pointer buffer.
+ * @alloc_ctx: Contexts allocated in buf_setup.
+ * @vaddr: Virtual addr passed from userspace (in our case ion fd)
+ * @size: Size of the buffer
+ * @write: True if buffer will be used for writing the data.
+ */
+static void *msm_fd_get_userptr(void *alloc_ctx,
+ unsigned long vaddr, unsigned long size, int write)
+{
+ struct msm_fd_mem_pool *pool = alloc_ctx;
+ struct msm_fd_buf_handle *buf;
+ int ret;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = msm_fd_hw_map_buffer(pool, vaddr, buf);
+ if (ret < 0 || buf->size < size)
+ goto error;
+
+ return buf;
+error:
+ kzfree(buf);
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * msm_fd_put_userptr - Unmap and free buffer handler.
+ * @buf_priv: Buffer handler allocated get_userptr callback.
+ */
+static void msm_fd_put_userptr(void *buf_priv)
+{
+ if (IS_ERR_OR_NULL(buf_priv))
+ return;
+
+ msm_fd_hw_unmap_buffer(buf_priv);
+
+ kzfree(buf_priv);
+}
+
+/* Videobuf2 memory callbacks. */
+static struct vb2_mem_ops msm_fd_vb2_mem_ops = {
+ .get_userptr = msm_fd_get_userptr,
+ .put_userptr = msm_fd_put_userptr,
+};
+
+/*
+ * msm_fd_open - Fd device open method.
+ * @file: Pointer to file struct.
+ */
+static int msm_fd_open(struct file *file)
+{
+ struct msm_fd_device *device = video_drvdata(file);
+ struct video_device *video = video_devdata(file);
+ struct fd_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->fd_device = device;
+
+ /* Initialize work buffer handler */
+ ctx->work_buf.pool = NULL;
+ ctx->work_buf.fd = -1;
+
+ /* Set ctx defaults */
+ ctx->settings.speed = ctx->fd_device->clk_rates_num - 1;
+ ctx->settings.angle_index = MSM_FD_DEF_ANGLE_IDX;
+ ctx->settings.direction_index = MSM_FD_DEF_DIR_IDX;
+ ctx->settings.min_size_index = MSM_FD_DEF_MIN_SIZE_IDX;
+ ctx->settings.threshold = MSM_FD_DEF_THRESHOLD;
+
+ atomic_set(&ctx->subscribed_for_event, 0);
+
+ v4l2_fh_init(&ctx->fh, video);
+
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->vb2_q.drv_priv = ctx;
+ ctx->vb2_q.mem_ops = &msm_fd_vb2_mem_ops;
+ ctx->vb2_q.ops = &msm_fd_vb2_q_ops;
+ ctx->vb2_q.buf_struct_size = sizeof(struct msm_fd_buffer);
+ ctx->vb2_q.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ctx->vb2_q.io_modes = VB2_USERPTR;
+ ctx->vb2_q.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ ret = vb2_queue_init(&ctx->vb2_q);
+ if (ret < 0) {
+ dev_err(device->dev, "Error queue init\n");
+ goto error_vb2_queue_init;
+ }
+
+ ctx->mem_pool.fd_device = ctx->fd_device;
+ ctx->stats = vmalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS);
+ if (!ctx->stats) {
+ dev_err(device->dev, "No memory for face statistics\n");
+ ret = -ENOMEM;
+ goto error_stats_vmalloc;
+ }
+
+ ret = cam_config_ahb_clk(CAM_AHB_CLIENT_FD, CAMERA_AHB_SVS_VOTE);
+ if (ret < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+		goto error_ahb_vote;
+ }
+
+ return 0;
+
+error_ahb_vote:
+	vfree(ctx->stats);
+error_stats_vmalloc:
+ vb2_queue_release(&ctx->vb2_q);
+error_vb2_queue_init:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+/*
+ * msm_fd_release - Fd device release method.
+ * @file: Pointer to file struct.
+ */
+static int msm_fd_release(struct file *file)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data);
+
+ vb2_queue_release(&ctx->vb2_q);
+
+ vfree(ctx->stats);
+
+ if (ctx->work_buf.fd != -1)
+ msm_fd_hw_unmap_buffer(&ctx->work_buf);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+
+ kfree(ctx);
+
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_FD,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+
+ return 0;
+}
+
+/*
+ * msm_fd_poll - Fd device poll method.
+ * @file: Pointer to file struct.
+ * @wait: Pointer to poll table struct.
+ */
+static unsigned int msm_fd_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data);
+ unsigned int ret;
+
+ ret = vb2_poll(&ctx->vb2_q, file, wait);
+
+ if (atomic_read(&ctx->subscribed_for_event)) {
+ poll_wait(file, &ctx->fh.wait, wait);
+ if (v4l2_event_pending(&ctx->fh))
+ ret |= POLLPRI;
+ }
+
+ return ret;
+}
+
+/*
+ * msm_fd_private_ioctl - V4l2 private ioctl handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 device file handle.
+ * @valid_prio: Priority ioctl valid flag.
+ * @cmd: Ioctl command.
+ * @arg: Ioctl argument.
+ */
+static long msm_fd_private_ioctl(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ struct msm_fd_result *req_result = arg;
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ struct msm_fd_stats *stats;
+ int stats_idx;
+	int ret = 0;
+ int i;
+
+ switch (cmd) {
+ case VIDIOC_MSM_FD_GET_RESULT:
+ if (req_result->frame_id == 0) {
+ dev_err(ctx->fd_device->dev, "Invalid frame id\n");
+ return -EINVAL;
+ }
+
+ stats_idx = req_result->frame_id % MSM_FD_MAX_RESULT_BUFS;
+ stats = &ctx->stats[stats_idx];
+ if (req_result->frame_id != atomic_read(&stats->frame_id)) {
+ dev_err(ctx->fd_device->dev, "Stats not available\n");
+ return -EINVAL;
+ }
+
+ if (req_result->face_cnt > stats->face_cnt)
+ req_result->face_cnt = stats->face_cnt;
+
+ for (i = 0; i < req_result->face_cnt; i++) {
+ ret = copy_to_user((void __user *)
+ &req_result->face_data[i],
+ &stats->face_data[i],
+ sizeof(struct msm_fd_face_data));
+ if (ret) {
+ dev_err(ctx->fd_device->dev, "Copy to user\n");
+ return -EFAULT;
+ }
+ }
+
+ if (req_result->frame_id != atomic_read(&stats->frame_id)) {
+ dev_err(ctx->fd_device->dev, "Erroneous buffer\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(ctx->fd_device->dev, "Wrong ioctl type %x\n", cmd);
+ ret = -ENOTTY;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * msm_fd_compat_ioctl32 - Compat ioctl handler function.
+ * @file: Pointer to file struct.
+ * @cmd: Ioctl command.
+ * @arg: Ioctl argument.
+ */
+static long msm_fd_compat_ioctl32(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case VIDIOC_MSM_FD_GET_RESULT32:
+ {
+ struct msm_fd_result32 result32;
+ struct msm_fd_result result;
+
+ if (copy_from_user(&result32, (void __user *)arg,
+ sizeof(result32)))
+ return -EFAULT;
+
+ result.frame_id = result32.frame_id;
+ result.face_cnt = result32.face_cnt;
+ result.face_data = compat_ptr(result32.face_data);
+
+ ret = msm_fd_private_ioctl(file, file->private_data,
+ 0, VIDIOC_MSM_FD_GET_RESULT, (void *)&result);
+
+ result32.frame_id = result.frame_id;
+ result32.face_cnt = result.face_cnt;
+
+ if (copy_to_user((void __user *)arg, &result32,
+ sizeof(result32)))
+ return -EFAULT;
+
+ break;
+ }
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+
+ }
+
+ return ret;
+}
+#endif
+
+/* Fd device file operations callbacks */
+static const struct v4l2_file_operations fd_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_fd_open,
+ .release = msm_fd_release,
+ .poll = msm_fd_poll,
+ .unlocked_ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = msm_fd_compat_ioctl32,
+#endif
+};
+
+/*
+ * msm_fd_querycap - V4l2 ioctl query capability handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @cap: Pointer to v4l2_capability struct need to be filled.
+ */
+static int msm_fd_querycap(struct file *file,
+ void *fh, struct v4l2_capability *cap)
+{
+ cap->bus_info[0] = 0;
+ strlcpy(cap->driver, MSM_FD_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, MSM_FD_DRV_NAME, sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+
+ return 0;
+}
+
+/*
+ * msm_fd_enum_fmt_vid_out - V4l2 ioctl enumerate format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_fmtdesc struct need to be filled.
+ */
+static int msm_fd_enum_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_fmtdesc *f)
+{
+ if (f->index > 0)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_PIX_FMT_GREY;
+ strlcpy(f->description, "8 Greyscale",
+ sizeof(f->description));
+
+ return 0;
+}
+
+/*
+ * msm_fd_g_fmt - V4l2 ioctl get format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct need to be filled.
+ */
+static int msm_fd_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ return msm_fd_fill_format_from_ctx(f, ctx);
+}
+
+/*
+ * msm_fd_try_fmt_vid_out - V4l2 ioctl try format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_fd_try_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ int index;
+
+ index = msm_fd_get_format_index(f);
+
+ return msm_fd_fill_format_from_index(f, index);
+}
+
+/*
+ * msm_fd_s_fmt_vid_out - V4l2 ioctl set format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_fd_s_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int index;
+
+ index = msm_fd_get_format_index(f);
+
+ msm_fd_fill_format_from_index(f, index);
+
+ ctx->format.size = &fd_size[index];
+ ctx->format.pixelformat = f->fmt.pix.pixelformat;
+ ctx->format.bytesperline = f->fmt.pix.bytesperline;
+ ctx->format.sizeimage = f->fmt.pix.sizeimage;
+
+ /* Initialize crop */
+ ctx->format.crop.top = 0;
+ ctx->format.crop.left = 0;
+ ctx->format.crop.width = fd_size[index].width;
+ ctx->format.crop.height = fd_size[index].height;
+
+ return 0;
+}
+
+/*
+ * msm_fd_reqbufs - V4l2 ioctl request buffers handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @req: Pointer to v4l2_requestbuffer struct.
+ */
+static int msm_fd_reqbufs(struct file *file,
+ void *fh, struct v4l2_requestbuffers *req)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ return vb2_reqbufs(&ctx->vb2_q, req);
+}
+
+/*
+ * msm_fd_qbuf - V4l2 ioctl queue buffer handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @pb: Pointer to v4l2_buffer struct.
+ */
+static int msm_fd_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *pb)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ return vb2_qbuf(&ctx->vb2_q, pb);
+}
+
+/*
+ * msm_fd_dqbuf - V4l2 ioctl dequeue buffer handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @pb: Pointer to v4l2_buffer struct.
+ */
+static int msm_fd_dqbuf(struct file *file,
+ void *fh, struct v4l2_buffer *pb)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ return vb2_dqbuf(&ctx->vb2_q, pb, file->f_flags & O_NONBLOCK);
+}
+
+/*
+ * msm_fd_streamon - V4l2 ioctl stream on handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int msm_fd_streamon(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int ret;
+
+ ret = vb2_streamon(&ctx->vb2_q, buf_type);
+ if (ret < 0)
+ dev_err(ctx->fd_device->dev, "Stream on fails\n");
+
+ return ret;
+}
+
+/*
+ * msm_fd_streamoff - V4l2 ioctl stream off handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int msm_fd_streamoff(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int ret;
+
+ ret = vb2_streamoff(&ctx->vb2_q, buf_type);
+ if (ret < 0)
+ dev_err(ctx->fd_device->dev, "Stream off fails\n");
+
+ return ret;
+}
+
+/*
+ * msm_fd_subscribe_event - V4l2 ioctl subscribe for event handler.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_event_subscription containing event information.
+ */
+static int msm_fd_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int ret;
+
+ if (sub->type != MSM_EVENT_FD)
+ return -EINVAL;
+
+ ret = v4l2_event_subscribe(fh, sub, MSM_FD_MAX_RESULT_BUFS, NULL);
+ if (!ret)
+ atomic_set(&ctx->subscribed_for_event, 1);
+
+ return ret;
+}
+
+/*
+ * msm_fd_unsubscribe_event - V4l2 ioctl unsubscribe from event handler.
+ * @fh: V4l2 File handle.
+ * @sub: Pointer to v4l2_event_subscription containing event information.
+ */
+static int msm_fd_unsubscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int ret;
+
+ ret = v4l2_event_unsubscribe(fh, sub);
+ if (!ret)
+ atomic_set(&ctx->subscribed_for_event, 0);
+
+ return ret;
+}
+
+/*
+ * msm_fd_query_ctrl - V4l2 ioctl query control.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_queryctrl struct, filled based on the control id.
+ */
+static int msm_fd_query_ctrl(struct file *file, void *fh,
+ struct v4l2_queryctrl *a)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ switch (a->id) {
+ case V4L2_CID_FD_SPEED:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = ctx->fd_device->clk_rates_num;
+ a->minimum = 0;
+ a->maximum = ctx->fd_device->clk_rates_num;
+ a->step = 1;
+ strlcpy(a->name, "msm fd face speed idx",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_FACE_ANGLE:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = msm_fd_angle[MSM_FD_DEF_ANGLE_IDX];
+ a->minimum = msm_fd_angle[0];
+ a->maximum = msm_fd_angle[ARRAY_SIZE(msm_fd_angle) - 1];
+ a->step = 1;
+ strlcpy(a->name, "msm fd face angle ctrl",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_FACE_DIRECTION:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = msm_fd_dir[MSM_FD_DEF_DIR_IDX];
+ a->minimum = msm_fd_dir[0];
+ a->maximum = msm_fd_dir[ARRAY_SIZE(msm_fd_dir) - 1];
+ a->step = 1;
+ strlcpy(a->name, "msm fd face direction ctrl",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_MIN_FACE_SIZE:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = msm_fd_min_size[MSM_FD_DEF_MIN_SIZE_IDX];
+ a->minimum = msm_fd_min_size[0];
+ a->maximum = msm_fd_min_size[ARRAY_SIZE(msm_fd_min_size) - 1];
+ a->step = 1;
+ strlcpy(a->name, "msm fd minimum face size (pixels)",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_DETECTION_THRESHOLD:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = MSM_FD_DEF_THRESHOLD;
+ a->minimum = 0;
+ a->maximum = MSM_FD_MAX_THRESHOLD_VALUE;
+ a->step = 1;
+ strlcpy(a->name, "msm fd detection threshold",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_SIZE:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = fd_size[0].work_size;
+ a->minimum = fd_size[(ARRAY_SIZE(fd_size) - 1)].work_size;
+ a->maximum = fd_size[0].work_size;
+ a->step = 1;
+ strlcpy(a->name, "msm fd working memory size",
+ sizeof(a->name));
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_FD:
+ a->type = V4L2_CTRL_TYPE_INTEGER;
+ a->default_value = -1;
+ a->minimum = 0;
+ a->maximum = INT_MAX;
+ a->step = 1;
+ strlcpy(a->name, "msm fd ion fd of working memory",
+ sizeof(a->name));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_fd_g_ctrl - V4l2 ioctl get control.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_control struct that needs to be filled.
+ */
+static int msm_fd_g_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ switch (a->id) {
+ case V4L2_CID_FD_SPEED:
+ a->value = ctx->settings.speed;
+ break;
+ case V4L2_CID_FD_FACE_ANGLE:
+ a->value = msm_fd_angle[ctx->settings.angle_index];
+ break;
+ case V4L2_CID_FD_FACE_DIRECTION:
+ a->value = msm_fd_dir[ctx->settings.direction_index];
+ break;
+ case V4L2_CID_FD_MIN_FACE_SIZE:
+ a->value = msm_fd_min_size[ctx->settings.min_size_index];
+ break;
+ case V4L2_CID_FD_DETECTION_THRESHOLD:
+ a->value = ctx->settings.threshold;
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_SIZE:
+ if (!ctx->format.size)
+ return -EINVAL;
+
+ a->value = ctx->format.size->work_size;
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_FD:
+ if (ctx->work_buf.fd == -1)
+ return -EINVAL;
+
+ a->value = ctx->work_buf.fd;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_fd_s_ctrl - V4l2 ioctl set control.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_control struct that needs to be set.
+ */
+static int msm_fd_s_ctrl(struct file *file, void *fh, struct v4l2_control *a)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int idx;
+ int ret;
+
+ switch (a->id) {
+ case V4L2_CID_FD_SPEED:
+ if (a->value > ctx->fd_device->clk_rates_num - 1)
+ a->value = ctx->fd_device->clk_rates_num - 1;
+ else if (a->value < 0)
+ a->value = 0;
+
+ ctx->settings.speed = a->value;
+ break;
+ case V4L2_CID_FD_FACE_ANGLE:
+ idx = msm_fd_get_idx_from_value(a->value, msm_fd_angle,
+ ARRAY_SIZE(msm_fd_angle));
+
+ ctx->settings.angle_index = idx;
+ a->value = msm_fd_angle[ctx->settings.angle_index];
+ break;
+ case V4L2_CID_FD_FACE_DIRECTION:
+ idx = msm_fd_get_idx_from_value(a->value, msm_fd_dir,
+ ARRAY_SIZE(msm_fd_dir));
+
+ ctx->settings.direction_index = idx;
+ a->value = msm_fd_dir[ctx->settings.direction_index];
+ break;
+ case V4L2_CID_FD_MIN_FACE_SIZE:
+ idx = msm_fd_get_idx_from_value(a->value, msm_fd_min_size,
+ ARRAY_SIZE(msm_fd_min_size));
+
+ ctx->settings.min_size_index = idx;
+ a->value = msm_fd_min_size[ctx->settings.min_size_index];
+ break;
+ case V4L2_CID_FD_DETECTION_THRESHOLD:
+ if (a->value > MSM_FD_MAX_THRESHOLD_VALUE)
+ a->value = MSM_FD_MAX_THRESHOLD_VALUE;
+ else if (a->value < 0)
+ a->value = 0;
+
+ ctx->settings.threshold = a->value;
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_SIZE:
+ if (!ctx->format.size)
+ return -EINVAL;
+
+ if (a->value < ctx->format.size->work_size)
+ a->value = ctx->format.size->work_size;
+ break;
+ case V4L2_CID_FD_WORK_MEMORY_FD:
+ if (ctx->work_buf.fd != -1)
+ msm_fd_hw_unmap_buffer(&ctx->work_buf);
+ if (a->value >= 0) {
+ ret = msm_fd_hw_map_buffer(&ctx->mem_pool,
+ a->value, &ctx->work_buf);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_fd_cropcap - V4l2 ioctl crop capabilities.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_cropcap struct that needs to be filled.
+ */
+static int msm_fd_cropcap(struct file *file, void *fh, struct v4l2_cropcap *a)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ if (!ctx->format.size) {
+ dev_err(ctx->fd_device->dev, "Cropcap fails format missing\n");
+ return -EINVAL;
+ }
+
+ a->bounds.top = 0;
+ a->bounds.left = 0;
+ a->bounds.width = ctx->format.size->width;
+ a->bounds.height = ctx->format.size->height;
+
+ a->defrect = ctx->format.crop;
+
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+/*
+ * msm_fd_g_crop - V4l2 ioctl get crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @crop: Pointer to v4l2_crop struct that needs to be filled.
+ */
+static int msm_fd_g_crop(struct file *file, void *fh, struct v4l2_crop *crop)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+
+ if (!ctx->format.size) {
+ dev_err(ctx->fd_device->dev, "Get crop, format missing!\n");
+ return -EINVAL;
+ }
+
+ crop->c = ctx->format.crop;
+
+ return 0;
+}
+
+/*
+ * msm_fd_s_crop - V4l2 ioctl set crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @crop: Pointer to v4l2_crop struct that needs to be set.
+ */
+static int msm_fd_s_crop(struct file *file, void *fh,
+ const struct v4l2_crop *crop)
+{
+ struct fd_ctx *ctx = msm_fd_ctx_from_fh(fh);
+ int min_face_size;
+
+ if (!ctx->format.size) {
+ dev_err(ctx->fd_device->dev, "Get crop, format missing!\n");
+ return -EINVAL;
+ }
+
+ /* First check that crop is valid */
+ min_face_size = msm_fd_min_size[ctx->settings.min_size_index];
+
+ if (crop->c.width < min_face_size || crop->c.height < min_face_size)
+ return -EINVAL;
+
+ if (crop->c.width + crop->c.left > ctx->format.size->width)
+ return -EINVAL;
+
+ if (crop->c.height + crop->c.top > ctx->format.size->height)
+ return -EINVAL;
+
+ ctx->format.crop = crop->c;
+
+ return 0;
+}
+
+/* V4l2 ioctl handlers */
+static const struct v4l2_ioctl_ops fd_ioctl_ops = {
+ .vidioc_querycap = msm_fd_querycap,
+ .vidioc_enum_fmt_vid_out = msm_fd_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out = msm_fd_g_fmt,
+ .vidioc_try_fmt_vid_out = msm_fd_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out = msm_fd_s_fmt_vid_out,
+ .vidioc_reqbufs = msm_fd_reqbufs,
+ .vidioc_qbuf = msm_fd_qbuf,
+ .vidioc_dqbuf = msm_fd_dqbuf,
+ .vidioc_streamon = msm_fd_streamon,
+ .vidioc_streamoff = msm_fd_streamoff,
+ .vidioc_queryctrl = msm_fd_query_ctrl,
+ .vidioc_s_ctrl = msm_fd_s_ctrl,
+ .vidioc_g_ctrl = msm_fd_g_ctrl,
+ .vidioc_cropcap = msm_fd_cropcap,
+ .vidioc_g_crop = msm_fd_g_crop,
+ .vidioc_s_crop = msm_fd_s_crop,
+ .vidioc_subscribe_event = msm_fd_subscribe_event,
+ .vidioc_unsubscribe_event = msm_fd_unsubscribe_event,
+ .vidioc_default = msm_fd_private_ioctl,
+};
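+
+/*
+ * Illustrative userspace sketch (not part of the driver): the rough ioctl
+ * sequence a client would follow against this video node to run one face
+ * detection pass. The device path, resolution and the work buffer ion fd
+ * below are placeholders for illustration only.
+ *
+ *	int vfd = open("/dev/videoN", O_RDWR);
+ *	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
+ *	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_GREY;
+ *	fmt.fmt.pix.width = 640;
+ *	fmt.fmt.pix.height = 480;
+ *	ioctl(vfd, VIDIOC_S_FMT, &fmt);
+ *
+ *	struct v4l2_control ctrl = { .id = V4L2_CID_FD_WORK_MEMORY_FD,
+ *				     .value = work_ion_fd };
+ *	ioctl(vfd, VIDIOC_S_CTRL, &ctrl);
+ *
+ *	struct v4l2_event_subscription sub = { .type = MSM_EVENT_FD };
+ *	ioctl(vfd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ *
+ * Buffers are then requested and queued with VIDIOC_REQBUFS/VIDIOC_QBUF,
+ * streaming is started with VIDIOC_STREAMON, and results are read after a
+ * MSM_EVENT_FD arrives via VIDIOC_DQEVENT through the private ioctl
+ * (msm_fd_private_ioctl).
+ */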
+
+/*
+ * msm_fd_fill_results - Read and fill face detection result.
+ * @fd: Pointer to fd device.
+ * @face: Pointer to the face data struct to be filled.
+ * @idx: Index of the face result to read from hw.
+ */
+static void msm_fd_fill_results(struct msm_fd_device *fd,
+ struct msm_fd_face_data *face, int idx)
+{
+ int half_face_size;
+
+ msm_fd_hw_get_result_angle_pose(fd, idx, &face->angle, &face->pose);
+
+ msm_fd_hw_get_result_conf_size(fd, idx, &face->confidence,
+ &face->face.width);
+ face->face.height = face->face.width;
+
+ face->face.left = msm_fd_hw_get_result_x(fd, idx);
+ face->face.top = msm_fd_hw_get_result_y(fd, idx);
+
+ half_face_size = (face->face.width >> 1);
+ if (face->face.left > half_face_size)
+ face->face.left -= half_face_size;
+ else
+ face->face.left = 0;
+
+ half_face_size = (face->face.height >> 1);
+ if (face->face.top > half_face_size)
+ face->face.top -= half_face_size;
+ else
+ face->face.top = 0;
+}
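+
+/*
+ * Worked example for the conversion above (numbers are illustrative only):
+ * the hw reports the face center while userspace expects the top-left
+ * corner. For a reported center x = 100, y = 80 and size 40, half_face_size
+ * is 20, so the rectangle becomes left = 100 - 20 = 80 and top = 80 - 20 = 60
+ * with width = height = 40. Centers closer to the image edge than half the
+ * face size are clamped to 0, as done above.
+ */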
+
+/*
+ * msm_fd_wq_handler - Fd device workqueue handler.
+ * @work: Pointer to work struct.
+ *
+ * This function is the bottom half of the fd irq. It does the following:
+ *
+ * - Stop the fd engine.
+ * - Get the fd result and store it in the stats buffer.
+ * - Schedule the next buffer for processing, if one is available.
+ * - Send an event to v4l2.
+ * - Release the buffer back to the v4l2 queue.
+ */
+static void msm_fd_wq_handler(struct work_struct *work)
+{
+ struct msm_fd_buffer *active_buf;
+ struct msm_fd_stats *stats;
+ struct msm_fd_event *fd_event;
+ struct msm_fd_device *fd;
+ struct fd_ctx *ctx;
+ struct v4l2_event event;
+ int i;
+
+ fd = container_of(work, struct msm_fd_device, work);
+
+ active_buf = msm_fd_hw_get_active_buffer(fd);
+ if (!active_buf) {
+ /* This should never happen; something is completely wrong */
+ dev_err(fd->dev, "No active buffer, queue is empty\n");
+ return;
+ }
+ ctx = vb2_get_drv_priv(active_buf->vb.vb2_queue);
+
+ /* Increment sequence number, 0 means sequence is not valid */
+ ctx->sequence++;
+ if (unlikely(!ctx->sequence))
+ ctx->sequence = 1;
+
+ /* Fill face detection statistics */
+ stats = &ctx->stats[ctx->sequence % MSM_FD_MAX_RESULT_BUFS];
+
+ /* First mark stats as invalid */
+ atomic_set(&stats->frame_id, 0);
+
+ stats->face_cnt = msm_fd_hw_get_face_count(fd);
+ for (i = 0; i < stats->face_cnt; i++)
+ msm_fd_fill_results(fd, &stats->face_data[i], i);
+
+ /* Stats are ready, set correct frame id */
+ atomic_set(&stats->frame_id, ctx->sequence);
+
+ /* We have the data from fd hw, we can start next processing */
+ msm_fd_hw_schedule_next_buffer(fd);
+
+ /* Return buffer to vb queue */
+ active_buf->vb.v4l2_buf.sequence = ctx->fh.sequence;
+ vb2_buffer_done(&active_buf->vb, VB2_BUF_STATE_DONE);
+
+ /* Send event */
+ memset(&event, 0x00, sizeof(event));
+ event.type = MSM_EVENT_FD;
+ fd_event = (struct msm_fd_event *)event.u.data;
+ fd_event->face_cnt = stats->face_cnt;
+ fd_event->buf_index = active_buf->vb.v4l2_buf.index;
+ fd_event->frame_id = ctx->sequence;
+ v4l2_event_queue_fh(&ctx->fh, &event);
+
+ /* Release buffer from the device */
+ msm_fd_hw_buffer_done(fd, active_buf);
+}
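+
+/*
+ * Note on the stats ring used above (numbers are illustrative only): results
+ * are stored in ctx->stats[sequence % MSM_FD_MAX_RESULT_BUFS], so with 5
+ * result buffers a frame with sequence 7 lands in slot 2. frame_id is
+ * cleared before the slot is rewritten and set to the sequence only after
+ * the face data is complete, which lets a reader detect a slot that was
+ * overwritten while being read.
+ */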
+
+/*
+ * fd_probe - Fd device probe method.
+ * @pdev: Pointer to fd platform device.
+ */
+static int fd_probe(struct platform_device *pdev)
+{
+ struct msm_fd_device *fd;
+ int ret;
+
+ /* Face detection device struct */
+ fd = kzalloc(sizeof(struct msm_fd_device), GFP_KERNEL);
+ if (!fd)
+ return -ENOMEM;
+
+ mutex_init(&fd->lock);
+ spin_lock_init(&fd->slock);
+ init_completion(&fd->hw_halt_completion);
+ INIT_LIST_HEAD(&fd->buf_queue);
+ fd->dev = &pdev->dev;
+
+ /* Get resources */
+ ret = msm_fd_hw_get_mem_resources(pdev, fd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail get resources\n");
+ ret = -ENODEV;
+ goto error_mem_resources;
+ }
+
+ ret = msm_fd_hw_get_regulators(fd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail to get regulators\n");
+ goto error_get_regulator;
+ }
+ ret = msm_fd_hw_get_clocks(fd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail to get clocks\n");
+ goto error_get_clocks;
+ }
+
+ ret = msm_fd_hw_get_bus(fd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail to get bus\n");
+ goto error_get_bus;
+ }
+
+ /* Get face detect hw before read engine revision */
+ ret = msm_fd_hw_get(fd, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail to get hw\n");
+ goto error_hw_get_request_irq;
+ }
+ fd->hw_revision = msm_fd_hw_get_revision(fd);
+
+ msm_fd_hw_put(fd);
+
+ ret = msm_fd_hw_request_irq(pdev, fd, msm_fd_wq_handler);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Fail request irq\n");
+ goto error_hw_get_request_irq;
+ }
+
+ /* v4l2 device */
+ ret = v4l2_device_register(&pdev->dev, &fd->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ ret = -ENOENT;
+ goto error_v4l2_register;
+ }
+
+ fd->video.fops = &fd_fops;
+ fd->video.ioctl_ops = &fd_ioctl_ops;
+ fd->video.minor = -1;
+ fd->video.release = video_device_release;
+ fd->video.v4l2_dev = &fd->v4l2_dev;
+ fd->video.vfl_dir = VFL_DIR_TX;
+ fd->video.vfl_type = VFL_TYPE_GRABBER;
+ strlcpy(fd->video.name, MSM_FD_DRV_NAME, sizeof(fd->video.name));
+
+ ret = video_register_device(&fd->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ v4l2_err(&fd->v4l2_dev, "Failed to register video device\n");
+ goto error_video_register;
+ }
+
+ video_set_drvdata(&fd->video, fd);
+
+ platform_set_drvdata(pdev, fd);
+
+ return 0;
+
+error_video_register:
+ v4l2_device_unregister(&fd->v4l2_dev);
+error_v4l2_register:
+ msm_fd_hw_release_irq(fd);
+error_hw_get_request_irq:
+ msm_fd_hw_put_bus(fd);
+error_get_bus:
+ msm_fd_hw_put_clocks(fd);
+error_get_clocks:
+ msm_fd_hw_put_regulators(fd);
+error_get_regulator:
+ msm_fd_hw_release_mem_resources(fd);
+error_mem_resources:
+ kfree(fd);
+ return ret;
+}
+
+/*
+ * fd_device_remove - Fd device remove method.
+ * @pdev: Pointer to fd platform device.
+ */
+static int fd_device_remove(struct platform_device *pdev)
+{
+ struct msm_fd_device *fd;
+
+ fd = platform_get_drvdata(pdev);
+ if (!fd) {
+ dev_err(&pdev->dev, "Can not get fd drvdata\n");
+ return 0;
+ }
+ video_unregister_device(&fd->video);
+ v4l2_device_unregister(&fd->v4l2_dev);
+ msm_fd_hw_release_irq(fd);
+ msm_fd_hw_put_bus(fd);
+ msm_fd_hw_put_clocks(fd);
+ msm_fd_hw_put_regulators(fd);
+ msm_fd_hw_release_mem_resources(fd);
+ kfree(fd);
+
+ return 0;
+}
+
+/* Device tree match struct */
+static const struct of_device_id msm_fd_dt_match[] = {
+ {.compatible = "qcom,face-detection"},
+ {}
+};
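+
+/*
+ * Minimal illustrative device tree node for this driver. Property values are
+ * placeholders; only the compatible string and the property names consumed
+ * in msm_fd_hw.c are taken from the code, everything else is elided.
+ *
+ *	fd@... {
+ *		compatible = "qcom,face-detection";
+ *		reg = <...>, <...>, <...>;	// core, misc, vbif regions
+ *		interrupts = <...>;
+ *		clock-names = "...", "...";	// see msm_fd_hw_get_clocks()
+ *		clock-rates = <...>;		// one row per speed level
+ *		qcom,vdd-names = "...";		// optional, defaults to "vdd"
+ *		qcom,bus-bandwidth-vectors = <...>;	// <ab ib> pairs
+ *	};
+ */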
+
+/* Fd platform driver definition */
+static struct platform_driver fd_driver = {
+ .probe = fd_probe,
+ .remove = fd_device_remove,
+ .driver = {
+ .name = MSM_FD_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_fd_dt_match,
+ },
+};
+
+static int __init msm_fd_init_module(void)
+{
+ return platform_driver_register(&fd_driver);
+}
+
+static void __exit msm_fd_exit_module(void)
+{
+ platform_driver_unregister(&fd_driver);
+}
+
+module_init(msm_fd_init_module);
+module_exit(msm_fd_exit_module);
+MODULE_DESCRIPTION("MSM FD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
new file mode 100644
index 000000000000..f467e46529d3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
@@ -0,0 +1,253 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_FD_DEV_H__
+#define __MSM_FD_DEV_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/msm-bus.h>
+#include <media/msm_fd.h>
+#include <linux/dma-buf.h>
+#include <linux/msm_ion.h>
+/* Maximum number of result buffers */
+#define MSM_FD_MAX_RESULT_BUFS 5
+/* Max number of clocks defined in device tree */
+#define MSM_FD_MAX_CLK_NUM 15
+/* Max number of clock rates defined in device tree */
+#define MSM_FD_MAX_CLK_RATES 5
+/* Max number of faces which can be detected in one hw processing */
+#define MSM_FD_MAX_FACES_DETECTED 32
+/* Max number of regulators defined in device tree */
+#define MSM_FD_MAX_REGULATOR_NUM 3
+
+/*
+ * struct msm_fd_size - Structure containing FD size related values.
+ * @width: Image width.
+ * @height: Image height.
+ * @reg_val: Register value for this size.
+ * @work_size: Working buffer size in bytes for this size.
+ */
+struct msm_fd_size {
+ int width;
+ int height;
+ u32 reg_val;
+ int work_size;
+};
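+
+/*
+ * Illustrative example only: the actual fd_size[] table used by
+ * msm_fd_dev.c is hw specific, but an entry could look like
+ *
+ *	{ .width = 640, .height = 480, .reg_val = 0x0, .work_size = SZ_16K },
+ *
+ * where reg_val is programmed into the image size register and work_size is
+ * the scratch memory the hw needs at that resolution. All values here are
+ * placeholders.
+ */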
+
+/*
+ * struct msm_fd_setings - Structure containing FD settings values.
+ * @min_size_index: Minimum face size array index.
+ * @angle_index: Face detection angle array index.
+ * @direction_index: Face detection direction array index.
+ * @threshold: Face detection threshold value.
+ * @speed: Face detection speed value (it should match the clock rate index).
+ */
+struct msm_fd_setings {
+ unsigned int min_size_index;
+ unsigned int angle_index;
+ unsigned int direction_index;
+ unsigned int threshold;
+ unsigned int speed;
+};
+
+/*
+ * struct msm_fd_format - Structure containing FD format settings.
+ * @size: Pointer to fd size struct used for this format.
+ * @crop: V4l2 crop structure.
+ * @bytesperline: Bytes per line of input image buffer.
+ * @sizeimage: Size of input image buffer.
+ * @pixelformat: Pixel format of input image buffer.
+ */
+struct msm_fd_format {
+ struct msm_fd_size *size;
+ struct v4l2_rect crop;
+ int bytesperline;
+ int sizeimage;
+ u32 pixelformat;
+};
+
+/*
+ * struct msm_fd_mem_pool - Structure containing FD memory pool information.
+ * @fd_device: Pointer to fd device.
+ */
+struct msm_fd_mem_pool {
+ struct msm_fd_device *fd_device;
+};
+
+/*
+ * struct msm_fd_buf_handle - Structure containing FD buffer handle information.
+ * @fd: Ion fd from which this buffer is imported.
+ * @pool: Pointer to FD memory pool struct.
+ * @size: Size of the buffer.
+ * @addr: Address of the FD mmu mapped buffer, programmed into FD hw.
+ */
+struct msm_fd_buf_handle {
+ int fd;
+ struct msm_fd_mem_pool *pool;
+ size_t size;
+ ion_phys_addr_t addr;
+};
+
+/*
+ * struct msm_fd_buffer - Vb2 buffer wrapper structure.
+ * @vb: Videobuf 2 buffer structure.
+ * @active: Flag indicating whether the buffer is currently used by FD hw.
+ * @completion: Completion to wait on if the buffer is in use by FD hw.
+ * @format: Format information of this buffer.
+ * @settings: Settings value of this buffer.
+ * @work_addr: Working buffer address to be used for this buffer.
+ * @list: List head linking the buffer into the FD device processing queue.
+ */
+struct msm_fd_buffer {
+ struct vb2_buffer vb;
+ atomic_t active;
+ struct completion completion;
+ struct msm_fd_format format;
+ struct msm_fd_setings settings;
+ ion_phys_addr_t work_addr;
+ struct list_head list;
+};
+
+/*
+ * struct msm_fd_stats - Structure contains FD result statistic information.
+ * @frame_id: Frame id to which this statistic corresponds.
+ * @face_cnt: Number of faces detected and included in face data.
+ * @face_data: Structure containing detected face data information.
+ */
+struct msm_fd_stats {
+ atomic_t frame_id;
+ u32 face_cnt;
+ struct msm_fd_face_data face_data[MSM_FD_MAX_FACES_DETECTED];
+};
+
+/*
+ * struct fd_ctx - Structure contains per open file handle context.
+ * @fd_device: Pointer to fd device.
+ * @fh: V4l2 file handle.
+ * @vb2_q: Videobuf 2 queue.
+ * @sequence: Sequence number for this statistic.
+ * @subscribed_for_event: Flag indicating whether the fd event is subscribed.
+ * @format: Current format.
+ * @settings: Current settings.
+ * @mem_pool: FD hw memory pool.
+ * @stats: Pointer to statistic buffers.
+ * @work_buf: Working memory buffer handle.
+ */
+struct fd_ctx {
+ struct msm_fd_device *fd_device;
+ struct v4l2_fh fh;
+ struct vb2_queue vb2_q;
+ unsigned int sequence;
+ atomic_t subscribed_for_event;
+ struct msm_fd_format format;
+ struct msm_fd_setings settings;
+ struct msm_fd_mem_pool mem_pool;
+ struct msm_fd_stats *stats;
+ struct msm_fd_buf_handle work_buf;
+};
+
+/*
+ * enum msm_fd_device_state - FD device state.
+ * @MSM_FD_DEVICE_IDLE: Device is idle, we can start with processing.
+ * @MSM_FD_DEVICE_RUNNING: Device is running, next processing will be
+ * scheduled from fd irq.
+ */
+enum msm_fd_device_state {
+ MSM_FD_DEVICE_IDLE,
+ MSM_FD_DEVICE_RUNNING,
+};
+
+/*
+ * enum msm_fd_mem_resources - FD device iomem resources.
+ * @MSM_FD_IOMEM_CORE: Index of fd core registers.
+ * @MSM_FD_IOMEM_MISC: Index of fd misc registers.
+ * @MSM_FD_IOMEM_VBIF: Index of fd vbif registers.
+ * @MSM_FD_IOMEM_LAST: Not valid.
+ */
+enum msm_fd_mem_resources {
+ MSM_FD_IOMEM_CORE,
+ MSM_FD_IOMEM_MISC,
+ MSM_FD_IOMEM_VBIF,
+ MSM_FD_IOMEM_LAST
+};
+
+/*
+ * struct msm_fd_device - FD device structure.
+ * @hw_revision: Face detection hw revision.
+ * @lock: Lock used for reference count.
+ * @slock: Spinlock used to protect FD device struct.
+ * @irq_num: Face detection irq number.
+ * @ref_count: Device reference count.
+ * @res_mem: Array of memory resources used by FD device.
+ * @iomem_base: Array of register mappings used by FD device.
+ * @ioarea: Array of requested memory regions used by FD device.
+ * @vdd: Array of regulators used by FD device.
+ * @regulator_num: Number of regulators attached to the device.
+ * @clk_num: Number of clocks attached to the device.
+ * @clk: Array of clock resources used by fd device.
+ * @clk_rates_num: Number of clock rate levels defined in device tree.
+ * @clk_rates: Array of clock rates, one row per clock rate level.
+ * @bus_vectors: Pointer to bus vectors array.
+ * @bus_paths: Pointer to bus paths array.
+ * @bus_scale_data: Memory access bus scale data.
+ * @bus_client: Memory access bus client.
+ * @iommu_attached_cnt: Iommu attached devices reference count.
+ * @iommu_hdl: reference for iommu context.
+ * @dev: Pointer to device struct.
+ * @v4l2_dev: V4l2 device.
+ * @video: Video device.
+ * @state: FD device state.
+ * @buf_queue: FD device processing queue.
+ * @work_queue: Pointer to FD device IRQ bottom half workqueue.
+ * @work: IRQ bottom half work struct.
+ * @hw_halt_completion: Completes when face detection hw halt completes.
+ */
+struct msm_fd_device {
+ u32 hw_revision;
+
+ struct mutex lock;
+ spinlock_t slock;
+ int ref_count;
+
+ int irq_num;
+ struct resource *res_mem[MSM_FD_IOMEM_LAST];
+ void __iomem *iomem_base[MSM_FD_IOMEM_LAST];
+ struct resource *ioarea[MSM_FD_IOMEM_LAST];
+ struct regulator *vdd[MSM_FD_MAX_REGULATOR_NUM];
+ unsigned int regulator_num;
+
+ unsigned int clk_num;
+ struct clk *clk[MSM_FD_MAX_CLK_NUM];
+ unsigned int clk_rates_num;
+ unsigned int clk_rates[MSM_FD_MAX_CLK_RATES][MSM_FD_MAX_CLK_NUM];
+
+ struct msm_bus_vectors *bus_vectors;
+ struct msm_bus_paths *bus_paths;
+ struct msm_bus_scale_pdata bus_scale_data;
+ uint32_t bus_client;
+
+ unsigned int iommu_attached_cnt;
+
+ int iommu_hdl;
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device video;
+
+ enum msm_fd_device_state state;
+ struct list_head buf_queue;
+ struct workqueue_struct *work_queue;
+ struct work_struct work;
+ struct completion hw_halt_completion;
+};
+
+#endif /* __MSM_FD_DEV_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c
new file mode 100644
index 000000000000..87102d5020ce
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c
@@ -0,0 +1,1626 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/msm_ion.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <media/videobuf2-core.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include "msm_fd_dev.h"
+#include "msm_fd_hw.h"
+#include "msm_fd_regs.h"
+#include "cam_smmu_api.h"
+#include "msm_camera_io_util.h"
+
+/* After which revision misc irq for engine is needed */
+#define MSM_FD_MISC_IRQ_FROM_REV 0x10010000
+/* Face detection workqueue name */
+#define MSM_FD_WORKQUEUE_NAME "face-detection"
+/* Face detection bus client name */
+#define MSM_FD_BUS_CLIENT_NAME "msm_face_detect"
+/* Face detection processing timeout in ms */
+#define MSM_FD_PROCESSING_TIMEOUT_MS 500
+/* Face detection halt timeout in ms */
+#define MSM_FD_HALT_TIMEOUT_MS 100
+/* Smmu callback name */
+#define MSM_FD_SMMU_CB_NAME "camera_fd"
+/*
+ * enum msm_fd_dt_reg_setting_index - FD register setting entries in DT.
+ * @MSM_FD_REG_ADDR_OFFSET_IDX: Register address offset index.
+ * @MSM_FD_REG_VALUE_IDX: Register value index.
+ * @MSM_FD_REG_MASK_IDX: Register mask index.
+ * @MSM_FD_REG_LAST_IDX: Index count.
+ */
+enum msm_fd_dt_reg_setting_index {
+ MSM_FD_REG_ADDR_OFFSET_IDX,
+ MSM_FD_REG_VALUE_IDX,
+ MSM_FD_REG_MASK_IDX,
+ MSM_FD_REG_LAST_IDX
+};
+
+/*
+ * msm_fd_hw_read_reg - Fd read from register.
+ * @fd: Pointer to fd device.
+ * @base_idx: Fd memory resource index.
+ * @reg: Register address to read from.
+ */
+static inline u32 msm_fd_hw_read_reg(struct msm_fd_device *fd,
+ enum msm_fd_mem_resources base_idx, u32 reg)
+{
+ return msm_camera_io_r(fd->iomem_base[base_idx] + reg);
+}
+
+/*
+ * msm_fd_hw_write_reg - Fd write to register.
+ * @fd: Pointer to fd device.
+ * @base_idx: Fd memory resource index.
+ * @reg: Register address to write to.
+ * @value: Value to be written.
+ */
+static inline void msm_fd_hw_write_reg(struct msm_fd_device *fd,
+ enum msm_fd_mem_resources base_idx, u32 reg, u32 value)
+{
+ msm_camera_io_w(value, fd->iomem_base[base_idx] + reg);
+}
+
+/*
+ * msm_fd_hw_reg_clr - Fd clear register bits.
+ * @fd: Pointer to fd device.
+ * @mmio_range: Fd memory resource index.
+ * @reg: Register address to modify.
+ * @clr_bits: Bits to be cleared in the register.
+ */
+static inline void msm_fd_hw_reg_clr(struct msm_fd_device *fd,
+ enum msm_fd_mem_resources mmio_range, u32 reg, u32 clr_bits)
+{
+ u32 bits = msm_fd_hw_read_reg(fd, mmio_range, reg);
+
+ msm_fd_hw_write_reg(fd, mmio_range, reg, (bits & ~clr_bits));
+}
+
+/*
+ * msm_fd_hw_reg_set - Fd set register bits.
+ * @fd: Pointer to fd device.
+ * @mmio_range: Fd memory resource index.
+ * @reg: Register address to modify.
+ * @set_bits: Bits to be set in the register.
+ */
+static inline void msm_fd_hw_reg_set(struct msm_fd_device *fd,
+ enum msm_fd_mem_resources mmio_range, u32 reg, u32 set_bits)
+{
+ u32 bits = msm_fd_hw_read_reg(fd, mmio_range, reg);
+
+ msm_fd_hw_write_reg(fd, mmio_range, reg, (bits | set_bits));
+}
+
+/*
+ * msm_fd_hw_set_size_mode - Fd set size mode register.
+ * @fd: Pointer to fd device.
+ * @mode: Size mode to be set.
+ */
+static inline void msm_fd_hw_set_size_mode(struct msm_fd_device *fd, u32 mode)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_IMAGE_SIZE, mode);
+}
+
+/*
+ * msm_fd_hw_set_crop - Fd set crop registers.
+ * @fd: Pointer to fd device.
+ * @crop: Pointer to v4l2 crop struct containing the crop information
+ */
+static inline void msm_fd_hw_set_crop(struct msm_fd_device *fd,
+ struct v4l2_rect *crop)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_START_X,
+ (crop->top & MSM_FD_START_X_MASK));
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_START_Y,
+ (crop->left & MSM_FD_START_Y_MASK));
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_SIZE_X,
+ (crop->width & MSM_FD_SIZE_X_MASK));
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_SIZE_Y,
+ (crop->height & MSM_FD_SIZE_Y_MASK));
+}
+
+/*
+ * msm_fd_hw_set_bytesperline - Fd set bytes per line register.
+ * @fd: Pointer to fd device.
+ * @b: Bytes per line to be set.
+ */
+static inline void msm_fd_hw_set_bytesperline(struct msm_fd_device *fd, u32 b)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_LINE_BYTES,
+ (b & MSM_FD_LINE_BYTES_MASK));
+}
+
+/*
+ * msm_fd_hw_set_image_addr - Fd set image address.
+ * @fd: Pointer to fd device.
+ * @addr: Input image address to be set.
+ */
+static inline void msm_fd_hw_set_image_addr(struct msm_fd_device *fd, u32 addr)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_IMAGE_ADDR, addr);
+}
+
+/*
+ * msm_fd_hw_set_work_addr - Fd set working buffer address.
+ * @fd: Pointer to fd device.
+ * @addr: Working buffer address need to be set.
+ */
+static inline void msm_fd_hw_set_work_addr(struct msm_fd_device *fd, u32 addr)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_WORK_ADDR, addr);
+}
+
+/*
+ * msm_fd_hw_set_direction_angle - Fd set face direction and face angle.
+ * @fd: Pointer to fd device.
+ * @direction: Face direction need to be set.
+ * @angle: Face angle need to be set.
+ */
+static inline void msm_fd_hw_set_direction_angle(struct msm_fd_device *fd,
+ u32 direction, u32 angle)
+{
+ u32 reg;
+ u32 value;
+
+ value = direction | (angle ? 1 << (angle + 1) : 0);
+ if (value > MSM_FD_CONDT_DIR_MAX)
+ value = MSM_FD_CONDT_DIR_MAX;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONDT);
+
+ reg &= ~MSM_FD_CONDT_DIR_MASK;
+ reg |= (value << MSM_FD_CONDT_DIR_SHIFT);
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONDT, reg);
+}
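+
+/*
+ * Worked example for the encoding above (numbers are illustrative only):
+ * with direction = 1 and angle = 2 the value becomes 1 | (1 << 3) = 0x9; it
+ * is clamped to MSM_FD_CONDT_DIR_MAX if it exceeds that limit and then
+ * shifted into the direction field of the CONDT register.
+ */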
+
+/*
+ * msm_fd_hw_set_min_face - Fd set minimum face size register.
+ * @fd: Pointer to fd device.
+ * @size: Minimum face size need to be set.
+ */
+static inline void msm_fd_hw_set_min_face(struct msm_fd_device *fd, u32 size)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONDT);
+
+ reg &= ~MSM_FD_CONDT_MIN_MASK;
+ reg |= (size << MSM_FD_CONDT_MIN_SHIFT);
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONDT, reg);
+}
+
+/*
+ * msm_fd_hw_set_threshold - Fd set detection threshold register.
+ * @fd: Pointer to fd device.
+ * @thr: Detection threshold to be set.
+ */
+static inline void msm_fd_hw_set_threshold(struct msm_fd_device *fd, u32 thr)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_DHINT,
+ (thr & MSM_FD_DHINT_MASK));
+}
+
+/*
+ * msm_fd_hw_srst - Sw reset control registers.
+ * @fd: Pointer to fd device.
+ *
+ * Before every processing run we need to toggle this bit.
+ * This function sets the sw reset control bit to 1 and then back to 0.
+ */
+static inline void msm_fd_hw_srst(struct msm_fd_device *fd)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL,
+ MSM_FD_CONTROL_SRST);
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL, 0);
+}
+
+/*
+ * msm_fd_hw_get_face_count - Fd read face count register.
+ * @fd: Pointer to fd device.
+ */
+int msm_fd_hw_get_face_count(struct msm_fd_device *fd)
+{
+ u32 reg;
+ u32 value;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_RESULT_CNT);
+
+ value = reg & MSM_FD_RESULT_CNT_MASK;
+ if (value > MSM_FD_MAX_FACES_DETECTED) {
+ dev_warn(fd->dev, "Face count %d out of limit\n", value);
+ value = MSM_FD_MAX_FACES_DETECTED;
+ }
+
+ return value;
+}
+
+/*
+ * msm_fd_hw_run - Starts face detection engine.
+ * @fd: Pointer to fd device.
+ *
+ * Before calling this function make sure that the control sw reset is
+ * performed (see function msm_fd_hw_srst).
+ * NOTE: The engine needs to be reset before it is started again.
+ */
+static inline void msm_fd_hw_run(struct msm_fd_device *fd)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL,
+ MSM_FD_CONTROL_RUN);
+}
+
+/*
+ * msm_fd_hw_is_finished - Check if fd hw engine is done with processing.
+ * @fd: Pointer to fd device.
+ *
+ * NOTE: If finish bit is not set, we should not read the result.
+ */
+static int msm_fd_hw_is_finished(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL);
+
+ return reg & MSM_FD_CONTROL_FINISH;
+}
+
+/*
+ * msm_fd_hw_is_runnig - Check if fd hw engine is busy.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_is_runnig(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE, MSM_FD_CONTROL);
+
+ return reg & MSM_FD_CONTROL_RUN;
+}
+
+/*
+ * msm_fd_hw_misc_irq_is_core - Check if fd received misc core irq.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_misc_irq_is_core(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_MISC,
+ MSM_FD_MISC_IRQ_STATUS);
+
+ return reg & MSM_FD_MISC_IRQ_STATUS_CORE_IRQ;
+}
+
+/*
+ * msm_fd_hw_misc_irq_is_halt - Check if fd received misc halt irq.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_misc_irq_is_halt(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_MISC,
+ MSM_FD_MISC_IRQ_STATUS);
+
+ return reg & MSM_FD_MISC_IRQ_STATUS_HALT_REQ;
+}
+
+/*
+ * msm_fd_hw_misc_clear_all_irq - Clear all misc irq statuses.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_misc_clear_all_irq(struct msm_fd_device *fd)
+{
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_MISC, MSM_FD_MISC_IRQ_CLEAR,
+ MSM_FD_MISC_IRQ_CLEAR_HALT | MSM_FD_MISC_IRQ_CLEAR_CORE);
+}
+
+/*
+ * msm_fd_hw_misc_irq_enable - Enable fd misc core and halt irq.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_misc_irq_enable(struct msm_fd_device *fd)
+{
+ msm_fd_hw_reg_set(fd, MSM_FD_IOMEM_MISC, MSM_FD_MISC_IRQ_MASK,
+ MSM_FD_MISC_IRQ_CLEAR_HALT | MSM_FD_MISC_IRQ_CLEAR_CORE);
+}
+
+/*
+ * msm_fd_hw_misc_irq_disable - Disable fd misc core and halt irq.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_misc_irq_disable(struct msm_fd_device *fd)
+{
+ msm_fd_hw_reg_clr(fd, MSM_FD_IOMEM_MISC, MSM_FD_MISC_IRQ_MASK,
+ MSM_FD_MISC_IRQ_CLEAR_HALT | MSM_FD_MISC_IRQ_CLEAR_CORE);
+}
+
+/*
+ * msm_fd_hw_get_revision - Read and return the fd hw revision.
+ * @fd: Pointer to fd device.
+ */
+int msm_fd_hw_get_revision(struct msm_fd_device *fd)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_MISC,
+ MSM_FD_MISC_HW_VERSION);
+
+ dev_dbg(fd->dev, "Face detection hw revision 0x%x\n", reg);
+
+ return reg;
+}
+
+/*
+ * msm_fd_hw_get_result_x - Get fd result center x coordinate.
+ * @fd: Pointer to fd device.
+ * @idx: Result face index
+ */
+int msm_fd_hw_get_result_x(struct msm_fd_device *fd, int idx)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE,
+ MSM_FD_RESULT_CENTER_X(idx));
+
+ return reg;
+}
+
+/*
+ * msm_fd_hw_get_result_y - Get fd result center y coordinate.
+ * @fd: Pointer to fd device.
+ * @idx: Result face index
+ */
+int msm_fd_hw_get_result_y(struct msm_fd_device *fd, int idx)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE,
+ MSM_FD_RESULT_CENTER_Y(idx));
+
+ return reg;
+}
+
+/*
+ * msm_fd_hw_get_result_conf_size - Get fd result confidence level and size.
+ * @fd: Pointer to fd device.
+ * @idx: Result face index.
+ * @conf: Pointer to confidence value to be filled.
+ * @size: Pointer to size value to be filled.
+ */
+void msm_fd_hw_get_result_conf_size(struct msm_fd_device *fd,
+ int idx, u32 *conf, u32 *size)
+{
+ u32 reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE,
+ MSM_FD_RESULT_CONF_SIZE(idx));
+
+ *conf = (reg >> MSM_FD_RESULT_CONF_SHIFT) & MSM_FD_RESULT_CONF_MASK;
+ *size = (reg >> MSM_FD_RESULT_SIZE_SHIFT) & MSM_FD_RESULT_SIZE_MASK;
+}
+
+/*
+ * msm_fd_hw_get_result_angle_pose - Get fd result angle and pose.
+ * @fd: Pointer to fd device.
+ * @idx: Result face index.
+ * @angle: Pointer to angle value need to be filled.
+ * @pose: Pointer to pose value need to be filled.
+ */
+void msm_fd_hw_get_result_angle_pose(struct msm_fd_device *fd, int idx,
+ u32 *angle, u32 *pose)
+{
+ u32 reg;
+ u32 pose_reg;
+
+ reg = msm_fd_hw_read_reg(fd, MSM_FD_IOMEM_CORE,
+ MSM_FD_RESULT_ANGLE_POSE(idx));
+ *angle = (reg >> MSM_FD_RESULT_ANGLE_SHIFT) & MSM_FD_RESULT_ANGLE_MASK;
+ pose_reg = (reg >> MSM_FD_RESULT_POSE_SHIFT) & MSM_FD_RESULT_POSE_MASK;
+
+ switch (pose_reg) {
+ case MSM_FD_RESULT_POSE_FRONT:
+ *pose = MSM_FD_POSE_FRONT;
+ break;
+ case MSM_FD_RESULT_POSE_RIGHT_DIAGONAL:
+ *pose = MSM_FD_POSE_RIGHT_DIAGONAL;
+ break;
+ case MSM_FD_RESULT_POSE_RIGHT:
+ *pose = MSM_FD_POSE_RIGHT;
+ break;
+ case MSM_FD_RESULT_POSE_LEFT_DIAGONAL:
+ *pose = MSM_FD_POSE_LEFT_DIAGONAL;
+ break;
+ case MSM_FD_RESULT_POSE_LEFT:
+ *pose = MSM_FD_POSE_LEFT;
+ break;
+ default:
+ dev_err(fd->dev, "Invalid pose from the engine\n");
+ *pose = MSM_FD_POSE_FRONT;
+ break;
+ }
+}
+
+/*
+ * msm_fd_hw_misc_irq_supported - Check if misc irq is supported.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_misc_irq_supported(struct msm_fd_device *fd)
+{
+ return fd->hw_revision >= MSM_FD_MISC_IRQ_FROM_REV;
+}
+
+/*
+ * msm_fd_hw_halt - Halt fd core.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_halt(struct msm_fd_device *fd)
+{
+ unsigned long time;
+
+ if (msm_fd_hw_misc_irq_supported(fd)) {
+ init_completion(&fd->hw_halt_completion);
+
+ msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_MISC, MSM_FD_HW_STOP, 1);
+
+ time = wait_for_completion_timeout(&fd->hw_halt_completion,
+ msecs_to_jiffies(MSM_FD_HALT_TIMEOUT_MS));
+ if (!time)
+ dev_err(fd->dev, "Face detection halt timeout\n");
+
+ }
+}
+
+/*
+ * msm_fd_hw_core_irq - Face detection core irq handler.
+ * @irq: Irq number.
+ * @dev_id: Pointer to fd device.
+ */
+static irqreturn_t msm_fd_hw_core_irq(int irq, void *dev_id)
+{
+ struct msm_fd_device *fd = dev_id;
+
+ if (msm_fd_hw_is_finished(fd))
+ queue_work(fd->work_queue, &fd->work);
+ else
+ dev_err(fd->dev, "Something wrong! FD still running\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * msm_fd_hw_misc_irq - Face detection misc irq handler.
+ * @irq: Irq number.
+ * @dev_id: Pointer to fd device.
+ */
+static irqreturn_t msm_fd_hw_misc_irq(int irq, void *dev_id)
+{
+ struct msm_fd_device *fd = dev_id;
+
+ if (msm_fd_hw_misc_irq_is_core(fd))
+ msm_fd_hw_core_irq(irq, dev_id);
+
+ if (msm_fd_hw_misc_irq_is_halt(fd))
+ complete_all(&fd->hw_halt_completion);
+
+ msm_fd_hw_misc_clear_all_irq(fd);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * msm_fd_hw_request_irq - Request fd irq and create the irq bottom half workqueue.
+ * @pdev: Pointer to platform device.
+ * @fd: Pointer to fd device.
+ * @work_func: Pointer to work func used for irq bottom half.
+ */
+int msm_fd_hw_request_irq(struct platform_device *pdev,
+ struct msm_fd_device *fd, work_func_t work_func)
+{
+ int ret;
+
+ fd->irq_num = platform_get_irq(pdev, 0);
+ if (fd->irq_num < 0) {
+ dev_err(fd->dev, "Can not get fd core irq resource\n");
+ ret = -ENODEV;
+ goto error_irq;
+ }
+
+ /* If vbif is shared we will need wrapper irq for releasing vbif */
+ if (msm_fd_hw_misc_irq_supported(fd)) {
+ ret = devm_request_irq(fd->dev, fd->irq_num,
+ msm_fd_hw_misc_irq, IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev), fd);
+ if (ret) {
+ dev_err(fd->dev, "Can not claim wrapper IRQ %d\n",
+ fd->irq_num);
+ goto error_irq;
+ }
+ } else {
+ ret = devm_request_irq(fd->dev, fd->irq_num,
+ msm_fd_hw_core_irq, IRQF_TRIGGER_RISING,
+ dev_name(fd->dev), fd);
+ if (ret) {
+ dev_err(&pdev->dev, "Can not claim core IRQ %d\n",
+ fd->irq_num);
+ goto error_irq;
+ }
+
+ }
+
+ fd->work_queue = alloc_workqueue(MSM_FD_WORKQUEUE_NAME,
+ WQ_HIGHPRI | WQ_UNBOUND, 0);
+ if (!fd->work_queue) {
+ dev_err(fd->dev, "Can not register workqueue\n");
+ ret = -ENOMEM;
+ goto error_alloc_workqueue;
+ }
+ INIT_WORK(&fd->work, work_func);
+
+ return 0;
+
+error_alloc_workqueue:
+ devm_free_irq(fd->dev, fd->irq_num, fd);
+error_irq:
+ return ret;
+}
+
+/*
+ * msm_fd_hw_release_irq - Free core and wrap irq.
+ * @fd: Pointer to fd device.
+ */
+void msm_fd_hw_release_irq(struct msm_fd_device *fd)
+{
+ if (fd->irq_num >= 0) {
+ devm_free_irq(fd->dev, fd->irq_num, fd);
+ fd->irq_num = -1;
+ }
+ if (fd->work_queue) {
+ destroy_workqueue(fd->work_queue);
+ fd->work_queue = NULL;
+ }
+}
+
+/*
+ * msm_fd_hw_set_dt_parms_by_name() - read DT params and write to registers.
+ * @fd: Pointer to fd device.
+ * @dt_prop_name: Name of the device tree property to read.
+ * @base_idx: Fd memory resource index.
+ *
+ * This function reads register offset and value pairs from dtsi based on
+ * device tree property name and writes to FD registers.
+ *
+ * Return: 0 on success and negative error on failure.
+ */
+int32_t msm_fd_hw_set_dt_parms_by_name(struct msm_fd_device *fd,
+ const char *dt_prop_name,
+ enum msm_fd_mem_resources base_idx)
+{
+ struct device_node *of_node;
+ int32_t i = 0, rc = 0;
+ uint32_t *dt_reg_settings = NULL;
+ uint32_t dt_count = 0;
+
+ of_node = fd->dev->of_node;
+ pr_debug("%s:%d E\n", __func__, __LINE__);
+
+ if (!of_get_property(of_node, dt_prop_name, &dt_count)) {
+ pr_err("%s: Error property does not exist\n", __func__);
+ return -ENOENT;
+ }
+ if (dt_count % (sizeof(int32_t) * MSM_FD_REG_LAST_IDX)) {
+ pr_err("%s: Error invalid entries\n", __func__);
+ return -EINVAL;
+ }
+ dt_count /= sizeof(int32_t);
+ if (dt_count != 0) {
+ dt_reg_settings = kcalloc(dt_count,
+ sizeof(uint32_t),
+ GFP_KERNEL);
+
+ if (!dt_reg_settings)
+ return -ENOMEM;
+
+ rc = of_property_read_u32_array(of_node,
+ dt_prop_name,
+ dt_reg_settings,
+ dt_count);
+ if (rc < 0) {
+ pr_err("%s: No reg info\n", __func__);
+ kfree(dt_reg_settings);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dt_count; i = i + MSM_FD_REG_LAST_IDX) {
+ msm_fd_hw_reg_clr(fd, base_idx,
+ dt_reg_settings[i + MSM_FD_REG_ADDR_OFFSET_IDX],
+ dt_reg_settings[i + MSM_FD_REG_MASK_IDX]);
+ msm_fd_hw_reg_set(fd, base_idx,
+ dt_reg_settings[i + MSM_FD_REG_ADDR_OFFSET_IDX],
+ dt_reg_settings[i + MSM_FD_REG_VALUE_IDX] &
+ dt_reg_settings[i + MSM_FD_REG_MASK_IDX]);
+ pr_debug("%s:%d] %p %08x\n", __func__, __LINE__,
+ fd->iomem_base[base_idx] +
+ dt_reg_settings[i + MSM_FD_REG_ADDR_OFFSET_IDX],
+ dt_reg_settings[i + MSM_FD_REG_VALUE_IDX] &
+ dt_reg_settings[i + MSM_FD_REG_MASK_IDX]);
+ }
+ kfree(dt_reg_settings);
+ }
+ return 0;
+}
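+
+/*
+ * Illustrative DT layout for the properties parsed above (offsets, values
+ * and masks are placeholders): each entry is an <offset value mask> triplet,
+ * e.g.
+ *
+ *	qcom,fd-vbif-reg-settings = <0x20 0x10 0x10>,
+ *				    <0x24 0x01 0xff>;
+ *
+ * For every triplet the masked bits are first cleared and then set to
+ * (value & mask), exactly as done in the loop above.
+ */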
+
+/*
+ * msm_fd_hw_set_dt_parms() - set FD device tree configuration.
+ * @fd: Pointer to fd device.
+ *
+ * This function holds an array of device tree property names and calls
+ * msm_fd_hw_set_dt_parms_by_name() for each property.
+ *
+ * Return: 0 on success and negative error on failure.
+ */
+int msm_fd_hw_set_dt_parms(struct msm_fd_device *fd)
+{
+ int rc = 0;
+ uint8_t dt_prop_cnt = MSM_FD_IOMEM_LAST;
+ char *dt_prop_name[MSM_FD_IOMEM_LAST] = {"qcom,fd-core-reg-settings",
+ "qcom,fd-misc-reg-settings", "qcom,fd-vbif-reg-settings"};
+
+ while (dt_prop_cnt) {
+ dt_prop_cnt--;
+ rc = msm_fd_hw_set_dt_parms_by_name(fd,
+ dt_prop_name[dt_prop_cnt],
+ dt_prop_cnt);
+ if (rc == -ENOENT) {
+ pr_debug("%s: No %s property\n", __func__,
+ dt_prop_name[dt_prop_cnt]);
+ rc = 0;
+ } else if (rc < 0) {
+ pr_err("%s: %s params set fail\n", __func__,
+ dt_prop_name[dt_prop_cnt]);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+/*
+ * msm_fd_hw_release_mem_resources - Releases memory resources.
+ * @fd: Pointer to fd device.
+ */
+void msm_fd_hw_release_mem_resources(struct msm_fd_device *fd)
+{
+ int i;
+
+ /* Prepare memory resources */
+ for (i = 0; i < MSM_FD_IOMEM_LAST; i++) {
+ if (fd->iomem_base[i]) {
+ iounmap(fd->iomem_base[i]);
+ fd->iomem_base[i] = NULL;
+ }
+ if (fd->ioarea[i]) {
+ release_mem_region(fd->res_mem[i]->start,
+ resource_size(fd->res_mem[i]));
+ fd->ioarea[i] = NULL;
+ }
+ fd->res_mem[i] = NULL;
+ }
+}
+
+/*
+ * msm_fd_hw_get_mem_resources - Get memory resources.
+ * @pdev: Pointer to fd platform device.
+ * @fd: Pointer to fd device.
+ *
+ * Get and ioremap platform memory resources.
+ */
+int msm_fd_hw_get_mem_resources(struct platform_device *pdev,
+ struct msm_fd_device *fd)
+{
+ int i;
+ int ret = 0;
+
+ /* Prepare memory resources */
+ for (i = 0; i < MSM_FD_IOMEM_LAST; i++) {
+ /* Get resources */
+ fd->res_mem[i] = platform_get_resource(pdev,
+ IORESOURCE_MEM, i);
+ if (!fd->res_mem[i]) {
+ dev_err(fd->dev, "Fail get resource idx %d\n",
+ i);
+ ret = -ENODEV;
+ break;
+ }
+
+ fd->ioarea[i] = request_mem_region(fd->res_mem[i]->start,
+ resource_size(fd->res_mem[i]), fd->res_mem[i]->name);
+ if (!fd->ioarea[i]) {
+ dev_err(fd->dev, "%s can not request mem\n",
+ fd->res_mem[i]->name);
+ ret = -ENODEV;
+ break;
+ }
+
+ fd->iomem_base[i] = ioremap(fd->res_mem[i]->start,
+ resource_size(fd->res_mem[i]));
+ if (!fd->iomem_base[i]) {
+ dev_err(fd->dev, "%s can not remap region\n",
+ fd->res_mem[i]->name);
+ ret = -ENODEV;
+ break;
+ }
+ }
+
+ if (ret < 0)
+ msm_fd_hw_release_mem_resources(fd);
+
+ return ret;
+}
+
+/*
+ * msm_fd_hw_get_regulators - Get fd regulators.
+ * @fd: Pointer to fd device.
+ *
+ * Read regulator information from device tree and perform get regulator.
+ */
+int msm_fd_hw_get_regulators(struct msm_fd_device *fd)
+{
+ const char *regulator_name;
+ uint32_t cnt;
+ int i;
+ int ret;
+
+ if (of_get_property(fd->dev->of_node, "qcom,vdd-names", NULL)) {
+ cnt = of_property_count_strings(fd->dev->of_node,
+ "qcom,vdd-names");
+
+ if ((cnt == 0) || (cnt == -EINVAL)) {
+ dev_err(fd->dev, "no regulators found, count=%d\n",
+ cnt);
+ return -EINVAL;
+ }
+
+ if (cnt > MSM_FD_MAX_REGULATOR_NUM) {
+ dev_err(fd->dev,
+ "Exceed max number of regulators %d\n", cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_string_index(fd->dev->of_node,
+ "qcom,vdd-names",
+ i, &regulator_name);
+ if (ret < 0) {
+ dev_err(fd->dev,
+ "Cannot read regulator name %d\n", i);
+ return ret;
+ }
+
+ fd->vdd[i] = regulator_get(fd->dev, regulator_name);
+ if (IS_ERR(fd->vdd[i])) {
+ ret = PTR_ERR(fd->vdd[i]);
+ fd->vdd[i] = NULL;
+ dev_err(fd->dev, "Error regulator get %s\n",
+ regulator_name);
+ goto regulator_get_error;
+ }
+ dev_dbg(fd->dev, "Regulator name idx %d %s\n", i,
+ regulator_name);
+ }
+ fd->regulator_num = cnt;
+ } else {
+ fd->regulator_num = 1;
+ fd->vdd[0] = regulator_get(fd->dev, "vdd");
+ if (IS_ERR(fd->vdd[0])) {
+ dev_err(fd->dev, "Fail to get vdd regulator\n");
+ ret = PTR_ERR(fd->vdd[0]);
+ fd->vdd[0] = NULL;
+ return ret;
+ }
+ }
+ return 0;
+
+regulator_get_error:
+ for (; i > 0; i--) {
+ if (!IS_ERR_OR_NULL(fd->vdd[i - 1]))
+ regulator_put(fd->vdd[i - 1]);
+ }
+ return ret;
+}
+
+/*
+ * msm_fd_hw_put_regulators - Put fd regulators.
+ * @fd: Pointer to fd device.
+ */
+int msm_fd_hw_put_regulators(struct msm_fd_device *fd)
+{
+ int i;
+
+ for (i = fd->regulator_num - 1; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL(fd->vdd[i]))
+ regulator_put(fd->vdd[i]);
+ }
+ return 0;
+}
+
+/*
+ * msm_fd_hw_enable_regulators - Prepare and enable fd regulators.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_enable_regulators(struct msm_fd_device *fd)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < fd->regulator_num; i++) {
+
+ ret = regulator_enable(fd->vdd[i]);
+ if (ret < 0) {
+ dev_err(fd->dev, "regulator enable failed %d\n", i);
+ regulator_put(fd->vdd[i]);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ for (; i > 0; i--) {
+ if (!IS_ERR_OR_NULL(fd->vdd[i - 1])) {
+ regulator_disable(fd->vdd[i - 1]);
+ regulator_put(fd->vdd[i - 1]);
+ }
+ }
+ return ret;
+}
+
+/*
+ * msm_fd_hw_disable_regulators - Disable fd regulator.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_disable_regulators(struct msm_fd_device *fd)
+{
+ int i;
+
+ for (i = fd->regulator_num - 1; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL(fd->vdd[i]))
+ regulator_disable(fd->vdd[i]);
+ }
+}
+
+/*
+ * msm_fd_hw_get_clocks - Get fd clocks.
+ * @fd: Pointer to fd device.
+ *
+ * Read clock information from device tree and perform get clock.
+ */
+int msm_fd_hw_get_clocks(struct msm_fd_device *fd)
+{
+ const char *clk_name;
+ size_t cnt;
+ int clk_rates;
+ int i;
+ int ret;
+
+ cnt = of_property_count_strings(fd->dev->of_node, "clock-names");
+ if (cnt > MSM_FD_MAX_CLK_NUM) {
+ dev_err(fd->dev, "Exceed max number of clocks %zu\n", cnt);
+ return -EINVAL;
+ }
+
+ clk_rates = 0;
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_string_index(fd->dev->of_node,
+ "clock-names", i, &clk_name);
+ if (ret < 0) {
+ dev_err(fd->dev, "Can not read clock name %d\n", i);
+ goto error;
+ }
+
+ fd->clk[i] = clk_get(fd->dev, clk_name);
+ if (IS_ERR(fd->clk[i])) {
+ ret = -ENOENT;
+ dev_err(fd->dev, "Error clock get %s\n", clk_name);
+ goto error;
+ }
+ dev_dbg(fd->dev, "Clock name idx %d %s\n", i, clk_name);
+ }
+ fd->clk_num = cnt;
+
+ cnt = 0;
+ for (clk_rates = 0; clk_rates < MSM_FD_MAX_CLK_RATES; clk_rates++) {
+ for (i = 0; i < fd->clk_num; i++) {
+ ret = of_property_read_u32_index(fd->dev->of_node,
+ "clock-rates", cnt++,
+ &fd->clk_rates[clk_rates][i]);
+ if (ret < 0)
+ break;
+ dev_dbg(fd->dev, "Clock rate idx %d idx %d value %d\n",
+ clk_rates, i, fd->clk_rates[clk_rates][i]);
+
+ }
+ if (ret < 0)
+ break;
+ }
+ fd->clk_rates_num = clk_rates;
+ if (fd->clk_rates_num == 0) {
+ ret = -ENOENT;
+ dev_err(fd->dev, "Can not get clock rates\n");
+ goto error;
+ }
+
+ return 0;
+error:
+ for (; i > 0; i--)
+ clk_put(fd->clk[i - 1]);
+
+ return ret;
+}
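+
+/*
+ * Illustrative layout of the "clock-rates" property read above (rates are
+ * placeholders): the flat u32 array is consumed row by row, one row per
+ * speed level and one column per entry in "clock-names". With two clocks
+ * and two levels, e.g.
+ *
+ *	clock-rates = <19200000 200000000
+ *		       19200000 400000000>;
+ *
+ * clk_rates[0] holds {19.2 MHz, 200 MHz}, clk_rates[1] holds
+ * {19.2 MHz, 400 MHz} and clk_rates_num ends up as 2.
+ */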
+
+/*
+ * msm_fd_hw_put_clocks - Put fd clocks.
+ * @fd: Pointer to fd device.
+ */
+int msm_fd_hw_put_clocks(struct msm_fd_device *fd)
+{
+ int i;
+
+ for (i = 0; i < fd->clk_num; i++) {
+ if (!IS_ERR_OR_NULL(fd->clk[i]))
+ clk_put(fd->clk[i]);
+ }
+ fd->clk_num = 0;
+ return 0;
+}
+
+/*
+ * msm_fd_hw_set_clock_rate_idx - Set clock rate based on the index.
+ * @fd: Pointer to fd device.
+ * @idx: Clock Array index described in device tree.
+ */
+static int msm_fd_hw_set_clock_rate_idx(struct msm_fd_device *fd,
+ unsigned int idx)
+{
+ int ret;
+ long clk_rate;
+ int i;
+
+ if (idx >= fd->clk_rates_num) {
+ dev_err(fd->dev, "Invalid clock index %u\n", idx);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fd->clk_num; i++) {
+
+ clk_rate = clk_round_rate(fd->clk[i], fd->clk_rates[idx][i]);
+ if (clk_rate < 0) {
+ dev_dbg(fd->dev, "Clk raund rate fail skip %d\n", i);
+ continue;
+ }
+
+ ret = clk_set_rate(fd->clk[i], clk_rate);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail clock rate %ld\n", clk_rate);
+ return -EINVAL;
+ }
+ dev_dbg(fd->dev, "Clk rate %d-%ld idx %d\n", i, clk_rate, idx);
+ }
+
+ return 0;
+}
+/*
+ * msm_fd_hw_enable_clocks - Prepare and enable fd clocks.
+ * @fd: Pointer to fd device.
+ */
+static int msm_fd_hw_enable_clocks(struct msm_fd_device *fd)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < fd->clk_num; i++) {
+ ret = clk_prepare(fd->clk[i]);
+ if (ret < 0) {
+ dev_err(fd->dev, "clock prepare failed %d\n", i);
+ goto error;
+ }
+
+ ret = clk_enable(fd->clk[i]);
+ if (ret < 0) {
+ dev_err(fd->dev, "clock enable %d\n", i);
+ clk_unprepare(fd->clk[i]);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ for (; i > 0; i--) {
+ clk_disable(fd->clk[i - 1]);
+ clk_unprepare(fd->clk[i - 1]);
+ }
+ return ret;
+}
+/*
+ * msm_fd_hw_disable_clocks - Disable fd clock.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_disable_clocks(struct msm_fd_device *fd)
+{
+ int i;
+
+ for (i = 0; i < fd->clk_num; i++) {
+ clk_disable(fd->clk[i]);
+ clk_unprepare(fd->clk[i]);
+ }
+}
+
+/*
+ * msm_fd_hw_get_bus - Get bus bandwidth.
+ * @fd: Pointer to fd device.
+ *
+ * Read bus bandwidth information from device tree.
+ */
+int msm_fd_hw_get_bus(struct msm_fd_device *fd)
+{
+ size_t cnt;
+ unsigned int ab;
+ unsigned int ib;
+ unsigned int idx;
+ int usecase;
+ int ret;
+
+ idx = MSM_FD_MAX_CLK_RATES;
+
+ fd->bus_vectors = kcalloc(idx, sizeof(*fd->bus_vectors), GFP_KERNEL);
+ if (!fd->bus_vectors) {
+ dev_err(fd->dev, "No memory for bus vectors\n");
+ return -ENOMEM;
+ }
+
+ fd->bus_paths = kcalloc(idx, sizeof(*fd->bus_paths), GFP_KERNEL);
+ if (!fd->bus_paths) {
+ dev_err(fd->dev, "No memory for bus paths\n");
+ kfree(fd->bus_vectors);
+ fd->bus_vectors = NULL;
+ return -ENOMEM;
+ }
+
+ cnt = 0;
+ for (usecase = 0; usecase < idx; usecase++) {
+ ret = of_property_read_u32_index(fd->dev->of_node,
+ "qcom,bus-bandwidth-vectors", cnt++, &ab);
+ if (ret < 0)
+ break;
+
+ ret = of_property_read_u32_index(fd->dev->of_node,
+ "qcom,bus-bandwidth-vectors", cnt++, &ib);
+ if (ret < 0)
+ break;
+
+ fd->bus_vectors[usecase].src = MSM_BUS_MASTER_CPP;
+ fd->bus_vectors[usecase].dst = MSM_BUS_SLAVE_EBI_CH0;
+ fd->bus_vectors[usecase].ab = ab;
+ fd->bus_vectors[usecase].ib = ib;
+
+ fd->bus_paths[usecase].num_paths = 1;
+ fd->bus_paths[usecase].vectors = &fd->bus_vectors[usecase];
+
+ dev_dbg(fd->dev, "Bus bandwidth idx %d ab %u ib %u\n",
+ usecase, ab, ib);
+ }
+
+ fd->bus_scale_data.usecase = fd->bus_paths;
+ fd->bus_scale_data.num_usecases = usecase;
+ fd->bus_scale_data.name = MSM_FD_BUS_CLIENT_NAME;
+
+ return 0;
+}
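+
+/*
+ * Illustrative layout of "qcom,bus-bandwidth-vectors" parsed above (numbers
+ * are placeholders): the flat u32 array is read as <ab ib> pairs, one pair
+ * per usecase, e.g.
+ *
+ *	qcom,bus-bandwidth-vectors = <0 0>, <100000000 200000000>;
+ *
+ * gives usecase 0 with ab = ib = 0 (idle) and usecase 1 with ab = 100000000
+ * and ib = 200000000. The usecase index is later selected with the clock
+ * rate index in msm_fd_hw_bus_request().
+ */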
+
+/*
+ * msm_fd_hw_put_bus - Put bus bandwidth.
+ * @fd: Pointer to fd device.
+ */
+void msm_fd_hw_put_bus(struct msm_fd_device *fd)
+{
+ kfree(fd->bus_vectors);
+ fd->bus_vectors = NULL;
+
+ kfree(fd->bus_paths);
+ fd->bus_paths = NULL;
+
+ fd->bus_scale_data.num_usecases = 0;
+}
+/*
+ * msm_fd_hw_bus_request - Request bus for memory access.
+ * @fd: Pointer to fd device.
+ * @idx: Bus bandwidth array index described in device tree.
+ */
+static int msm_fd_hw_bus_request(struct msm_fd_device *fd, unsigned int idx)
+{
+ int ret;
+
+ fd->bus_client = msm_bus_scale_register_client(&fd->bus_scale_data);
+ if (!fd->bus_client) {
+ dev_err(fd->dev, "Fail to register bus client\n");
+ return -ENOENT;
+ }
+
+ ret = msm_bus_scale_client_update_request(fd->bus_client, idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail bus scale update %d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_fd_hw_bus_release - Release memory access bus.
+ * @fd: Pointer to fd device.
+ */
+static void msm_fd_hw_bus_release(struct msm_fd_device *fd)
+{
+ if (fd->bus_client) {
+ msm_bus_scale_unregister_client(fd->bus_client);
+ fd->bus_client = 0;
+ }
+}
+
+/*
+ * msm_fd_hw_get - Get fd hw for performing any hw operation.
+ * @fd: Pointer to fd device.
+ * @clock_rate_idx: Clock rate index.
+ *
+ * Prepare fd hw for operation. Have reference count protected by
+ * fd device mutex.
+ */
+int msm_fd_hw_get(struct msm_fd_device *fd, unsigned int clock_rate_idx)
+{
+ int ret;
+
+ mutex_lock(&fd->lock);
+
+ if (fd->ref_count == 0) {
+ ret = msm_fd_hw_enable_regulators(fd);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail to enable vdd\n");
+ goto error;
+ }
+
+ ret = msm_fd_hw_bus_request(fd, clock_rate_idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail bus request\n");
+ goto error_bus_request;
+ }
+
+ ret = msm_fd_hw_set_clock_rate_idx(fd, clock_rate_idx);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail to set clock rate idx\n");
+ goto error_clocks;
+ }
+
+ ret = msm_fd_hw_enable_clocks(fd);
+ if (ret < 0) {
+ dev_err(fd->dev, "Fail to enable clocks\n");
+ goto error_clocks;
+ }
+
+ if (msm_fd_hw_misc_irq_supported(fd))
+ msm_fd_hw_misc_irq_enable(fd);
+
+ ret = msm_fd_hw_set_dt_parms(fd);
+ if (ret < 0)
+ goto error_set_dt;
+ }
+
+ fd->ref_count++;
+ mutex_unlock(&fd->lock);
+
+ return 0;
+
+error_set_dt:
+ if (msm_fd_hw_misc_irq_supported(fd))
+ msm_fd_hw_misc_irq_disable(fd);
+ msm_fd_hw_disable_clocks(fd);
+error_clocks:
+ msm_fd_hw_bus_release(fd);
+error_bus_request:
+ msm_fd_hw_disable_regulators(fd);
+error:
+ mutex_unlock(&fd->lock);
+ return ret;
+}
+
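+/*
+ * Illustrative usage sketch (assumption, not part of this driver): callers
+ * are expected to bracket hardware access with a get/put pair, e.g.
+ *
+ *	ret = msm_fd_hw_get(fd, clock_rate_idx);
+ *	if (ret < 0)
+ *		return ret;
+ *	... program and run the core ...
+ *	msm_fd_hw_put(fd);
+ *
+ * where clock_rate_idx is whatever index msm_fd_hw_set_clock_rate_idx()
+ * accepts for this device.
+ */
+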
+/*
+ * msm_fd_hw_put - Put fd hw.
+ * @fd: Pointer to fd device.
+ *
+ * Release fd hw. The reference count is protected by the
+ * fd device mutex.
+ */
+void msm_fd_hw_put(struct msm_fd_device *fd)
+{
+ mutex_lock(&fd->lock);
+ BUG_ON(fd->ref_count == 0);
+
+ if (--fd->ref_count == 0) {
+ msm_fd_hw_halt(fd);
+
+ if (msm_fd_hw_misc_irq_supported(fd))
+ msm_fd_hw_misc_irq_disable(fd);
+
+ msm_fd_hw_bus_release(fd);
+ msm_fd_hw_disable_clocks(fd);
+ msm_fd_hw_disable_regulators(fd);
+ }
+ mutex_unlock(&fd->lock);
+}
+
+/*
+ * msm_fd_hw_attach_iommu - Attach iommu to face detection engine.
+ * @fd: Pointer to fd device.
+ *
+ * The iommu attach reference count is protected by the
+ * fd device mutex.
+ */
+static int msm_fd_hw_attach_iommu(struct msm_fd_device *fd)
+{
+ int ret;
+
+ mutex_lock(&fd->lock);
+
+ if (fd->iommu_attached_cnt == UINT_MAX) {
+ dev_err(fd->dev, "Max count reached! can not attach iommu\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (fd->iommu_attached_cnt == 0) {
+ ret = cam_smmu_get_handle(MSM_FD_SMMU_CB_NAME, &fd->iommu_hdl);
+ if (ret < 0) {
+ dev_err(fd->dev, "get handle failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ ret = cam_smmu_ops(fd->iommu_hdl, CAM_SMMU_ATTACH);
+ if (ret < 0) {
+ dev_err(fd->dev, "Can not attach iommu domain.\n");
+ goto error_attach;
+ }
+ }
+ fd->iommu_attached_cnt++;
+ mutex_unlock(&fd->lock);
+
+ return 0;
+
+error_attach:
+ cam_smmu_destroy_handle(fd->iommu_hdl);
+error:
+ mutex_unlock(&fd->lock);
+ return ret;
+}
+
+/*
+ * msm_fd_hw_detach_iommu - Detach iommu from face detection engine.
+ * @fd: Pointer to fd device.
+ *
+ * The iommu detach reference count is protected by the
+ * fd device mutex.
+ */
+static void msm_fd_hw_detach_iommu(struct msm_fd_device *fd)
+{
+ mutex_lock(&fd->lock);
+ if (fd->iommu_attached_cnt == 0) {
+ dev_err(fd->dev, "There is no attached device\n");
+ mutex_unlock(&fd->lock);
+ return;
+ }
+ if (--fd->iommu_attached_cnt == 0) {
+ cam_smmu_ops(fd->iommu_hdl, CAM_SMMU_DETACH);
+ cam_smmu_destroy_handle(fd->iommu_hdl);
+ }
+ mutex_unlock(&fd->lock);
+}
+
+/*
+ * msm_fd_hw_map_buffer - Map buffer to fd hw mmu.
+ * @pool: Pointer to fd memory pool.
+ * @fd: Ion fd.
+ * @buf: Fd buffer handle, for storing mapped buffer information.
+ *
+ * It will map ion fd to fd hw mmu.
+ */
+int msm_fd_hw_map_buffer(struct msm_fd_mem_pool *pool, int fd,
+ struct msm_fd_buf_handle *buf)
+{
+ int ret;
+
+ if (!pool || fd < 0)
+ return -EINVAL;
+
+ ret = msm_fd_hw_attach_iommu(pool->fd_device);
+ if (ret < 0)
+ return -ENOMEM;
+
+ buf->pool = pool;
+ buf->fd = fd;
+ ret = cam_smmu_get_phy_addr(pool->fd_device->iommu_hdl,
+ buf->fd, CAM_SMMU_MAP_RW,
+ &buf->addr, &buf->size);
+ if (ret < 0) {
+ pr_err("Error: cannot get phy addr\n");
+ msm_fd_hw_detach_iommu(pool->fd_device);
+ return -ENOMEM;
+ }
+ return buf->size;
+}
+
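+/*
+ * Illustrative usage sketch (assumption, not driver code): a plane is mapped
+ * by passing its ion fd together with a caller-owned buf handle and released
+ * with the unmap call below, e.g.
+ *
+ *	size = msm_fd_hw_map_buffer(pool, ion_fd, buf);
+ *	if (size < 0)
+ *		return size;
+ *	... hardware consumes buf->addr ...
+ *	msm_fd_hw_unmap_buffer(buf);
+ *
+ * "pool", "ion_fd" and "buf" are placeholders supplied by the caller.
+ */
+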
+/*
+ * msm_fd_hw_unmap_buffer - Unmap buffer from fd hw mmu.
+ * @buf: Fd buffer handle, for storing mapped buffer information.
+ */
+void msm_fd_hw_unmap_buffer(struct msm_fd_buf_handle *buf)
+{
+ if (buf->size) {
+ cam_smmu_put_phy_addr(buf->pool->fd_device->iommu_hdl,
+ buf->fd);
+ msm_fd_hw_detach_iommu(buf->pool->fd_device);
+ }
+
+ buf->fd = -1;
+ buf->pool = NULL;
+}
+
+/*
+ * msm_fd_hw_enable - Configure and enable fd hw.
+ * @fd: Fd device.
+ * @buffer: Buffer that needs to be processed.
+ *
+ * Configures and starts fd processing with the given buffer.
+ * NOTE: Fd will not be enabled if the engine is in the running state.
+ */
+static int msm_fd_hw_enable(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer)
+{
+ struct msm_fd_buf_handle *buf_handle =
+ buffer->vb.planes[0].mem_priv;
+
+ if (msm_fd_hw_is_runnig(fd)) {
+ dev_err(fd->dev, "Device is busy we can not enable\n");
+ return 0;
+ }
+
+ msm_fd_hw_srst(fd);
+ msm_fd_hw_set_size_mode(fd, buffer->format.size->reg_val);
+ msm_fd_hw_set_crop(fd, &buffer->format.crop);
+ msm_fd_hw_set_bytesperline(fd, buffer->format.bytesperline);
+ msm_fd_hw_set_image_addr(fd, buf_handle->addr);
+ msm_fd_hw_set_work_addr(fd, buffer->work_addr);
+ msm_fd_hw_set_min_face(fd, buffer->settings.min_size_index);
+ msm_fd_hw_set_threshold(fd, buffer->settings.threshold);
+ msm_fd_hw_set_direction_angle(fd, buffer->settings.direction_index,
+ buffer->settings.angle_index);
+ msm_fd_hw_run(fd);
+ return 1;
+}
+
+/*
+ * msm_fd_hw_try_enable - Try to enable fd hw.
+ * @fd: Fd device.
+ * @buffer: Buffer that needs to be processed.
+ * @state: Device state in which enabling is allowed.
+ *
+ * It will enable fd hw only if the actual device state is equal to the
+ * state argument.
+ */
+static int msm_fd_hw_try_enable(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer, enum msm_fd_device_state state)
+{
+ int enabled = 0;
+
+ if (state == fd->state) {
+
+ fd->state = MSM_FD_DEVICE_RUNNING;
+ atomic_set(&buffer->active, 1);
+
+ msm_fd_hw_enable(fd, buffer);
+ enabled = 1;
+ }
+ return enabled;
+}
+
+/*
+ * msm_fd_hw_next_buffer - Get next buffer from fd device processing queue.
+ * @fd: Fd device.
+ */
+static struct msm_fd_buffer *msm_fd_hw_next_buffer(struct msm_fd_device *fd)
+{
+ struct msm_fd_buffer *buffer = NULL;
+
+ if (!list_empty(&fd->buf_queue))
+ buffer = list_first_entry(&fd->buf_queue,
+ struct msm_fd_buffer, list);
+
+ return buffer;
+}
+
+/*
+ * msm_fd_hw_add_buffer - Add buffer to fd device processing queue.
+ * @fd: Fd device.
+ * @buffer: Fd buffer to be added to the queue.
+ */
+void msm_fd_hw_add_buffer(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer)
+{
+ spin_lock(&fd->slock);
+
+ atomic_set(&buffer->active, 0);
+ init_completion(&buffer->completion);
+
+ INIT_LIST_HEAD(&buffer->list);
+ list_add_tail(&buffer->list, &fd->buf_queue);
+ spin_unlock(&fd->slock);
+}
+
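+/*
+ * Buffer queue lifecycle, summarized from the helpers in this file
+ * (descriptive comment only): a buffer is queued with
+ * msm_fd_hw_add_buffer(), processing is started with
+ * msm_fd_hw_schedule_and_start() while the device is idle, and on
+ * completion msm_fd_hw_get_active_buffer() and msm_fd_hw_buffer_done()
+ * retire the active buffer before msm_fd_hw_schedule_next_buffer() either
+ * starts the next queued buffer or returns the device to the idle state.
+ */
+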
+/*
+ * msm_fd_hw_remove_buffers_from_queue - Remove buffers belonging to a vb2
+ * queue from the fd device processing queue.
+ * @fd: Fd device.
+ * @vb2_q: Vb2 queue whose buffers should be removed.
+ */
+void msm_fd_hw_remove_buffers_from_queue(struct msm_fd_device *fd,
+ struct vb2_queue *vb2_q)
+{
+ struct msm_fd_buffer *curr_buff;
+ struct msm_fd_buffer *temp;
+ struct msm_fd_buffer *active_buffer;
+ unsigned long time;
+
+ spin_lock(&fd->slock);
+
+ active_buffer = NULL;
+ list_for_each_entry_safe(curr_buff, temp, &fd->buf_queue, list) {
+ if (curr_buff->vb.vb2_queue == vb2_q) {
+
+ if (atomic_read(&curr_buff->active))
+ active_buffer = curr_buff;
+ else
+ list_del(&curr_buff->list);
+
+ }
+ }
+ spin_unlock(&fd->slock);
+
+ /* We need to wait for the active buffer to finish */
+ if (active_buffer) {
+ time = wait_for_completion_timeout(&active_buffer->completion,
+ msecs_to_jiffies(MSM_FD_PROCESSING_TIMEOUT_MS));
+ if (!time) {
+ /* Remove active buffer */
+ msm_fd_hw_get_active_buffer(fd);
+ /* Schedule if other buffers are present in device */
+ msm_fd_hw_schedule_next_buffer(fd);
+ }
+ }
+
+ return;
+}
+
+/*
+ * msm_fd_hw_buffer_done - Mark a buffer as done and remove it from
+ * the processing queue.
+ * @fd: Fd device.
+ * @buffer: Fd buffer.
+ */
+int msm_fd_hw_buffer_done(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer)
+{
+ int ret = 0;
+
+ spin_lock(&fd->slock);
+
+ if (atomic_read(&buffer->active)) {
+ atomic_set(&buffer->active, 0);
+ complete_all(&buffer->completion);
+ } else {
+ dev_err(fd->dev, "Buffer is not active\n");
+ ret = -1;
+ }
+
+ spin_unlock(&fd->slock);
+
+ return ret;
+}
+
+/*
+ * msm_fd_hw_get_active_buffer - Get active buffer from fd processing queue.
+ * @fd: Fd device.
+ */
+struct msm_fd_buffer *msm_fd_hw_get_active_buffer(struct msm_fd_device *fd)
+{
+ struct msm_fd_buffer *buffer = NULL;
+
+ spin_lock(&fd->slock);
+ if (!list_empty(&fd->buf_queue)) {
+ buffer = list_first_entry(&fd->buf_queue,
+ struct msm_fd_buffer, list);
+ list_del(&buffer->list);
+ }
+ spin_unlock(&fd->slock);
+
+ return buffer;
+}
+
+/*
+ * msm_fd_hw_schedule_and_start - Schedule active buffer and start processing.
+ * @fd: Fd device.
+ *
+ * This can be executed only when device is in idle state.
+ */
+int msm_fd_hw_schedule_and_start(struct msm_fd_device *fd)
+{
+ struct msm_fd_buffer *buf;
+
+ spin_lock(&fd->slock);
+ buf = msm_fd_hw_next_buffer(fd);
+ if (buf)
+ msm_fd_hw_try_enable(fd, buf, MSM_FD_DEVICE_IDLE);
+
+ spin_unlock(&fd->slock);
+
+ return 0;
+}
+
+/*
+ * msm_fd_hw_schedule_next_buffer - Schedule next buffer and start processing.
+ * @fd: Fd device.
+ *
+ * NOTE: This can be executed only when device is in running state.
+ */
+int msm_fd_hw_schedule_next_buffer(struct msm_fd_device *fd)
+{
+ struct msm_fd_buffer *buf;
+ int ret;
+
+ spin_lock(&fd->slock);
+
+ /* We can schedule next buffer only in running state */
+ if (fd->state != MSM_FD_DEVICE_RUNNING) {
+ dev_err(fd->dev, "Can not schedule next buffer\n");
+ spin_unlock(&fd->slock);
+ return -EBUSY;
+ }
+
+ buf = msm_fd_hw_next_buffer(fd);
+ if (buf) {
+ ret = msm_fd_hw_try_enable(fd, buf, MSM_FD_DEVICE_RUNNING);
+ if (ret == 0) {
+ dev_err(fd->dev, "Can not process next buffer\n");
+ spin_unlock(&fd->slock);
+ return -EBUSY;
+ }
+ } else {
+ fd->state = MSM_FD_DEVICE_IDLE;
+ }
+ spin_unlock(&fd->slock);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.h b/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.h
new file mode 100644
index 000000000000..ea2e4cc1d117
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.h
@@ -0,0 +1,82 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_FD_HW_H__
+#define __MSM_FD_HW_H__
+
+#include "msm_fd_dev.h"
+
+int msm_fd_hw_get_face_count(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_result_x(struct msm_fd_device *fd, int idx);
+
+int msm_fd_hw_get_result_y(struct msm_fd_device *fd, int idx);
+
+void msm_fd_hw_get_result_conf_size(struct msm_fd_device *fd,
+ int idx, u32 *conf, u32 *size);
+
+void msm_fd_hw_get_result_angle_pose(struct msm_fd_device *fd, int idx,
+ u32 *angle, u32 *pose);
+
+int msm_fd_hw_request_irq(struct platform_device *pdev,
+ struct msm_fd_device *fd, work_func_t work_func);
+
+void msm_fd_hw_release_irq(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_revision(struct msm_fd_device *fd);
+
+void msm_fd_hw_release_mem_resources(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_mem_resources(struct platform_device *pdev,
+ struct msm_fd_device *fd);
+
+int msm_fd_hw_get_iommu(struct msm_fd_device *fd);
+
+void msm_fd_hw_put_iommu(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_regulators(struct msm_fd_device *fd);
+
+int msm_fd_hw_put_regulators(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_clocks(struct msm_fd_device *fd);
+
+int msm_fd_hw_put_clocks(struct msm_fd_device *fd);
+
+int msm_fd_hw_get_bus(struct msm_fd_device *fd);
+
+void msm_fd_hw_put_bus(struct msm_fd_device *fd);
+
+int msm_fd_hw_get(struct msm_fd_device *fd, unsigned int clock_rate_idx);
+
+void msm_fd_hw_put(struct msm_fd_device *fd);
+
+int msm_fd_hw_map_buffer(struct msm_fd_mem_pool *pool, int fd,
+ struct msm_fd_buf_handle *buf);
+
+void msm_fd_hw_unmap_buffer(struct msm_fd_buf_handle *buf);
+
+void msm_fd_hw_add_buffer(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer);
+
+void msm_fd_hw_remove_buffers_from_queue(struct msm_fd_device *fd,
+ struct vb2_queue *vb2_q);
+
+int msm_fd_hw_buffer_done(struct msm_fd_device *fd,
+ struct msm_fd_buffer *buffer);
+
+struct msm_fd_buffer *msm_fd_hw_get_active_buffer(struct msm_fd_device *fd);
+
+int msm_fd_hw_schedule_and_start(struct msm_fd_device *fd);
+
+int msm_fd_hw_schedule_next_buffer(struct msm_fd_device *fd);
+
+#endif /* __MSM_FD_HW_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_regs.h b/drivers/media/platform/msm/camera_v2/fd/msm_fd_regs.h
new file mode 100644
index 000000000000..54323f84161f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_regs.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_FD_REGS_H__
+#define __MSM_FD_REGS_H__
+
+/* FD core registers */
+#define MSM_FD_CONTROL (0x00)
+#define MSM_FD_CONTROL_SRST (1 << 0)
+#define MSM_FD_CONTROL_RUN (1 << 1)
+#define MSM_FD_CONTROL_FINISH (1 << 2)
+
+#define MSM_FD_RESULT_CNT (0x04)
+#define MSM_FD_RESULT_CNT_MASK (0x3F)
+
+#define MSM_FD_CONDT (0x08)
+#define MSM_FD_CONDT_MIN_MASK (0x03)
+#define MSM_FD_CONDT_MIN_SHIFT (0x00)
+#define MSM_FD_CONDT_DIR_MAX (0x08)
+#define MSM_FD_CONDT_DIR_MASK (0x3C)
+#define MSM_FD_CONDT_DIR_SHIFT (0x02)
+
+#define MSM_FD_START_X (0x0C)
+#define MSM_FD_START_X_MASK (0x3FF)
+
+#define MSM_FD_START_Y (0x10)
+#define MSM_FD_START_Y_MASK (0x1FF)
+
+#define MSM_FD_SIZE_X (0x14)
+#define MSM_FD_SIZE_X_MASK (0x3FF)
+
+#define MSM_FD_SIZE_Y (0x18)
+#define MSM_FD_SIZE_Y_MASK (0x1FF)
+
+#define MSM_FD_DHINT (0x1C)
+#define MSM_FD_DHINT_MASK (0xF)
+
+#define MSM_FD_IMAGE_ADDR (0x24)
+#define MSM_FD_IMAGE_ADDR_ALIGN (0x8)
+
+#define MSM_FD_WORK_ADDR (0x28)
+#define MSM_FD_WORK_ADDR_ALIGN (0x8)
+
+#define MSM_FD_IMAGE_SIZE (0x2C)
+#define MSM_FD_IMAGE_SIZE_QVGA (0x0)
+#define MSM_FD_IMAGE_SIZE_VGA (0x1)
+#define MSM_FD_IMAGE_SIZE_WQVGA (0x2)
+#define MSM_FD_IMAGE_SIZE_WVGA (0x3)
+
+#define MSM_FD_LINE_BYTES (0x30)
+#define MSM_FD_LINE_BYTES_MASK (0x1FFF)
+#define MSM_FD_LINE_BYTES_ALIGN (0x8)
+
+#define MSM_FD_RESULT_CENTER_X(x) (0x400 + (0x10 * (x)))
+
+#define MSM_FD_RESULT_CENTER_Y(x) (0x404 + (0x10 * (x)))
+
+#define MSM_FD_RESULT_CONF_SIZE(x) (0x408 + (0x10 * (x)))
+#define MSM_FD_RESULT_SIZE_MASK (0x1FF)
+#define MSM_FD_RESULT_SIZE_SHIFT (0x000)
+#define MSM_FD_RESULT_CONF_MASK (0xF)
+#define MSM_FD_RESULT_CONF_SHIFT (0x9)
+
+#define MSM_FD_RESULT_ANGLE_POSE(x) (0x40C + (0x10 * (x)))
+#define MSM_FD_RESULT_ANGLE_MASK (0x1FF)
+#define MSM_FD_RESULT_ANGLE_SHIFT (0x000)
+#define MSM_FD_RESULT_POSE_MASK (0x7)
+#define MSM_FD_RESULT_POSE_SHIFT (0x9)
+#define MSM_FD_RESULT_POSE_FRONT (0x1)
+#define MSM_FD_RESULT_POSE_RIGHT_DIAGONAL (0x2)
+#define MSM_FD_RESULT_POSE_RIGHT (0x3)
+#define MSM_FD_RESULT_POSE_LEFT_DIAGONAL (0x4)
+#define MSM_FD_RESULT_POSE_LEFT (0x5)
+
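+/*
+ * Illustrative field extraction (assumed typical usage; "val" is a
+ * hypothetical 32-bit read of MSM_FD_RESULT_CONF_SIZE(idx)):
+ *
+ *	size = (val >> MSM_FD_RESULT_SIZE_SHIFT) & MSM_FD_RESULT_SIZE_MASK;
+ *	conf = (val >> MSM_FD_RESULT_CONF_SHIFT) & MSM_FD_RESULT_CONF_MASK;
+ */
+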
+/* FD misc registers */
+#define MSM_FD_MISC_HW_VERSION (0x00)
+#define MSM_FD_MISC_CGC_DISABLE (0x04)
+#define MSM_FD_HW_STOP (0x08)
+
+#define MSM_FD_MISC_SW_RESET (0x10)
+#define MSM_FD_MISC_SW_RESET_SET (1 << 0)
+
+#define MSM_FD_MISC_FIFO_STATUS (0x14)
+#define MSM_FD_MISC_FIFO_STATUS_RFIFO_DCNT_MAST (0x1F)
+#define MSM_FD_MISC_FIFO_STATUS_RFIFO_DCNT_SHIFT (0)
+#define MSM_FD_MISC_FIFO_STATUS_RFIFO_FULL (1 << 13)
+#define MSM_FD_MISC_FIFO_STATUS_RFIFO_EMPTY (1 << 14)
+#define MSM_FD_MISC_FIFO_STATUS_WFIFO_DCNT_MAST (0x1F)
+#define MSM_FD_MISC_FIFO_STATUS_WFIFO_DCNT_SHIFT (16)
+#define MSM_FD_MISC_FIFO_STATUS_WFIFO_EMPTY (1 << 29)
+#define MSM_FD_MISC_FIFO_STATUS_WFIFO_FULL (1 << 30)
+
+#define MSM_FD_MISC_DATA_ENDIAN (0x18)
+#define MSM_FD_MISC_DATA_ENDIAN_BYTE_SWAP_SET (1 << 0)
+
+#define MSM_FD_MISC_VBIF_REQ_PRIO (0x20)
+#define MSM_FD_MISC_VBIF_REQ_PRIO_MASK (0x3)
+
+#define MSM_FD_MISC_VBIF_PRIO_LEVEL (0x24)
+#define MSM_FD_MISC_VBIF_PRIO_LEVEL_MASK (0x3)
+
+#define MSM_FD_MISC_VBIF_MMU_PDIRECT (0x28)
+#define MSM_FD_MISC_VBIF_MMU_PDIRECT_INCREMENT (1 << 0)
+
+#define MSM_FD_MISC_VBIF_IRQ_CLR (0x30)
+#define MSM_FD_MISC_VBIF_IRQ_CLR_ALL (1 << 0)
+
+#define MSM_FD_MISC_VBIF_DONE_STATUS (0x34)
+#define MSM_FD_MISC_VBIF_DONE_STATUS_WRITE (1 << 0)
+#define MSM_FD_MISC_VBIF_DONE_STATUS_READ (1 << 1)
+
+#define MSM_FD_MISC_IRQ_MASK (0x50)
+#define MSM_FD_MISC_IRQ_MASK_HALT_REQ (1 << 1)
+#define MSM_FD_MISC_IRQ_MASK_CORE_IRQ (1 << 0)
+
+#define MSM_FD_MISC_IRQ_STATUS (0x54)
+#define MSM_FD_MISC_IRQ_STATUS_HALT_REQ (1 << 1)
+#define MSM_FD_MISC_IRQ_STATUS_CORE_IRQ (1 << 0)
+
+#define MSM_FD_MISC_IRQ_CLEAR (0x58)
+#define MSM_FD_MISC_IRQ_CLEAR_HALT (1 << 1)
+#define MSM_FD_MISC_IRQ_CLEAR_CORE (1 << 0)
+
+#define MSM_FD_MISC_TEST_BUS_SEL (0x40)
+#define MSM_FD_MISC_TEST_BUS_SEL_TEST_MODE_MASK (0xF)
+#define MSM_FD_MISC_TEST_BUS_SEL_TEST_MODE_SHIFT (0)
+#define MSM_FD_MISC_TEST_BUS_SEL_7_0_MASK (0x3)
+#define MSM_FD_MISC_TEST_BUS_SEL_7_0_SHIFT (16)
+#define MSM_FD_MISC_TEST_BUS_SEL_15_8_MASK (0x3)
+#define MSM_FD_MISC_TEST_BUS_SEL_15_8_SHIFT (18)
+#define MSM_FD_MISC_TEST_BUS_SEL_23_16_MASK (0x3)
+#define MSM_FD_MISC_TEST_BUS_SEL_23_16_SHIFT (20)
+#define MSM_FD_MISC_TEST_BUS_SEL_31_24_MASK (0x3)
+#define MSM_FD_MISC_TEST_BUS_SEL_31_24_SHIFT (22)
+
+#define MSM_FD_MISC_AHB_TEST_EN (0x44)
+#define MSM_FD_MISC_AHB_TEST_EN_MASK (0x3)
+
+#define MSM_FD_MISC_FD2VBIF_INT_TEST_SEL (0x48)
+#define MSM_FD_MISC_FD2VBIF_INT_TEST_MASK (0xF)
+
+#define MSM_FD_MISC_TEST_BUS (0x4C)
+
+/* FD vbif registers */
+#define MSM_FD_VBIF_CLKON (0x04)
+#define MSM_FD_VBIF_QOS_OVERRIDE_EN (0x10)
+#define MSM_FD_VBIF_QOS_OVERRIDE_REQPRI (0x18)
+#define MSM_FD_VBIF_QOS_OVERRIDE_PRILVL (0x1C)
+#define MSM_FD_VBIF_IN_RD_LIM_CONF0 (0xB0)
+#define MSM_FD_VBIF_IN_WR_LIM_CONF0 (0xC0)
+#define MSM_FD_VBIF_OUT_RD_LIM_CONF0 (0xD0)
+#define MSM_FD_VBIF_OUT_WR_LIM_CONF0 (0xD4)
+#define MSM_FD_VBIF_DDR_OUT_MAX_BURST (0xD8)
+#define MSM_FD_VBIF_ARB_CTL (0xF0)
+#define MSM_FD_VBIF_OUT_AXI_AMEMTYPE_CONF0 (0x160)
+#define MSM_FD_VBIF_OUT_AXI_AOOO_EN (0x178)
+#define MSM_FD_VBIF_OUT_AXI_AOOO (0x17c)
+#define MSM_FD_VBIF_ROUND_ROBIN_QOS_ARB (0x124)
+
+#endif /* __MSM_FD_REGS_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/Makefile b/drivers/media/platform/msm/camera_v2/isp/Makefile
new file mode 100644
index 000000000000..dfcc38aff16f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common/
+obj-$(CONFIG_MSMB_CAMERA) += msm_buf_mgr.o msm_isp_util.o msm_isp_axi_util.o msm_isp_stats_util.o
+obj-$(CONFIG_MSMB_CAMERA) += msm_isp47.o msm_isp46.o msm_isp44.o msm_isp40.o msm_isp.o
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
new file mode 100644
index 000000000000..2352c0a94d65
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -0,0 +1,1462 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-core.h>
+#include <media/msm_camera.h>
+#include <media/msm_isp.h>
+
+#include "msm.h"
+#include "msm_buf_mgr.h"
+#include "cam_smmu_api.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define BUF_DEBUG_FULL 0
+#define MAX_LIST_COUNT 100
+
+struct msm_isp_bufq *msm_isp_get_bufq(
+ struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ uint32_t bufq_index = bufq_handle & 0xFF;
+
+ /* bufq_handle cannot be 0 and the index must be within the bufq array */
+ if ((bufq_handle == 0) ||
+ (bufq_index >= buf_mgr->num_buf_q))
+ return NULL;
+
+ bufq = &buf_mgr->bufq[bufq_index];
+ if (bufq->bufq_handle == bufq_handle)
+ return bufq;
+
+ return NULL;
+}
+
+static struct msm_isp_buffer *msm_isp_get_buf_ptr(
+ struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return buf_info;
+ }
+
+ if (bufq->num_bufs <= buf_index) {
+ pr_err("%s: Invalid buf index\n", __func__);
+ return buf_info;
+ }
+ buf_info = &bufq->bufs[buf_index];
+ return buf_info;
+}
+
+static uint32_t msm_isp_get_buf_handle(
+ struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t session_id, uint32_t stream_id)
+{
+ int i;
+ uint32_t embedded_stream_id = 0;
+
+ for (i = 0; i < buf_mgr->num_buf_q; i++) {
+ if (buf_mgr->bufq[i].session_id == session_id &&
+ buf_mgr->bufq[i].stream_id == stream_id)
+ return 0;
+ }
+
+ /* put stream id in handle; if it is a stats stream, use 0xFFFF */
+ if (stream_id & (1 << 31))
+ embedded_stream_id = 0xFFFF;
+ else
+ embedded_stream_id = stream_id;
+
+ for (i = 0; i < buf_mgr->num_buf_q; i++) {
+ if (buf_mgr->bufq[i].bufq_handle == 0) {
+ memset(&buf_mgr->bufq[i],
+ 0, sizeof(struct msm_isp_bufq));
+ buf_mgr->bufq[i].bufq_handle =
+ embedded_stream_id << 8 | i;
+ return buf_mgr->bufq[i].bufq_handle;
+ }
+ }
+ return 0;
+}
+
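+/*
+ * Handle layout note (derived from msm_isp_get_buf_handle() and
+ * msm_isp_get_bufq() above): a bufq handle packs the stream id, or 0xFFFF
+ * for stats streams, in the upper bits and the bufq array index in the low
+ * byte:
+ *
+ *	handle = (embedded_stream_id << 8) | index;
+ *	index  = handle & 0xFF;
+ */
+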
+static int msm_isp_free_bufq_handle(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle)
+{
+ struct msm_isp_bufq *bufq =
+ msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq)
+ return -EINVAL;
+
+ /* Set everything except lock to 0 */
+ bufq->bufq_handle = 0;
+ bufq->bufs = 0;
+ bufq->session_id = 0;
+ bufq->stream_id = 0;
+ bufq->num_bufs = 0;
+ bufq->buf_type = 0;
+ memset(&bufq->head, 0, sizeof(bufq->head));
+ memset(&bufq->share_head, 0, sizeof(bufq->share_head));
+ bufq->buf_client_count = 0;
+
+ return 0;
+}
+
+static void msm_isp_copy_planes_from_v4l2_buffer(
+ struct msm_isp_qbuf_buffer *qbuf_buf,
+ const struct v4l2_buffer *v4l2_buf)
+{
+ int i;
+ qbuf_buf->num_planes = v4l2_buf->length;
+ for (i = 0; i < qbuf_buf->num_planes; i++) {
+ qbuf_buf->planes[i].addr = v4l2_buf->m.planes[i].m.userptr;
+ qbuf_buf->planes[i].offset = v4l2_buf->m.planes[i].data_offset;
+ qbuf_buf->planes[i].length = v4l2_buf->m.planes[i].length;
+ }
+}
+
+static int msm_isp_prepare_v4l2_buf(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buffer *buf_info,
+ struct msm_isp_qbuf_buffer *qbuf_buf,
+ uint32_t stream_id)
+{
+ int i, rc = -1;
+ int ret;
+ struct msm_isp_buffer_mapped_info *mapped_info;
+ uint32_t accu_length = 0;
+
+ for (i = 0; i < qbuf_buf->num_planes; i++) {
+ mapped_info = &buf_info->mapped_info[i];
+ mapped_info->buf_fd = qbuf_buf->planes[i].addr;
+ ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
+ mapped_info->buf_fd,
+ CAM_SMMU_MAP_RW,
+ &(mapped_info->paddr),
+ &(mapped_info->len));
+ if (ret) {
+ rc = -EINVAL;
+ pr_err_ratelimited("%s: cannot map address", __func__);
+ goto get_phy_err;
+ }
+
+ mapped_info->paddr += accu_length;
+ accu_length += qbuf_buf->planes[i].length;
+
+ CDBG("%s: plane: %d addr:%lu\n",
+ __func__, i, (unsigned long)mapped_info->paddr);
+
+ }
+ buf_info->num_planes = qbuf_buf->num_planes;
+ return 0;
+get_phy_err:
+ /* unmap the planes that were mapped before the failure */
+ for (i--; i >= 0; i--) {
+ mapped_info = &buf_info->mapped_info[i];
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl, mapped_info->buf_fd);
+ }
+ return rc;
+}
+
+static void msm_isp_unprepare_v4l2_buf(
+ struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buffer *buf_info,
+ uint32_t stream_id)
+{
+ int i;
+ struct msm_isp_buffer_mapped_info *mapped_info;
+ struct msm_isp_bufq *bufq = NULL;
+
+ if (!buf_mgr || !buf_info) {
+ pr_err("%s: NULL ptr %p %p\n", __func__,
+ buf_mgr, buf_info);
+ return;
+ }
+
+ bufq = msm_isp_get_bufq(buf_mgr, buf_info->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq, stream id %x\n",
+ __func__, stream_id);
+ return;
+ }
+
+ for (i = 0; i < buf_info->num_planes; i++) {
+ mapped_info = &buf_info->mapped_info[i];
+
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl, mapped_info->buf_fd);
+ }
+ return;
+}
+
+static int msm_isp_map_buf(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buffer_mapped_info *mapped_info, uint32_t fd)
+{
+ int rc = 0;
+ int ret;
+
+ if (!buf_mgr || !mapped_info) {
+ pr_err_ratelimited("%s: %d] NULL ptr buf_mgr %p mapped_info %p\n",
+ __func__, __LINE__, buf_mgr, mapped_info);
+ return -EINVAL;
+ }
+ ret = cam_smmu_get_phy_addr(buf_mgr->iommu_hdl,
+ fd,
+ CAM_SMMU_MAP_RW,
+ &(mapped_info->paddr),
+ &(mapped_info->len));
+
+ if (ret) {
+ rc = -EINVAL;
+ pr_err_ratelimited("%s: cannot map address", __func__);
+ goto smmu_map_error;
+ }
+ CDBG("%s: addr:%lu\n",
+ __func__, (unsigned long)mapped_info->paddr);
+
+ return rc;
+smmu_map_error:
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ fd);
+ return rc;
+}
+
+static int msm_isp_unmap_buf(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t fd)
+{
+ if (!buf_mgr) {
+ pr_err_ratelimited("%s: %d] NULL ptr buf_mgr\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ cam_smmu_put_phy_addr(buf_mgr->iommu_hdl,
+ fd);
+
+ return 0;
+}
+
+static int msm_isp_buf_prepare(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info, struct vb2_buffer *vb2_buf)
+{
+ int rc = -1;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ struct msm_isp_qbuf_buffer buf;
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr,
+ info->handle, info->buf_idx);
+ if (!buf_info) {
+ pr_err("Invalid buffer prepare\n");
+ return rc;
+ }
+
+ bufq = msm_isp_get_bufq(buf_mgr, buf_info->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n",
+ __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ rc = buf_info->state;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+
+ if (buf_info->state != MSM_ISP_BUFFER_STATE_INITIALIZED) {
+ pr_err("%s: Invalid buffer state: %d bufq %x buf-id %d\n",
+ __func__, buf_info->state, bufq->bufq_handle,
+ buf_info->buf_idx);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+
+ if (vb2_buf) {
+ msm_isp_copy_planes_from_v4l2_buffer(&buf, &vb2_buf->v4l2_buf);
+ buf_info->vb2_buf = vb2_buf;
+ } else {
+ buf = info->buffer;
+ }
+
+ rc = msm_isp_prepare_v4l2_buf(buf_mgr, buf_info, &buf, bufq->stream_id);
+ if (rc < 0) {
+ pr_err_ratelimited("%s: Prepare buffer error\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ buf_info->state = MSM_ISP_BUFFER_STATE_PREPARED;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+}
+
+static int msm_isp_buf_unprepare_all(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t buf_handle)
+{
+ int rc = -1, i;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ bufq = msm_isp_get_bufq(buf_mgr, buf_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ for (i = 0; i < bufq->num_bufs; i++) {
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, buf_handle, i);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_UNUSED ||
+ buf_info->state ==
+ MSM_ISP_BUFFER_STATE_INITIALIZED)
+ continue;
+
+ if (MSM_ISP_BUFFER_SRC_HAL == BUF_SRC(bufq->stream_id)) {
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED ||
+ buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED)
+ buf_mgr->vb2_ops->put_buf(buf_info->vb2_buf,
+ bufq->session_id, bufq->stream_id);
+ }
+ msm_isp_unprepare_v4l2_buf(buf_mgr, buf_info, bufq->stream_id);
+ }
+ return 0;
+}
+
+static int msm_isp_get_buf_by_index(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info)
+{
+ int rc = -EINVAL;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *temp_buf_info;
+ uint32_t i = 0;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (buf_index >= bufq->num_bufs) {
+ pr_err("%s: Invalid buf index: %d max: %d\n", __func__,
+ buf_index, bufq->num_bufs);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+
+ *buf_info = NULL;
+ for (i = 0; i < bufq->num_bufs; i++) {
+ temp_buf_info = &bufq->bufs[i];
+ if (temp_buf_info && temp_buf_info->buf_idx == buf_index) {
+ *buf_info = temp_buf_info;
+ break;
+ }
+ }
+
+ if (*buf_info) {
+ pr_debug("Found buf in isp buf mgr");
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+}
+
+static int msm_isp_buf_unprepare(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t buf_handle, int32_t buf_idx)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ bufq = msm_isp_get_bufq(buf_mgr, buf_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return -EINVAL;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, buf_handle, buf_idx);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return -EINVAL;
+ }
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_UNUSED ||
+ buf_info->state == MSM_ISP_BUFFER_STATE_INITIALIZED)
+ return 0;
+
+ if (MSM_ISP_BUFFER_SRC_HAL == BUF_SRC(bufq->stream_id)) {
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED ||
+ buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED)
+ buf_mgr->vb2_ops->put_buf(buf_info->vb2_buf,
+ bufq->session_id, bufq->stream_id);
+ }
+ msm_isp_unprepare_v4l2_buf(buf_mgr, buf_info, bufq->stream_id);
+
+ return 0;
+}
+
+static int msm_isp_get_buf(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
+ uint32_t bufq_handle, struct msm_isp_buffer **buf_info,
+ uint32_t *buf_cnt)
+{
+ int rc = -1;
+ unsigned long flags;
+ unsigned int list_count = 0;
+ struct msm_isp_buffer *temp_buf_info;
+ struct msm_isp_bufq *bufq = NULL;
+ struct vb2_buffer *vb2_buf = NULL;
+
+ if (buf_mgr->open_count == 0) {
+ pr_err_ratelimited("%s: bug mgr open cnt = 0\n",
+ __func__);
+ return 0;
+ }
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err_ratelimited("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (!bufq->bufq_handle) {
+ pr_err_ratelimited("%s: Invalid bufq handle\n", __func__);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+
+ *buf_info = NULL;
+ *buf_cnt = 0;
+ if (bufq->buf_type == ISP_SHARE_BUF) {
+ list_count = 0;
+ list_for_each_entry(temp_buf_info,
+ &bufq->share_head, share_list) {
+ if (!temp_buf_info->buf_used[id]) {
+ temp_buf_info->buf_used[id] = 1;
+ temp_buf_info->buf_get_count++;
+ *buf_cnt = temp_buf_info->buf_get_count;
+ if (temp_buf_info->buf_get_count ==
+ bufq->buf_client_count)
+ list_del(
+ &temp_buf_info->share_list);
+ if (temp_buf_info->buf_reuse_flag) {
+ kfree(temp_buf_info);
+ } else {
+ *buf_info = temp_buf_info;
+ rc = 0;
+ }
+ spin_unlock_irqrestore(
+ &bufq->bufq_lock, flags);
+ return rc;
+ } else if (temp_buf_info->buf_used[id] &&
+ temp_buf_info->buf_reuse_flag) {
+ spin_unlock_irqrestore(
+ &bufq->bufq_lock, flags);
+ return rc;
+ }
+ list_count++;
+ if (list_count > MAX_LIST_COUNT) {
+ pr_err_ratelimited("%s: %d share_list corruption, list corrupt! count = %d\n",
+ __func__, __LINE__, list_count);
+ spin_unlock_irqrestore(
+ &bufq->bufq_lock, flags);
+ return -EINVAL;
+ }
+ }
+ }
+
+ switch (BUF_SRC(bufq->stream_id)) {
+ case MSM_ISP_BUFFER_SRC_NATIVE:
+ list_for_each_entry(temp_buf_info, &bufq->head, list) {
+ if (temp_buf_info->state ==
+ MSM_ISP_BUFFER_STATE_QUEUED) {
+ list_del(&temp_buf_info->list);
+ *buf_info = temp_buf_info;
+ break;
+ }
+ }
+ break;
+ case MSM_ISP_BUFFER_SRC_HAL:
+ vb2_buf = buf_mgr->vb2_ops->get_buf(
+ bufq->session_id, bufq->stream_id);
+ if (vb2_buf) {
+ if (vb2_buf->v4l2_buf.index < bufq->num_bufs) {
+ *buf_info = &bufq->bufs[vb2_buf
+ ->v4l2_buf.index];
+ (*buf_info)->vb2_buf = vb2_buf;
+ } else {
+ pr_err("%s: Incorrect buf index %d\n",
+ __func__, vb2_buf->v4l2_buf.index);
+ rc = -EINVAL;
+ }
+ if ((*buf_info) == NULL) {
+ buf_mgr->vb2_ops->put_buf(vb2_buf,
+ bufq->session_id, bufq->stream_id);
+ pr_err("%s: buf index %d not found!\n",
+ __func__, vb2_buf->v4l2_buf.index);
+ rc = -EINVAL;
+
+ }
+ } else {
+ CDBG("%s: No HAL Buffer session_id: %d stream_id: %d\n",
+ __func__, bufq->session_id, bufq->stream_id);
+ rc = -EINVAL;
+ }
+ break;
+ case MSM_ISP_BUFFER_SRC_SCRATCH:
+ /* In the scratch buf case we have only one buffer in the queue.
+ * We return the same buffer every time. */
+ *buf_info = list_entry(bufq->head.next, typeof(**buf_info),
+ list);
+ break;
+ default:
+ pr_err("%s: Incorrect buf source.\n", __func__);
+ rc = -EINVAL;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+
+ if (!(*buf_info)) {
+ if (bufq->buf_type == ISP_SHARE_BUF) {
+ temp_buf_info = kzalloc(
+ sizeof(struct msm_isp_buffer), GFP_ATOMIC);
+ if (temp_buf_info) {
+ temp_buf_info->buf_reuse_flag = 1;
+ temp_buf_info->buf_used[id] = 1;
+ temp_buf_info->buf_get_count = 1;
+ list_add_tail(&temp_buf_info->share_list,
+ &bufq->share_head);
+ } else
+ rc = -ENOMEM;
+ }
+ } else {
+ (*buf_info)->state = MSM_ISP_BUFFER_STATE_DEQUEUED;
+ if (bufq->buf_type == ISP_SHARE_BUF) {
+ memset((*buf_info)->buf_used, 0,
+ sizeof(uint8_t) * bufq->buf_client_count);
+ (*buf_info)->buf_used[id] = 1;
+ (*buf_info)->buf_get_count = 1;
+ (*buf_info)->buf_put_count = 0;
+ (*buf_info)->buf_reuse_flag = 0;
+ list_add_tail(&(*buf_info)->share_list,
+ &bufq->share_head);
+ }
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+}
+
+static int msm_isp_put_buf(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index)
+{
+ int rc = -1;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+
+ buf_info->buf_get_count = 0;
+ buf_info->buf_put_count = 0;
+ memset(buf_info->buf_used, 0, sizeof(buf_info->buf_used));
+
+ switch (buf_info->state) {
+ case MSM_ISP_BUFFER_STATE_PREPARED:
+ if (MSM_ISP_BUFFER_SRC_SCRATCH == BUF_SRC(bufq->stream_id))
+ list_add_tail(&buf_info->list, &bufq->head);
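+ /* fall through: the DEQUEUED/DIVERTED handling also applies to PREPARED */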
+ case MSM_ISP_BUFFER_STATE_DEQUEUED:
+ case MSM_ISP_BUFFER_STATE_DIVERTED:
+ if (MSM_ISP_BUFFER_SRC_NATIVE == BUF_SRC(bufq->stream_id))
+ list_add_tail(&buf_info->list, &bufq->head);
+ else if (MSM_ISP_BUFFER_SRC_HAL == BUF_SRC(bufq->stream_id))
+ buf_mgr->vb2_ops->put_buf(buf_info->vb2_buf,
+ bufq->session_id, bufq->stream_id);
+ buf_info->state = MSM_ISP_BUFFER_STATE_QUEUED;
+ rc = 0;
+ break;
+ case MSM_ISP_BUFFER_STATE_DISPATCHED:
+ buf_info->state = MSM_ISP_BUFFER_STATE_QUEUED;
+ rc = 0;
+ break;
+ case MSM_ISP_BUFFER_STATE_QUEUED:
+ rc = 0;
+ break;
+ default:
+ pr_err("%s: incorrect state = %d",
+ __func__, buf_info->state);
+ break;
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+
+ return rc;
+}
+
+static int msm_isp_put_buf_unsafe(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index)
+{
+ int rc = -1;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n", __func__);
+ return rc;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+
+ switch (buf_info->state) {
+ case MSM_ISP_BUFFER_STATE_PREPARED:
+ case MSM_ISP_BUFFER_STATE_DEQUEUED:
+ case MSM_ISP_BUFFER_STATE_DIVERTED:
+ if (BUF_SRC(bufq->stream_id))
+ list_add_tail(&buf_info->list, &bufq->head);
+ else
+ buf_mgr->vb2_ops->put_buf(buf_info->vb2_buf,
+ bufq->session_id, bufq->stream_id);
+ buf_info->state = MSM_ISP_BUFFER_STATE_QUEUED;
+ rc = 0;
+ break;
+ case MSM_ISP_BUFFER_STATE_DISPATCHED:
+ buf_info->state = MSM_ISP_BUFFER_STATE_QUEUED;
+ rc = 0;
+ break;
+ case MSM_ISP_BUFFER_STATE_QUEUED:
+ rc = 0;
+ break;
+ default:
+ pr_err("%s: incorrect state = %d",
+ __func__, buf_info->state);
+ break;
+ }
+
+ return rc;
+}
+
+static int msm_isp_update_put_buf_cnt(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index, uint32_t frame_id)
+{
+ int rc = -1;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ enum msm_isp_buffer_state state;
+ unsigned long flags;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq\n");
+ return rc;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (bufq->buf_type != ISP_SHARE_BUF ||
+ buf_info->buf_put_count == 0) {
+ buf_info->frame_id = frame_id;
+ }
+
+ state = buf_info->state;
+ if (state == MSM_ISP_BUFFER_STATE_DEQUEUED ||
+ state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ if (bufq->buf_type == ISP_SHARE_BUF) {
+ buf_info->buf_put_count++;
+ if (buf_info->buf_put_count != ISP_SHARE_BUF_CLIENT) {
+ rc = buf_info->buf_put_count;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+ }
+ } else {
+ pr_warn("%s: Invalid state, stream id %x, state %d\n", __func__,
+ bufq->stream_id, state);
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return 0;
+}
+
+static int msm_isp_buf_done(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id, uint32_t output_format)
+{
+ int rc = -1;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ enum msm_isp_buffer_state state;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq\n");
+ return rc;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ state = buf_info->state;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+
+ if (state == MSM_ISP_BUFFER_STATE_DEQUEUED ||
+ state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ buf_info->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ if (MSM_ISP_BUFFER_SRC_HAL == BUF_SRC(bufq->stream_id)) {
+ buf_info->vb2_buf->v4l2_buf.timestamp = *tv;
+ buf_info->vb2_buf->v4l2_buf.sequence = frame_id;
+ buf_info->vb2_buf->v4l2_buf.reserved = output_format;
+ buf_mgr->vb2_ops->buf_done(buf_info->vb2_buf,
+ bufq->session_id, bufq->stream_id);
+ } else {
+ pr_err("%s: Error wrong buf done %d\n", __func__,
+ state);
+ rc = msm_isp_put_buf(buf_mgr, buf_info->bufq_handle,
+ buf_info->buf_idx);
+ if (rc < 0) {
+ pr_err("%s: Buf put failed\n", __func__);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int msm_isp_flush_buf(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, enum msm_isp_buffer_flush_t flush_type)
+{
+ int rc = -1, i;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ unsigned long flags;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq\n");
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ for (i = 0; i < bufq->num_bufs; i++) {
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, i);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ continue;
+ }
+ if (flush_type == MSM_ISP_BUFFER_FLUSH_DIVERTED &&
+ buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_QUEUED;
+ } else if (flush_type == MSM_ISP_BUFFER_FLUSH_ALL) {
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ CDBG("%s: no need to queue Diverted buffer\n",
+ __func__);
+ } else if (buf_info->state ==
+ MSM_ISP_BUFFER_STATE_DEQUEUED) {
+ msm_isp_put_buf_unsafe(buf_mgr,
+ bufq_handle, buf_info->buf_idx);
+ }
+ }
+ }
+
+ if (bufq->buf_type == ISP_SHARE_BUF) {
+ while (!list_empty(&bufq->share_head)) {
+ buf_info = list_entry((&bufq->share_head)->next,
+ typeof(*buf_info), share_list);
+ list_del(&(buf_info->share_list));
+ if (buf_info->buf_reuse_flag)
+ kfree(buf_info);
+ }
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return 0;
+}
+
+static int msm_isp_buf_divert(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id)
+{
+ int rc = -1;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+ unsigned long flags;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq\n");
+ return rc;
+ }
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, bufq_handle, buf_index);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (bufq->buf_type != ISP_SHARE_BUF ||
+ buf_info->buf_put_count == 0) {
+ buf_info->frame_id = frame_id;
+ }
+
+ if (bufq->buf_type == ISP_SHARE_BUF) {
+ buf_info->buf_put_count++;
+ if (buf_info->buf_put_count != ISP_SHARE_BUF_CLIENT) {
+ rc = buf_info->buf_put_count;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return rc;
+ }
+ }
+
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED) {
+ buf_info->state = MSM_ISP_BUFFER_STATE_DIVERTED;
+ buf_info->tv = tv;
+ }
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ return 0;
+}
+
+static int msm_isp_buf_enqueue(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info)
+{
+ int rc = -1, buf_state;
+ struct msm_isp_bufq *bufq = NULL;
+ struct msm_isp_buffer *buf_info = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, info->handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq, handle 0x%x, stream id %x num_plane %d\n"
+ , __func__, info->handle, (info->handle >> 8),
+ info->buffer.num_planes);
+ return -EINVAL;
+ }
+
+ buf_state = msm_isp_buf_prepare(buf_mgr, info, NULL);
+ if (buf_state < 0) {
+ pr_err_ratelimited("%s: Buf prepare failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (buf_state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ buf_info = msm_isp_get_buf_ptr(buf_mgr,
+ info->handle, info->buf_idx);
+ if (!buf_info) {
+ pr_err("%s: buf not found\n", __func__);
+ return rc;
+ }
+ if (info->dirty_buf) {
+ rc = msm_isp_put_buf(buf_mgr,
+ info->handle, info->buf_idx);
+ } else {
+ if (BUF_SRC(bufq->stream_id))
+ pr_err("%s: Invalid native buffer state\n",
+ __func__);
+ else
+ rc = msm_isp_buf_done(buf_mgr,
+ info->handle, info->buf_idx,
+ buf_info->tv, buf_info->frame_id, 0);
+ }
+ } else {
+ if (MSM_ISP_BUFFER_SRC_HAL != BUF_SRC(bufq->stream_id)) {
+ rc = msm_isp_put_buf(buf_mgr,
+ info->handle, info->buf_idx);
+ if (rc < 0) {
+ pr_err("%s: Buf put failed stream %x\n",
+ __func__, bufq->stream_id);
+ return rc;
+ }
+ }
+ }
+ return rc;
+}
+
+static int msm_isp_buf_dequeue(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info)
+{
+ struct msm_isp_buffer *buf_info = NULL;
+ int rc = 0;
+
+ buf_info = msm_isp_get_buf_ptr(buf_mgr, info->handle, info->buf_idx);
+ if (!buf_info) {
+ pr_err("Invalid buffer dequeue\n");
+ return -EINVAL;
+ }
+
+ if (buf_info->state == MSM_ISP_BUFFER_STATE_DEQUEUED ||
+ buf_info->state == MSM_ISP_BUFFER_STATE_DIVERTED) {
+ pr_err("%s: Invalid state %d\n", __func__, buf_info->state);
+ return -EINVAL;
+ }
+ msm_isp_buf_unprepare(buf_mgr, info->handle, info->buf_idx);
+
+ buf_info->state = MSM_ISP_BUFFER_STATE_INITIALIZED;
+
+ return rc;
+}
+
+static int msm_isp_get_bufq_handle(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t session_id, uint32_t stream_id)
+{
+ int i;
+ for (i = 0; i < buf_mgr->num_buf_q; i++) {
+ if (buf_mgr->bufq[i].session_id == session_id &&
+ buf_mgr->bufq[i].stream_id == stream_id) {
+ return buf_mgr->bufq[i].bufq_handle;
+ }
+ }
+ pr_err("%s: No match found 0x%x 0x%x\n", __func__,
+ session_id, stream_id);
+ return 0;
+}
+
+static int msm_isp_get_buf_src(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t *buf_src)
+{
+ struct msm_isp_bufq *bufq = NULL;
+
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq\n",
+ __func__);
+ return -EINVAL;
+ }
+ *buf_src = BUF_SRC(bufq->stream_id);
+
+ return 0;
+}
+
+static int msm_isp_request_bufq(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buf_request *buf_request)
+{
+ int rc = -1, i;
+ struct msm_isp_bufq *bufq = NULL;
+ CDBG("%s: E\n", __func__);
+
+ if (!buf_request->num_buf || buf_request->num_buf > VB2_MAX_FRAME) {
+ pr_err("Invalid buffer request\n");
+ return rc;
+ }
+
+ buf_request->handle = msm_isp_get_buf_handle(buf_mgr,
+ buf_request->session_id, buf_request->stream_id);
+ if (!buf_request->handle) {
+ pr_err("Invalid buffer handle\n");
+ return rc;
+ }
+
+ bufq = msm_isp_get_bufq(buf_mgr, buf_request->handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq stream_id %x\n",
+ __func__, buf_request->stream_id);
+
+ return rc;
+ }
+
+ bufq->bufs = kzalloc(sizeof(struct msm_isp_buffer) *
+ buf_request->num_buf, GFP_KERNEL);
+ if (!bufq->bufs) {
+ pr_err("No free memory for buf info\n");
+ msm_isp_free_bufq_handle(buf_mgr, buf_request->handle);
+ return rc;
+ }
+
+ bufq->bufq_handle = buf_request->handle;
+ bufq->session_id = buf_request->session_id;
+ bufq->stream_id = buf_request->stream_id;
+ bufq->num_bufs = buf_request->num_buf;
+ bufq->buf_type = buf_request->buf_type;
+ if (bufq->buf_type == ISP_SHARE_BUF)
+ bufq->buf_client_count = ISP_SHARE_BUF_CLIENT;
+ INIT_LIST_HEAD(&bufq->head);
+ INIT_LIST_HEAD(&bufq->share_head);
+ for (i = 0; i < buf_request->num_buf; i++) {
+ bufq->bufs[i].state = MSM_ISP_BUFFER_STATE_INITIALIZED;
+ bufq->bufs[i].bufq_handle = bufq->bufq_handle;
+ bufq->bufs[i].buf_idx = i;
+ }
+
+ return 0;
+}
+
+static int msm_isp_release_bufq(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ unsigned long flags;
+ int rc = -1;
+ bufq = msm_isp_get_bufq(buf_mgr, bufq_handle);
+ if (!bufq) {
+ pr_err("Invalid bufq release\n");
+ return rc;
+ }
+
+ msm_isp_buf_unprepare_all(buf_mgr, bufq_handle);
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ kfree(bufq->bufs);
+ msm_isp_free_bufq_handle(buf_mgr, bufq_handle);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+
+ return 0;
+}
+
+static void msm_isp_release_all_bufq(
+ struct msm_isp_buf_mgr *buf_mgr)
+{
+ struct msm_isp_bufq *bufq = NULL;
+ unsigned long flags;
+ int i;
+ for (i = 0; i < buf_mgr->num_buf_q; i++) {
+ bufq = &buf_mgr->bufq[i];
+ if (!bufq->bufq_handle)
+ continue;
+
+ msm_isp_buf_unprepare_all(buf_mgr, bufq->bufq_handle);
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ kfree(bufq->bufs);
+ msm_isp_free_bufq_handle(buf_mgr, bufq->bufq_handle);
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ }
+}
+
+
+/**
+ * msm_isp_buf_put_scratch() - Release scratch buffers
+ * @buf_mgr: The buffer structure for h/w
+ *
+ * Returns 0 on success else error code
+ */
+static int msm_isp_buf_put_scratch(struct msm_isp_buf_mgr *buf_mgr)
+{
+ int rc;
+
+ if (!buf_mgr->scratch_buf_addr)
+ return 0;
+
+ rc = cam_smmu_put_phy_addr_scratch(buf_mgr->iommu_hdl,
+ buf_mgr->scratch_buf_addr);
+ if (rc)
+ pr_err("%s: failed to put scratch buffer to img iommu: %d\n",
+ __func__, rc);
+
+
+ if (!rc)
+ buf_mgr->scratch_buf_addr = 0;
+
+ return rc;
+}
+
+/**
+ * msm_isp_buf_get_scratch() - Create scratch buffers
+ * @buf_mgr: The buffer structure for h/w
+ *
+ * Create and map scratch buffers for all IOMMUs under the buffer
+ * manager.
+ *
+ * Returns 0 on success else error code
+ */
+static int msm_isp_buf_get_scratch(struct msm_isp_buf_mgr *buf_mgr)
+{
+ int rc;
+
+ if (buf_mgr->scratch_buf_addr || !buf_mgr->scratch_buf_range)
+ /* already mapped or not supported */
+ return 0;
+
+ rc = cam_smmu_get_phy_addr_scratch(
+ buf_mgr->iommu_hdl,
+ CAM_SMMU_MAP_RW,
+ &buf_mgr->scratch_buf_addr,
+ buf_mgr->scratch_buf_range,
+ SZ_4K);
+ if (rc) {
+ pr_err("%s: failed to map scratch buffer to img iommu: %d\n",
+ __func__, rc);
+ return rc;
+ }
+ return rc;
+}
+
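+/*
+ * Note (descriptive comment only): the scratch mapping is created from
+ * msm_isp_smmu_attach() after the IOMMU attach succeeds and is released in
+ * msm_isp_smmu_attach() on the last detach and in
+ * msm_isp_deinit_isp_buf_mgr(), so at most one scratch mapping exists per
+ * buffer manager.
+ */
+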
+int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
+ void *arg)
+{
+ struct msm_vfe_smmu_attach_cmd *cmd = arg;
+ int rc = 0;
+
+ pr_debug("%s: cmd->security_mode : %d\n", __func__, cmd->security_mode);
+ mutex_lock(&buf_mgr->lock);
+ if (cmd->iommu_attach_mode == IOMMU_ATTACH) {
+ buf_mgr->secure_enable = cmd->security_mode;
+
+ /*
+ * Call the hypervisor through an scm call to notify secure or
+ * non-secure mode
+ */
+ if (buf_mgr->attach_ref_cnt == 0) {
+ rc = cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_ATTACH);
+ if (rc < 0) {
+ pr_err("%s: img smmu attach error, rc :%d\n",
+ __func__, rc);
+ goto err1;
+ }
+ }
+ buf_mgr->attach_ref_cnt++;
+ rc = msm_isp_buf_get_scratch(buf_mgr);
+ if (rc)
+ goto err2;
+ } else {
+ if (buf_mgr->attach_ref_cnt > 0)
+ buf_mgr->attach_ref_cnt--;
+ else
+ pr_err("%s: Error! Invalid ref_cnt %d\n",
+ __func__, buf_mgr->attach_ref_cnt);
+
+ if (buf_mgr->attach_ref_cnt == 0) {
+ rc = msm_isp_buf_put_scratch(buf_mgr);
+ rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
+ CAM_SMMU_DETACH);
+ if (rc < 0) {
+ pr_err("%s: img/stats smmu detach error, rc :%d\n",
+ __func__, rc);
+ }
+ }
+ }
+
+ mutex_unlock(&buf_mgr->lock);
+ return rc;
+
+err2:
+ if (cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH))
+ pr_err("%s: img smmu detach error\n", __func__);
+err1:
+ mutex_unlock(&buf_mgr->lock);
+ return rc;
+}
+
+
+static int msm_isp_init_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
+ const char *ctx_name)
+{
+ int rc = -1;
+ int i = 0;
+ mutex_lock(&buf_mgr->lock);
+ if (buf_mgr->open_count++) {
+ mutex_unlock(&buf_mgr->lock);
+ return 0;
+ }
+
+ CDBG("%s: E\n", __func__);
+ buf_mgr->attach_ref_cnt = 0;
+
+ buf_mgr->num_buf_q = BUF_MGR_NUM_BUF_Q;
+ memset(buf_mgr->bufq, 0, sizeof(buf_mgr->bufq));
+
+ rc = cam_smmu_get_handle("vfe", &buf_mgr->iommu_hdl);
+ if (rc < 0) {
+ pr_err("vfe get handle failed\n");
+ goto get_handle_error;
+ }
+
+ for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++)
+ spin_lock_init(&buf_mgr->bufq[i].bufq_lock);
+
+ buf_mgr->pagefault_debug_disable = 0;
+ buf_mgr->frameId_mismatch_recovery = 0;
+ mutex_unlock(&buf_mgr->lock);
+ return 0;
+
+get_handle_error:
+ mutex_unlock(&buf_mgr->lock);
+ return rc;
+}
+
+static int msm_isp_deinit_isp_buf_mgr(
+ struct msm_isp_buf_mgr *buf_mgr)
+{
+ mutex_lock(&buf_mgr->lock);
+ if (buf_mgr->open_count > 0)
+ buf_mgr->open_count--;
+
+ if (buf_mgr->open_count) {
+ mutex_unlock(&buf_mgr->lock);
+ return 0;
+ }
+ msm_isp_release_all_bufq(buf_mgr);
+ buf_mgr->num_buf_q = 0;
+ buf_mgr->pagefault_debug_disable = 0;
+
+ msm_isp_buf_put_scratch(buf_mgr);
+ cam_smmu_ops(buf_mgr->iommu_hdl, CAM_SMMU_DETACH);
+ cam_smmu_destroy_handle(buf_mgr->iommu_hdl);
+
+ buf_mgr->attach_ref_cnt = 0;
+ mutex_unlock(&buf_mgr->lock);
+ return 0;
+}
+
+int msm_isp_proc_buf_cmd(struct msm_isp_buf_mgr *buf_mgr,
+ unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case VIDIOC_MSM_ISP_REQUEST_BUF: {
+ struct msm_isp_buf_request *buf_req = arg;
+
+ buf_mgr->ops->request_buf(buf_mgr, buf_req);
+ break;
+ }
+ case VIDIOC_MSM_ISP_ENQUEUE_BUF: {
+ struct msm_isp_qbuf_info *qbuf_info = arg;
+
+ buf_mgr->ops->enqueue_buf(buf_mgr, qbuf_info);
+ break;
+ }
+ case VIDIOC_MSM_ISP_DEQUEUE_BUF: {
+ struct msm_isp_qbuf_info *qbuf_info = arg;
+
+ buf_mgr->ops->dequeue_buf(buf_mgr, qbuf_info);
+ break;
+ }
+ case VIDIOC_MSM_ISP_RELEASE_BUF: {
+ struct msm_isp_buf_request *buf_req = arg;
+
+ buf_mgr->ops->release_buf(buf_mgr, buf_req->handle);
+ break;
+ }
+ case VIDIOC_MSM_ISP_UNMAP_BUF: {
+ struct msm_isp_unmap_buf_req *unmap_req = arg;
+
+ buf_mgr->ops->unmap_buf(buf_mgr, unmap_req->fd);
+ break;
+ }
+ }
+ return 0;
+}
+
+static int msm_isp_buf_mgr_debug(struct msm_isp_buf_mgr *buf_mgr,
+ unsigned long fault_addr)
+{
+ struct msm_isp_buffer *bufs = NULL;
+ uint32_t i = 0, j = 0, k = 0, rc = 0;
+ char *print_buf = NULL, temp_buf[100];
+ uint32_t start_addr = 0, end_addr = 0, print_buf_size = 2000;
+ int buf_addr_delta = -1;
+ int temp_delta = 0;
+ uint32_t debug_stream_id = 0;
+ uint32_t debug_buf_idx = 0;
+ uint32_t debug_buf_plane = 0;
+ uint32_t debug_start_addr = 0;
+ uint32_t debug_end_addr = 0;
+ uint32_t debug_frame_id = 0;
+ enum msm_isp_buffer_state debug_state = MSM_ISP_BUFFER_STATE_UNUSED;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+
+ if (!buf_mgr) {
+ pr_err_ratelimited("%s: %d] NULL buf_mgr\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++) {
+ bufq = &buf_mgr->bufq[i];
+ if (!bufq)
+ continue;
+
+ spin_lock_irqsave(&bufq->bufq_lock, flags);
+ if (!bufq->bufq_handle) {
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ continue;
+ }
+
+ for (j = 0; j < bufq->num_bufs; j++) {
+ bufs = &bufq->bufs[j];
+ if (!bufs)
+ continue;
+
+ for (k = 0; k < bufs->num_planes; k++) {
+ start_addr = bufs->
+ mapped_info[k].paddr;
+ end_addr = bufs->mapped_info[k].paddr +
+ bufs->mapped_info[k].len - 1;
+ temp_delta = fault_addr - start_addr;
+ if (temp_delta < 0)
+ continue;
+
+ if (buf_addr_delta == -1 ||
+ temp_delta < buf_addr_delta) {
+ buf_addr_delta = temp_delta;
+ debug_stream_id = bufq->stream_id;
+ debug_buf_idx = j;
+ debug_buf_plane = k;
+ debug_start_addr = start_addr;
+ debug_end_addr = end_addr;
+ debug_frame_id = bufs->frame_id;
+ debug_state = bufs->state;
+ }
+ }
+ }
+ start_addr = 0;
+ end_addr = 0;
+ spin_unlock_irqrestore(&bufq->bufq_lock, flags);
+ }
+
+ pr_err("%s: ==== SMMU page fault addr %lx ====\n", __func__,
+ fault_addr);
+ pr_err("%s: nearby stream id %x, frame_id %d\n", __func__,
+ debug_stream_id, debug_frame_id);
+ pr_err("%s: nearby buf index %d, plane %d, state %d\n", __func__,
+ debug_buf_idx, debug_buf_plane, debug_state);
+ pr_err("%s: buf address 0x%x -- 0x%x\n", __func__,
+ debug_start_addr, debug_end_addr);
+
+ if (BUF_DEBUG_FULL) {
+ print_buf = kzalloc(print_buf_size, GFP_ATOMIC);
+ if (!print_buf) {
+ pr_err("%s failed: No memory", __func__);
+ return -ENOMEM;
+ }
+ snprintf(print_buf, print_buf_size, "%s\n", __func__);
+ for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++) {
+ if (i % 2 == 0 && i > 0) {
+ pr_err("%s\n", print_buf);
+ print_buf[0] = 0;
+ }
+ if (buf_mgr->bufq[i].bufq_handle != 0) {
+ snprintf(temp_buf, sizeof(temp_buf),
+ "handle %x stream %x num_bufs %d\n",
+ buf_mgr->bufq[i].bufq_handle,
+ buf_mgr->bufq[i].stream_id,
+ buf_mgr->bufq[i].num_bufs);
+ strlcat(print_buf, temp_buf, print_buf_size);
+ for (j = 0; j < buf_mgr->bufq[i].num_bufs;
+ j++) {
+ bufs = &buf_mgr->bufq[i].bufs[j];
+ if (!bufs)
+ break;
+
+ for (k = 0; k < bufs->num_planes; k++) {
+ start_addr = bufs->
+ mapped_info[k].paddr;
+ end_addr = bufs->mapped_info[k].
+ paddr + bufs->
+ mapped_info[k].len;
+ snprintf(temp_buf,
+ sizeof(temp_buf),
+ " buf %d plane %d start_addr %x end_addr %x\n",
+ j, k, start_addr,
+ end_addr);
+ strlcat(print_buf, temp_buf,
+ print_buf_size);
+ }
+ }
+ start_addr = 0;
+ end_addr = 0;
+ }
+ }
+ pr_err("%s\n", print_buf);
+ kfree(print_buf);
+ }
+ return rc;
+}
+
+static struct msm_isp_buf_ops isp_buf_ops = {
+ .request_buf = msm_isp_request_bufq,
+ .enqueue_buf = msm_isp_buf_enqueue,
+ .dequeue_buf = msm_isp_buf_dequeue,
+ .release_buf = msm_isp_release_bufq,
+ .get_bufq_handle = msm_isp_get_bufq_handle,
+ .get_buf_src = msm_isp_get_buf_src,
+ .get_buf = msm_isp_get_buf,
+ .get_buf_by_index = msm_isp_get_buf_by_index,
+ .map_buf = msm_isp_map_buf,
+ .unmap_buf = msm_isp_unmap_buf,
+ .put_buf = msm_isp_put_buf,
+ .flush_buf = msm_isp_flush_buf,
+ .buf_done = msm_isp_buf_done,
+ .buf_divert = msm_isp_buf_divert,
+ .buf_mgr_init = msm_isp_init_isp_buf_mgr,
+ .buf_mgr_deinit = msm_isp_deinit_isp_buf_mgr,
+ .buf_mgr_debug = msm_isp_buf_mgr_debug,
+ .get_bufq = msm_isp_get_bufq,
+ .update_put_buf_cnt = msm_isp_update_put_buf_cnt,
+};
+
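+/*
+ * Illustrative caller-side sketch (assumption, not taken from the ISP
+ * sources): users of the buffer manager are expected to go through this ops
+ * table, e.g.
+ *
+ *	rc = buf_mgr->ops->get_buf(buf_mgr, id, bufq_handle, &buf, &buf_cnt);
+ *	if (!rc && buf) {
+ *		... program buf->mapped_info[plane].paddr into the hardware ...
+ *		buf_mgr->ops->buf_done(buf_mgr, bufq_handle, buf->buf_idx,
+ *				&tv, frame_id, output_format);
+ *	}
+ *
+ * "id", "plane", "tv", "frame_id" and "output_format" are placeholders
+ * supplied by the caller.
+ */
+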
+int msm_isp_create_isp_buf_mgr(
+ struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_sd_req_vb2_q *vb2_ops,
+ struct device *dev,
+ uint32_t scratch_buf_range)
+{
+ int rc = 0;
+ if (buf_mgr->init_done)
+ return rc;
+
+ buf_mgr->ops = &isp_buf_ops;
+ buf_mgr->vb2_ops = vb2_ops;
+ buf_mgr->open_count = 0;
+ buf_mgr->pagefault_debug_disable = 0;
+ buf_mgr->secure_enable = NON_SECURE_MODE;
+ buf_mgr->attach_state = MSM_ISP_BUF_MGR_DETACH;
+ buf_mgr->scratch_buf_range = scratch_buf_range;
+ mutex_init(&buf_mgr->lock);
+
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
new file mode 100644
index 000000000000..6041604dc4c0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.h
@@ -0,0 +1,209 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_ISP_BUF_H_
+#define _MSM_ISP_BUF_H_
+
+#include <media/msmb_isp.h>
+#include "msm_sd.h"
+
+/* Buffer type could be userspace / HAL.
+ * Userspace could provide a native or scratch buffer. */
+#define BUF_SRC(id) ( \
+ (id & ISP_SCRATCH_BUF_BIT) ? MSM_ISP_BUFFER_SRC_SCRATCH : \
+ (id & ISP_NATIVE_BUF_BIT) ? MSM_ISP_BUFFER_SRC_NATIVE : \
+ MSM_ISP_BUFFER_SRC_HAL)
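+/*
+ * For example, an id with ISP_NATIVE_BUF_BIT set resolves to
+ * MSM_ISP_BUFFER_SRC_NATIVE, while an id with neither ISP_SCRATCH_BUF_BIT
+ * nor ISP_NATIVE_BUF_BIT set resolves to MSM_ISP_BUFFER_SRC_HAL.
+ */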
+
+#define ISP_SHARE_BUF_CLIENT 2
+#define BUF_MGR_NUM_BUF_Q 28
+#define MAX_IOMMU_CTX 2
+
+struct msm_isp_buf_mgr;
+
+enum msm_isp_buffer_src_t {
+ MSM_ISP_BUFFER_SRC_HAL,
+ MSM_ISP_BUFFER_SRC_NATIVE,
+ MSM_ISP_BUFFER_SRC_SCRATCH,
+ MSM_ISP_BUFFER_SRC_MAX,
+};
+
+enum msm_isp_buffer_state {
+ MSM_ISP_BUFFER_STATE_UNUSED, /* not used */
+ MSM_ISP_BUFFER_STATE_INITIALIZED, /* REQBUF done */
+ MSM_ISP_BUFFER_STATE_PREPARED, /* BUF mapped */
+ MSM_ISP_BUFFER_STATE_QUEUED, /* buf queued */
+ MSM_ISP_BUFFER_STATE_DEQUEUED, /* in use in VFE */
+	MSM_ISP_BUFFER_STATE_DIVERTED, /* Sent to other hardware */
+	MSM_ISP_BUFFER_STATE_DISPATCHED, /* Sent to HAL */
+};
+
+enum msm_isp_buffer_flush_t {
+ MSM_ISP_BUFFER_FLUSH_DIVERTED,
+ MSM_ISP_BUFFER_FLUSH_ALL,
+};
+
+enum msm_isp_buf_mgr_state {
+ MSM_ISP_BUF_MGR_ATTACH,
+ MSM_ISP_BUF_MGR_DETACH,
+};
+
+struct msm_isp_buffer_mapped_info {
+ size_t len;
+ dma_addr_t paddr;
+ int buf_fd;
+};
+
+struct buffer_cmd {
+ struct list_head list;
+ struct msm_isp_buffer_mapped_info *mapped_info;
+};
+
+struct msm_isp_buffer {
+ /*Common Data structure*/
+ int num_planes;
+ struct msm_isp_buffer_mapped_info mapped_info[VIDEO_MAX_PLANES];
+ int buf_idx;
+ uint32_t bufq_handle;
+ uint32_t frame_id;
+ struct timeval *tv;
+
+ /*Native buffer*/
+ struct list_head list;
+ enum msm_isp_buffer_state state;
+
+ /*Vb2 buffer data*/
+ struct vb2_buffer *vb2_buf;
+
+ /*Share buffer cache state*/
+ struct list_head share_list;
+ uint8_t buf_used[ISP_SHARE_BUF_CLIENT];
+ uint8_t buf_get_count;
+ uint8_t buf_put_count;
+ uint8_t buf_reuse_flag;
+};
+
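+/*
+ * Per-stream buffer queue. bufs[] holds num_bufs entries protected by
+ * bufq_lock; native buffers are queued on 'head' and shared-buffer
+ * bookkeeping lives on 'share_head'.
+ */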
+struct msm_isp_bufq {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t num_bufs;
+ uint32_t bufq_handle;
+ enum msm_isp_buf_type buf_type;
+ struct msm_isp_buffer *bufs;
+ spinlock_t bufq_lock;
+
+ /*Native buffer queue*/
+ struct list_head head;
+ /*Share buffer cache queue*/
+ struct list_head share_head;
+ uint8_t buf_client_count;
+};
+
+struct msm_isp_buf_ops {
+ int (*request_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buf_request *buf_request);
+
+ int (*enqueue_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info);
+
+ int (*dequeue_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_qbuf_info *info);
+
+ int (*release_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle);
+
+ int (*get_bufq_handle)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t session_id, uint32_t stream_id);
+
+ int (*get_buf_src)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t *buf_src);
+
+ int (*get_buf)(struct msm_isp_buf_mgr *buf_mgr, uint32_t id,
+ uint32_t bufq_handle, struct msm_isp_buffer **buf_info,
+ uint32_t *buf_cnt);
+
+ int (*get_buf_by_index)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct msm_isp_buffer **buf_info);
+
+ int (*map_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_isp_buffer_mapped_info *mapped_info, uint32_t fd);
+
+ int (*unmap_buf)(struct msm_isp_buf_mgr *buf_mgr, uint32_t fd);
+
+ int (*put_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index);
+
+ int (*flush_buf)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, enum msm_isp_buffer_flush_t flush_type);
+
+ int (*buf_done)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id, uint32_t output_format);
+ int (*buf_divert)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ struct timeval *tv, uint32_t frame_id);
+ void (*register_ctx)(struct msm_isp_buf_mgr *buf_mgr,
+ struct device **iommu_ctx1, struct device **iommu_ctx2,
+ int num_iommu_ctx1, int num_iommu_ctx2);
+ int (*buf_mgr_init)(struct msm_isp_buf_mgr *buf_mgr,
+ const char *ctx_name);
+ int (*buf_mgr_deinit)(struct msm_isp_buf_mgr *buf_mgr);
+ int (*buf_mgr_debug)(struct msm_isp_buf_mgr *buf_mgr,
+ unsigned long fault_addr);
+ struct msm_isp_bufq * (*get_bufq)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle);
+ int (*update_put_buf_cnt)(struct msm_isp_buf_mgr *buf_mgr,
+ uint32_t bufq_handle, uint32_t buf_index,
+ uint32_t frame_id);
+};
+
+struct msm_isp_buf_mgr {
+ int init_done;
+ uint32_t open_count;
+ uint32_t pagefault_debug_disable;
+ uint32_t frameId_mismatch_recovery;
+ uint16_t num_buf_q;
+ struct msm_isp_bufq bufq[BUF_MGR_NUM_BUF_Q];
+
+ struct ion_client *client;
+ struct msm_isp_buf_ops *ops;
+
+ struct msm_sd_req_vb2_q *vb2_ops;
+
+ /*IOMMU driver*/
+ int iommu_hdl;
+
+ /*Add secure mode*/
+ int secure_enable;
+
+ int num_iommu_ctx;
+ int num_iommu_secure_ctx;
+ int attach_ref_cnt;
+ enum msm_isp_buf_mgr_state attach_state;
+ struct device *isp_dev;
+ struct mutex lock;
+ /* Scratch buffer */
+ dma_addr_t scratch_buf_addr;
+ uint32_t scratch_buf_range;
+};
+
+int msm_isp_create_isp_buf_mgr(struct msm_isp_buf_mgr *buf_mgr,
+ struct msm_sd_req_vb2_q *vb2_ops, struct device *dev,
+ uint32_t scratch_addr_range);
+
+int msm_isp_proc_buf_cmd(struct msm_isp_buf_mgr *buf_mgr,
+ unsigned int cmd, void *arg);
+
+int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
+ void *arg);
+
+#endif /* _MSM_ISP_BUF_H_ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
new file mode 100644
index 000000000000..545f133e25b7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -0,0 +1,680 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/of_device.h>
+#include <linux/sched_clock.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+
+#include "msm_isp.h"
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_sd.h"
+#include "msm_isp47.h"
+#include "msm_isp46.h"
+#include "msm_isp44.h"
+#include "msm_isp40.h"
+#include "msm_isp32.h"
+
+static struct msm_sd_req_vb2_q vfe_vb2_ops;
+static struct msm_isp_buf_mgr vfe_buf_mgr;
+static struct msm_vfe_common_dev_data vfe_common_data;
+static struct dual_vfe_resource dualvfe;
+
+static const struct of_device_id msm_vfe_dt_match[] = {
+ {
+ .compatible = "qcom,vfe",
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe_dt_match);
+
+#define MAX_OVERFLOW_COUNTERS 29
+#define OVERFLOW_LENGTH 1024
+#define OVERFLOW_BUFFER_LENGTH 64
+static char stat_line[OVERFLOW_LENGTH];
+
+struct msm_isp_statistics stats;
+struct msm_isp_ub_info ub_info;
+
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+ struct msm_isp_bw_req_info *isp_req_hist);
+
+static char *stats_str[MAX_OVERFLOW_COUNTERS] = {
+ "imgmaster0_overflow_cnt",
+ "imgmaster1_overflow_cnt",
+ "imgmaster2_overflow_cnt",
+ "imgmaster3_overflow_cnt",
+ "imgmaster4_overflow_cnt",
+ "imgmaster5_overflow_cnt",
+ "imgmaster6_overflow_cnt",
+ "be_overflow_cnt",
+ "bg_overflow_cnt",
+ "bf_overflow_cnt",
+ "awb_overflow_cnt",
+ "rs_overflow_cnt",
+ "cs_overflow_cnt",
+ "ihist_overflow_cnt",
+ "skinbhist_overflow_cnt",
+ "bfscale_overflow_cnt",
+ "ISP_VFE0_client_info.active",
+ "ISP_VFE0_client_info.ab",
+ "ISP_VFE0_client_info.ib",
+ "ISP_VFE1_client_info.active",
+ "ISP_VFE1_client_info.ab",
+ "ISP_VFE1_client_info.ib",
+ "ISP_CPP_client_info.active",
+ "ISP_CPP_client_info.ab",
+ "ISP_CPP_client_info.ib",
+ "ISP_last_overflow.ab",
+ "ISP_last_overflow.ib",
+ "ISP_VFE_CLK_RATE",
+ "ISP_CPP_CLK_RATE",
+};
+
+#define MAX_DEPTH_BW_REQ_HISTORY 25
+#define MAX_BW_HISTORY_BUFF_LEN 6144
+#define MAX_BW_HISTORY_LINE_BUFF_LEN 512
+
+#define MAX_UB_INFO_BUFF_LEN 1024
+#define MAX_UB_INFO_LINE_BUFF_LEN 256
+
+static struct msm_isp_bw_req_info
+ msm_isp_bw_request_history[MAX_DEPTH_BW_REQ_HISTORY];
+static int msm_isp_bw_request_history_idx;
+static char bw_request_history_buff[MAX_BW_HISTORY_BUFF_LEN];
+static char ub_info_buffer[MAX_UB_INFO_BUFF_LEN];
+static spinlock_t req_history_lock;
+
+static int vfe_debugfs_statistics_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
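+/*
+ * Format the overflow/bandwidth counters for the "stats" debugfs node.
+ * stats_str[] is kept in the same order as the int64_t members of
+ * struct msm_isp_statistics, so the counters can be walked with a flat
+ * uint64_t pointer.
+ */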
+static ssize_t vfe_debugfs_statistics_read(struct file *t_file, char *t_char,
+ size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ uint64_t *ptr;
+ char buffer[OVERFLOW_BUFFER_LENGTH] = {0};
+ struct vfe_device *vfe_dev = (struct vfe_device *)
+ t_file->private_data;
+ struct msm_isp_statistics *stats = vfe_dev->stats;
+
+ memset(stat_line, 0, sizeof(stat_line));
+ msm_isp_util_get_bandwidth_stats(vfe_dev, stats);
+ ptr = (uint64_t *)(stats);
+ for (i = 0; i < MAX_OVERFLOW_COUNTERS; i++) {
+ strlcat(stat_line, stats_str[i], sizeof(stat_line));
+ strlcat(stat_line, " ", sizeof(stat_line));
+ snprintf(buffer, sizeof(buffer), "%llu", ptr[i]);
+ strlcat(stat_line, buffer, sizeof(stat_line));
+ strlcat(stat_line, "\r\n", sizeof(stat_line));
+ }
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, stat_line, strlen(stat_line));
+}
+
+static ssize_t vfe_debugfs_statistics_write(struct file *t_file,
+ const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct vfe_device *vfe_dev = (struct vfe_device *)
+ t_file->private_data;
+ struct msm_isp_statistics *stats = vfe_dev->stats;
+ memset(stats, 0, sizeof(struct msm_isp_statistics));
+
+ return sizeof(struct msm_isp_statistics);
+}
+
+static int bw_history_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t bw_history_read(struct file *t_file, char *t_char,
+ size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ char *out_buffer = bw_request_history_buff;
+ char line_buffer[MAX_BW_HISTORY_LINE_BUFF_LEN] = {0};
+ struct msm_isp_bw_req_info *isp_req_hist =
+ (struct msm_isp_bw_req_info *) t_file->private_data;
+
+ memset(out_buffer, 0, MAX_BW_HISTORY_BUFF_LEN);
+
+ snprintf(line_buffer, sizeof(line_buffer),
+ "Bus bandwidth request history in chronological order:\n");
+ strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+ snprintf(line_buffer, sizeof(line_buffer),
+ "MSM_ISP_MIN_AB = %u, MSM_ISP_MIN_IB = %u\n\n",
+ MSM_ISP_MIN_AB, MSM_ISP_MIN_IB);
+ strlcat(out_buffer, line_buffer, sizeof(bw_request_history_buff));
+
+ for (i = 0; i < MAX_DEPTH_BW_REQ_HISTORY; i++) {
+ snprintf(line_buffer, sizeof(line_buffer),
+ "idx = %d, client = %u, timestamp = %llu, ab = %llu, ib = %llu\n"
+ "ISP0.active = %x, ISP0.ab = %llu, ISP0.ib = %llu\n"
+ "ISP1.active = %x, ISP1.ab = %llu, ISP1.ib = %llu\n"
+ "CPP.active = %x, CPP.ab = %llu, CPP.ib = %llu\n\n",
+ i, isp_req_hist[i].client, isp_req_hist[i].timestamp,
+ isp_req_hist[i].total_ab, isp_req_hist[i].total_ib,
+ isp_req_hist[i].client_info[0].active,
+ isp_req_hist[i].client_info[0].ab,
+ isp_req_hist[i].client_info[0].ib,
+ isp_req_hist[i].client_info[1].active,
+ isp_req_hist[i].client_info[1].ab,
+ isp_req_hist[i].client_info[1].ib,
+ isp_req_hist[i].client_info[2].active,
+ isp_req_hist[i].client_info[2].ab,
+ isp_req_hist[i].client_info[2].ib);
+ strlcat(out_buffer, line_buffer,
+ sizeof(bw_request_history_buff));
+ }
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t bw_history_write(struct file *t_file,
+ const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct msm_isp_bw_req_info *isp_req_hist =
+ (struct msm_isp_bw_req_info *) t_file->private_data;
+
+ memset(isp_req_hist, 0, sizeof(msm_isp_bw_request_history));
+ msm_isp_bw_request_history_idx = 0;
+ return sizeof(msm_isp_bw_request_history);
+}
+
+static int ub_info_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t ub_info_read(struct file *t_file, char *t_char,
+ size_t t_size_t, loff_t *t_loff_t)
+{
+ int i;
+ char *out_buffer = ub_info_buffer;
+ char line_buffer[MAX_UB_INFO_LINE_BUFF_LEN] = {0};
+ struct vfe_device *vfe_dev =
+ (struct vfe_device *) t_file->private_data;
+ struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+	memset(out_buffer, 0, MAX_UB_INFO_BUFF_LEN);
+ snprintf(line_buffer, sizeof(line_buffer),
+ "wm_ub_policy_type = %d\n"
+ "num_wm = %d\n"
+ "wm_ub = %d\n",
+ ub_info->policy, ub_info->num_wm, ub_info->wm_ub);
+ strlcat(out_buffer, line_buffer,
+ sizeof(ub_info_buffer));
+ for (i = 0; i < ub_info->num_wm; i++) {
+ snprintf(line_buffer, sizeof(line_buffer),
+ "data[%d] = 0x%x, addr[%d] = 0x%llx\n",
+ i, ub_info->data[i], i, ub_info->addr[i]);
+ strlcat(out_buffer, line_buffer,
+ sizeof(ub_info_buffer));
+ }
+
+ return simple_read_from_buffer(t_char, t_size_t,
+ t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t ub_info_write(struct file *t_file,
+ const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct vfe_device *vfe_dev =
+ (struct vfe_device *) t_file->private_data;
+ struct msm_isp_ub_info *ub_info = vfe_dev->ub_info;
+
+ memset(ub_info, 0, sizeof(struct msm_isp_ub_info));
+
+ return sizeof(struct msm_isp_ub_info);
+}
+
+static const struct file_operations vfe_debugfs_error = {
+ .open = vfe_debugfs_statistics_open,
+ .read = vfe_debugfs_statistics_read,
+ .write = vfe_debugfs_statistics_write,
+};
+
+static const struct file_operations bw_history_ops = {
+ .open = bw_history_open,
+ .read = bw_history_read,
+ .write = bw_history_write,
+};
+
+static const struct file_operations ub_info_ops = {
+ .open = ub_info_open,
+ .read = ub_info_read,
+ .write = ub_info_write,
+};
+
+static int msm_isp_enable_debugfs(struct vfe_device *vfe_dev,
+ struct msm_isp_bw_req_info *isp_req_hist)
+{
+ struct dentry *debugfs_base;
+ char dirname[32] = {0};
+
+ snprintf(dirname, sizeof(dirname), "msm_isp%d", vfe_dev->pdev->id);
+ debugfs_base = debugfs_create_dir(dirname, NULL);
+ if (!debugfs_base)
+ return -ENOMEM;
+ if (!debugfs_create_file("stats", S_IRUGO | S_IWUSR, debugfs_base,
+ vfe_dev, &vfe_debugfs_error))
+ return -ENOMEM;
+
+ if (!debugfs_create_file("bw_req_history", S_IRUGO | S_IWUSR,
+ debugfs_base, isp_req_hist, &bw_history_ops))
+ return -ENOMEM;
+
+ if (!debugfs_create_file("ub_info", S_IRUGO | S_IWUSR,
+ debugfs_base, vfe_dev, &ub_info_ops))
+ return -ENOMEM;
+
+ return 0;
+}
+
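+/*
+ * Record one bandwidth vote in the circular request history exposed via
+ * the bw_req_history debugfs node. The write index wraps at
+ * MAX_DEPTH_BW_REQ_HISTORY and is protected by req_history_lock.
+ */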
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+ uint64_t ib,
+ struct msm_isp_bandwidth_info *client_info,
+ unsigned long long ts)
+{
+ int i;
+
+ spin_lock(&req_history_lock);
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].client =
+ client;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].timestamp =
+ ts;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ab =
+ ab;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].total_ib =
+ ib;
+
+ for (i = 0; i < MAX_ISP_CLIENT; i++) {
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+ client_info[i].active = client_info[i].active;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+ client_info[i].ab = client_info[i].ab;
+ msm_isp_bw_request_history[msm_isp_bw_request_history_idx].
+ client_info[i].ib = client_info[i].ib;
+ }
+
+ msm_isp_bw_request_history_idx = (msm_isp_bw_request_history_idx + 1)
+ % MAX_DEPTH_BW_REQ_HISTORY;
+ spin_unlock(&req_history_lock);
+}
+
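+/*
+ * Dequeue a v4l2 event for userspace. When built with CONFIG_COMPAT and
+ * called from a 32-bit task, the msm_isp_event_data payload is repacked
+ * as msm_isp_event_data32 inside the returned event.
+ */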
+#ifdef CONFIG_COMPAT
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+ long rc;
+ if (is_compat_task()) {
+ struct msm_isp_event_data32 *event_data32;
+ struct msm_isp_event_data *event_data;
+ struct v4l2_event isp_event;
+ struct v4l2_event *isp_event_user;
+
+ memset(&isp_event, 0, sizeof(isp_event));
+ rc = v4l2_event_dequeue(vfh, &isp_event,
+ file->f_flags & O_NONBLOCK);
+ if (rc)
+ return rc;
+ event_data = (struct msm_isp_event_data *)
+ isp_event.u.data;
+ isp_event_user = (struct v4l2_event *)arg;
+ memcpy(isp_event_user, &isp_event,
+ sizeof(*isp_event_user));
+ event_data32 = (struct msm_isp_event_data32 *)
+ isp_event_user->u.data;
+ memset(event_data32, 0,
+ sizeof(struct msm_isp_event_data32));
+ event_data32->timestamp.tv_sec =
+ event_data->timestamp.tv_sec;
+ event_data32->timestamp.tv_usec =
+ event_data->timestamp.tv_usec;
+ event_data32->mono_timestamp.tv_sec =
+ event_data->mono_timestamp.tv_sec;
+ event_data32->mono_timestamp.tv_usec =
+ event_data->mono_timestamp.tv_usec;
+ event_data32->frame_id = event_data->frame_id;
+ memcpy(&(event_data32->u), &(event_data->u),
+ sizeof(event_data32->u));
+ } else {
+ rc = v4l2_event_dequeue(vfh, arg,
+ file->f_flags & O_NONBLOCK);
+ }
+ return rc;
+}
+#else
+static long msm_isp_dqevent(struct file *file, struct v4l2_fh *vfh, void *arg)
+{
+ return v4l2_event_dequeue(vfh, arg,
+ file->f_flags & O_NONBLOCK);
+}
+#endif
+
+static long msm_isp_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+
+ switch (cmd) {
+ case VIDIOC_DQEVENT: {
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+ return msm_isp_dqevent(file, vfh, arg);
+	}
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+}
+
+static struct v4l2_subdev_core_ops msm_vfe_v4l2_subdev_core_ops = {
+ .ioctl = msm_isp_ioctl,
+ .subscribe_event = msm_isp_subscribe_event,
+ .unsubscribe_event = msm_isp_unsubscribe_event,
+};
+
+static struct v4l2_subdev_ops msm_vfe_v4l2_subdev_ops = {
+ .core = &msm_vfe_v4l2_subdev_core_ops,
+};
+
+static struct v4l2_subdev_internal_ops msm_vfe_subdev_internal_ops = {
+ .open = msm_isp_open_node,
+ .close = msm_isp_close_node,
+};
+
+static long msm_isp_v4l2_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_isp_subdev_do_ioctl);
+}
+
+static struct v4l2_file_operations msm_isp_v4l2_fops = {
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = msm_isp_v4l2_fops_ioctl,
+#endif
+ .unlocked_ioctl = msm_isp_v4l2_fops_ioctl
+};
+
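+/*
+ * Hook a probed VFE hardware subdev into the shared dual-VFE resource
+ * table so that both VFEs can reference each other's axi and stats state.
+ */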
+static int vfe_set_common_data(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = NULL;
+ struct vfe_device *vfe_dev = NULL;
+
+ sd = (struct v4l2_subdev *)platform_get_drvdata(pdev);
+ if (!sd) {
+ pr_err("%s: Error! Cannot find subdev\n", __func__);
+ return -EPERM;
+ }
+ vfe_dev = (struct vfe_device *)v4l2_get_subdevdata(sd);
+ if (!vfe_dev) {
+ pr_err("%s: Error! Cannot find vfe_dev\n", __func__);
+ return -EPERM;
+ }
+
+ vfe_dev->common_data = (struct msm_vfe_common_dev_data *)
+ pdev->dev.platform_data;
+
+ vfe_dev->common_data->dual_vfe_res = &dualvfe;
+ vfe_dev->common_data->dual_vfe_res->axi_data[vfe_dev->pdev->id] =
+ &vfe_dev->axi_data;
+ vfe_dev->common_data->dual_vfe_res->stats_data[vfe_dev->pdev->id] =
+ &vfe_dev->stats_data;
+ vfe_dev->common_data->dual_vfe_res->vfe_dev[vfe_dev->pdev->id] =
+ vfe_dev;
+ return 0;
+}
+
+static int vfe_probe(struct platform_device *pdev)
+{
+ struct vfe_parent_device *vfe_parent_dev;
+ int rc = 0;
+ struct device_node *node;
+ struct platform_device *new_dev = NULL;
+ uint32_t i = 0;
+ char name[10] = "\0";
+
+ vfe_parent_dev = kzalloc(sizeof(struct vfe_parent_device),
+ GFP_KERNEL);
+ if (!vfe_parent_dev) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ vfe_parent_dev->common_sd = kzalloc(
+ sizeof(struct msm_vfe_common_subdev), GFP_KERNEL);
+ if (!vfe_parent_dev->common_sd) {
+ rc = -ENOMEM;
+ goto probe_fail1;
+ }
+
+ vfe_parent_dev->common_sd->common_data = &vfe_common_data;
+ memset(&vfe_common_data, 0, sizeof(vfe_common_data));
+ spin_lock_init(&vfe_common_data.common_dev_data_lock);
+
+ of_property_read_u32(pdev->dev.of_node,
+ "num_child", &vfe_parent_dev->num_hw_sd);
+
+ for (i = 0; i < vfe_parent_dev->num_hw_sd; i++) {
+ node = NULL;
+ snprintf(name, sizeof(name), "qcom,vfe%d", i);
+ node = of_find_node_by_name(NULL, name);
+		if (!node) {
+			pr_err("%s: Error! Cannot find node in dtsi %s\n",
+				__func__, name);
+			rc = -ENODEV;
+			goto probe_fail2;
+		}
+ new_dev = of_find_device_by_node(node);
+		if (!new_dev) {
+			pr_err("%s: Failed to find device on bus %s\n",
+				__func__, node->name);
+			rc = -ENODEV;
+			goto probe_fail2;
+		}
+ vfe_parent_dev->child_list[i] = new_dev;
+ new_dev->dev.platform_data =
+ (void *)vfe_parent_dev->common_sd->common_data;
+ rc = vfe_set_common_data(new_dev);
+ if (rc < 0)
+ goto probe_fail2;
+ }
+
+ vfe_parent_dev->num_sd = vfe_parent_dev->num_hw_sd;
+ vfe_parent_dev->pdev = pdev;
+
+ return rc;
+
+probe_fail2:
+ kfree(vfe_parent_dev->common_sd);
+probe_fail1:
+ kfree(vfe_parent_dev);
+end:
+ return rc;
+}
+
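+/*
+ * Per-VFE-core probe: resolves the hardware info from the DT match table
+ * (or the platform device id table), sets up the tasklet and locks,
+ * registers the v4l2 subdev with its devnode fops, creates the shared
+ * ISP buffer manager and enables the debugfs nodes.
+ */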
+int vfe_hw_probe(struct platform_device *pdev)
+{
+ struct vfe_device *vfe_dev;
+ /*struct msm_cam_subdev_info sd_info;*/
+ const struct of_device_id *match_dev;
+ int rc = 0;
+
+ vfe_dev = kzalloc(sizeof(struct vfe_device), GFP_KERNEL);
+ if (!vfe_dev) {
+		pr_err("%s: not enough memory\n", __func__);
+ rc = -ENOMEM;
+ goto end;
+ }
+ vfe_dev->stats = kzalloc(sizeof(struct msm_isp_statistics), GFP_KERNEL);
+ if (!vfe_dev->stats) {
+		pr_err("%s: not enough memory\n", __func__);
+ rc = -ENOMEM;
+ goto probe_fail1;
+ }
+
+ vfe_dev->ub_info = kzalloc(sizeof(struct msm_isp_ub_info), GFP_KERNEL);
+ if (!vfe_dev->ub_info) {
+		pr_err("%s: not enough memory\n", __func__);
+ rc = -ENOMEM;
+ goto probe_fail2;
+ }
+
+ if (pdev->dev.of_node) {
+ of_property_read_u32(pdev->dev.of_node,
+ "cell-index", &pdev->id);
+
+ match_dev = of_match_device(pdev->dev.driver->of_match_table,
+ &pdev->dev);
+ if (!match_dev) {
+ pr_err("%s: No vfe hardware info\n", __func__);
+ rc = -EINVAL;
+ goto probe_fail3;
+ }
+ vfe_dev->hw_info =
+ (struct msm_vfe_hardware_info *) match_dev->data;
+ } else {
+ vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
+ platform_get_device_id(pdev)->driver_data;
+ }
+
+ if (!vfe_dev->hw_info) {
+ pr_err("%s: No vfe hardware info\n", __func__);
+ rc = -EINVAL;
+ goto probe_fail3;
+ }
+ ISP_DBG("%s: device id = %d\n", __func__, pdev->id);
+
+ vfe_dev->pdev = pdev;
+
+
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.get_platform_data(vfe_dev);
+ if (rc < 0) {
+ pr_err("%s: failed to get platform resources\n", __func__);
+ rc = -ENOMEM;
+ goto probe_fail3;
+ }
+
+ INIT_LIST_HEAD(&vfe_dev->tasklet_q);
+ tasklet_init(&vfe_dev->vfe_tasklet,
+ msm_isp_do_tasklet, (unsigned long)vfe_dev);
+
+ v4l2_subdev_init(&vfe_dev->subdev.sd, &msm_vfe_v4l2_subdev_ops);
+ vfe_dev->subdev.sd.internal_ops =
+ &msm_vfe_subdev_internal_ops;
+ snprintf(vfe_dev->subdev.sd.name,
+ ARRAY_SIZE(vfe_dev->subdev.sd.name),
+ "vfe");
+ vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ vfe_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ v4l2_set_subdevdata(&vfe_dev->subdev.sd, vfe_dev);
+ platform_set_drvdata(pdev, &vfe_dev->subdev.sd);
+ mutex_init(&vfe_dev->realtime_mutex);
+ mutex_init(&vfe_dev->core_mutex);
+ spin_lock_init(&vfe_dev->tasklet_lock);
+ spin_lock_init(&vfe_dev->shared_data_lock);
+ spin_lock_init(&vfe_dev->reg_update_lock);
+ spin_lock_init(&req_history_lock);
+ media_entity_init(&vfe_dev->subdev.sd.entity, 0, NULL, 0);
+ vfe_dev->subdev.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ vfe_dev->subdev.sd.entity.group_id = MSM_CAMERA_SUBDEV_VFE;
+ vfe_dev->subdev.sd.entity.name = pdev->name;
+ vfe_dev->subdev.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x2;
+ rc = msm_sd_register(&vfe_dev->subdev);
+ if (rc != 0) {
+ pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+ goto probe_fail3;
+ }
+ msm_cam_copy_v4l2_subdev_fops(&msm_isp_v4l2_fops);
+ msm_isp_v4l2_fops.unlocked_ioctl = msm_isp_v4l2_fops_ioctl;
+#ifdef CONFIG_COMPAT
+ msm_isp_v4l2_fops.compat_ioctl32 =
+ msm_isp_v4l2_fops_ioctl;
+#endif
+ vfe_dev->subdev.sd.devnode->fops = &msm_isp_v4l2_fops;
+
+ vfe_dev->buf_mgr = &vfe_buf_mgr;
+ v4l2_subdev_notify(&vfe_dev->subdev.sd,
+ MSM_SD_NOTIFY_REQ_CB, &vfe_vb2_ops);
+ rc = msm_isp_create_isp_buf_mgr(vfe_dev->buf_mgr,
+ &vfe_vb2_ops, &pdev->dev,
+ vfe_dev->hw_info->axi_hw_info->scratch_buf_range);
+ if (rc < 0) {
+ pr_err("%s: Unable to create buffer manager\n", __func__);
+ rc = -EINVAL;
+ goto probe_fail3;
+ }
+ msm_isp_enable_debugfs(vfe_dev, msm_isp_bw_request_history);
+ vfe_dev->buf_mgr->num_iommu_secure_ctx =
+ vfe_dev->hw_info->num_iommu_secure_ctx;
+ vfe_dev->buf_mgr->init_done = 1;
+ vfe_dev->vfe_open_cnt = 0;
+ return rc;
+
+probe_fail3:
+ kfree(vfe_dev->ub_info);
+probe_fail2:
+ kfree(vfe_dev->stats);
+probe_fail1:
+ kfree(vfe_dev);
+end:
+ return rc;
+}
+
+static struct platform_driver vfe_driver = {
+ .probe = vfe_probe,
+ .driver = {
+ .name = "msm_vfe",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe_dt_match,
+ },
+};
+
+static int __init msm_vfe_init_module(void)
+{
+ return platform_driver_register(&vfe_driver);
+}
+
+static void __exit msm_vfe_exit_module(void)
+{
+ platform_driver_unregister(&vfe_driver);
+}
+
+late_initcall(msm_vfe_init_module);
+module_exit(msm_vfe_exit_module);
+MODULE_DESCRIPTION("MSM VFE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
new file mode 100644
index 000000000000..87055954b68c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -0,0 +1,728 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VFE_H__
+#define __MSM_VFE_H__
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/avtimer_kernel.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_isp.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+#include "msm_buf_mgr.h"
+
+#define VFE40_8974V1_VERSION 0x10000018
+#define VFE40_8974V2_VERSION 0x1001001A
+#define VFE40_8974V3_VERSION 0x1001001B
+#define VFE40_8x26_VERSION 0x20000013
+#define VFE40_8x26V2_VERSION 0x20010014
+#define VFE40_8916_VERSION 0x10030000
+#define VFE40_8939_VERSION 0x10040000
+#define VFE40_8952_VERSION 0x10060000
+#define VFE40_8976_VERSION 0x10050000
+#define VFE40_8937_VERSION 0x10080000
+#define VFE40_8953_VERSION 0x10090000
+#define VFE32_8909_VERSION 0x30600
+
+#define MAX_IOMMU_CTX 2
+#define MAX_NUM_WM 7
+#define MAX_NUM_RDI 3
+#define MAX_NUM_RDI_MASTER 3
+#define MAX_NUM_COMPOSITE_MASK 4
+#define MAX_NUM_STATS_COMP_MASK 2
+#define MAX_INIT_FRAME_DROP 31
+#define MAX_REG_UPDATE_THRESHOLD 10
+#define ISP_Q2 (1 << 2)
+
+#define VFE_PING_FLAG 0xFFFFFFFF
+#define VFE_PONG_FLAG 0x0
+
+#define VFE_MAX_CFG_TIMEOUT 3000
+#define VFE_CLK_INFO_MAX 16
+#define STATS_COMP_BIT_MASK 0x1FF
+
+#define MSM_ISP_MIN_AB 100000000
+#define MSM_ISP_MIN_IB 100000000
+#define MAX_BUFFERS_IN_HW 2
+
+#define MAX_VFE 2
+
+struct vfe_device;
+struct msm_vfe_axi_stream;
+struct msm_vfe_stats_stream;
+
+#define VFE_SD_HW_MAX VFE_SD_COMMON
+
+/* This struct is used to save/track SOF info for an INTF,
+ * e.g. when used in Master-Slave mode. */
+struct msm_vfe_sof_info {
+ uint32_t timestamp_ms;
+ uint32_t mono_timestamp_ms;
+ uint32_t frame_id;
+};
+
+/* Each INTF in Master-Slave mode uses this struct. */
+struct msm_vfe_dual_hw_ms_info {
+ /* type is Master/Slave */
+ enum msm_vfe_dual_hw_ms_type dual_hw_ms_type;
+	/* sof_info is a resource from common_data. If NULL, the SOF info
+	 * for this INTF does not need to be saved. */
+ struct msm_vfe_sof_info *sof_info;
+ /* slave_id is index in common_data sof_info array for slaves */
+ uint8_t slave_id;
+};
+
+struct vfe_subscribe_info {
+ struct v4l2_fh *vfh;
+ uint32_t active;
+};
+
+enum msm_isp_pack_fmt {
+ QCOM,
+ MIPI,
+ DPCM6,
+ DPCM8,
+ PLAIN8,
+ PLAIN16,
+ DPCM10,
+ MAX_ISP_PACK_FMT,
+};
+
+enum msm_isp_camif_update_state {
+ NO_UPDATE,
+ ENABLE_CAMIF,
+ DISABLE_CAMIF,
+ DISABLE_CAMIF_IMMEDIATELY
+};
+
+struct msm_isp_timestamp {
+ /*Monotonic clock for v4l2 buffer*/
+ struct timeval buf_time;
+ /*Monotonic clock for VT */
+ struct timeval vt_time;
+ /*Wall clock for userspace event*/
+ struct timeval event_time;
+};
+
+struct msm_vfe_irq_ops {
+ void (*read_irq_status)(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1);
+ void (*process_reg_update)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_epoch_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_reset_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1);
+ void (*process_halt_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1);
+ void (*process_camif_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_axi_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*process_stats_irq)(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+ void (*enable_camif_err)(struct vfe_device *vfe_dev, int enable);
+};
+
+struct msm_vfe_axi_ops {
+ void (*reload_wm)(struct vfe_device *vfe_dev, void __iomem *vfe_base,
+ uint32_t reload_mask);
+ void (*enable_wm)(void __iomem *vfe_base,
+ uint8_t wm_idx, uint8_t enable);
+ int32_t (*cfg_io_format)(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src,
+ uint32_t io_format);
+ void (*cfg_framedrop)(void __iomem *vfe_base,
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t framedrop_pattern, uint32_t framedrop_period);
+ void (*clear_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*clear_comp_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+ void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+
+ void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx);
+ void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+
+ void (*cfg_wm_xbar_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx);
+ void (*clear_wm_xbar_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx);
+
+ void (*cfg_ub)(struct vfe_device *vfe_dev);
+
+ void (*read_wm_ping_pong_addr)(struct vfe_device *vfe_dev);
+
+ void (*update_ping_pong_addr)(void __iomem *vfe_base,
+ uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
+ int32_t buf_size);
+
+ uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+ int (*halt)(struct vfe_device *vfe_dev, uint32_t blocking);
+ int (*restart)(struct vfe_device *vfe_dev, uint32_t blocking,
+ uint32_t enable_camif);
+ void (*update_cgc_override)(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t cgc_override);
+};
+
+struct msm_vfe_core_ops {
+ void (*reg_update)(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+ long (*reset_hw)(struct vfe_device *vfe_dev, uint32_t first_start,
+ uint32_t blocking_call);
+ int (*init_hw)(struct vfe_device *vfe_dev);
+ void (*init_hw_reg)(struct vfe_device *vfe_dev);
+ void (*clear_status_reg)(struct vfe_device *vfe_dev);
+ void (*release_hw)(struct vfe_device *vfe_dev);
+ void (*cfg_input_mux)(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg);
+ int (*start_fetch_eng)(struct vfe_device *vfe_dev,
+ void *arg);
+ void (*update_camif_state)(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state);
+ void (*cfg_rdi_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_rdi_cfg *rdi_cfg,
+ enum msm_vfe_input_src input_src);
+ int (*get_platform_data)(struct vfe_device *vfe_dev);
+ void (*get_error_mask)(uint32_t *error_mask0, uint32_t *error_mask1);
+ void (*process_error_status)(struct vfe_device *vfe_dev);
+ void (*get_overflow_mask)(uint32_t *overflow_mask);
+ void (*get_irq_mask)(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask);
+ void (*restore_irq_mask)(struct vfe_device *vfe_dev);
+ void (*get_halt_restart_mask)(uint32_t *irq0_mask,
+ uint32_t *irq1_mask);
+ void (*get_rdi_wm_mask)(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask);
+ bool (*is_module_cfg_lock_needed)(uint32_t reg_offset);
+};
+struct msm_vfe_stats_ops {
+ int (*get_stats_idx)(enum msm_isp_stats_type stats_type);
+ int (*check_streams)(struct msm_vfe_stats_stream *stream_info);
+ void (*cfg_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info,
+ uint32_t framedrop_pattern, uint32_t framedrop_period);
+ void (*clear_framedrop)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*cfg_comp_mask)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t comp_index,
+ uint8_t enable);
+ void (*cfg_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*clear_wm_irq_mask)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+
+ void (*cfg_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+ void (*clear_wm_reg)(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info);
+
+ void (*cfg_ub)(struct vfe_device *vfe_dev);
+
+ void (*enable_module)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+
+ void (*update_ping_pong_addr)(void __iomem *vfe_base,
+ struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status, dma_addr_t paddr);
+
+ uint32_t (*get_frame_id)(struct vfe_device *vfe_dev);
+ uint32_t (*get_wm_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_comp_mask)(uint32_t irq_status0, uint32_t irq_status1);
+ uint32_t (*get_pingpong_status)(struct vfe_device *vfe_dev);
+
+ void (*update_cgc_override)(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable);
+};
+
+struct msm_vfe_ops {
+ struct msm_vfe_irq_ops irq_ops;
+ struct msm_vfe_axi_ops axi_ops;
+ struct msm_vfe_core_ops core_ops;
+ struct msm_vfe_stats_ops stats_ops;
+};
+
+struct msm_vfe_hardware_info {
+ int num_iommu_ctx;
+ /* secure iommu ctx nums */
+ int num_iommu_secure_ctx;
+ int vfe_clk_idx;
+ int runtime_axi_update;
+ struct msm_vfe_ops vfe_ops;
+ struct msm_vfe_axi_hardware_info *axi_hw_info;
+ struct msm_vfe_stats_hardware_info *stats_hw_info;
+ uint32_t dmi_reg_offset;
+};
+
+struct msm_vfe_axi_hardware_info {
+ uint8_t num_wm;
+ uint8_t num_rdi;
+ uint8_t num_rdi_master;
+ uint8_t num_comp_mask;
+ uint32_t min_wm_ub;
+ uint32_t scratch_buf_range;
+};
+
+enum msm_vfe_axi_state {
+ AVAILABLE,
+ INACTIVE,
+ ACTIVE,
+ PAUSED,
+ START_PENDING,
+ STOP_PENDING,
+ PAUSE_PENDING,
+ RESUME_PENDING,
+ STARTING,
+ STOPPING,
+ PAUSING,
+ RESUMING,
+ UPDATING,
+};
+
+enum msm_vfe_axi_cfg_update_state {
+ NO_AXI_CFG_UPDATE,
+ APPLYING_UPDATE_RESUME,
+ UPDATE_REQUESTED,
+};
+
+#define VFE_NO_DROP 0xFFFFFFFF
+#define VFE_DROP_EVERY_2FRAME 0x55555555
+#define VFE_DROP_EVERY_4FRAME 0x11111111
+#define VFE_DROP_EVERY_8FRAME 0x01010101
+#define VFE_DROP_EVERY_16FRAME 0x00010001
+#define VFE_DROP_EVERY_32FRAME 0x00000001
+
+enum msm_vfe_axi_stream_type {
+ CONTINUOUS_STREAM,
+ BURST_STREAM,
+};
+
+struct msm_vfe_frame_request_queue {
+ struct list_head list;
+ enum msm_vfe_buff_queue_id buff_queue_id;
+ uint8_t cmd_used;
+};
+
+#define MSM_VFE_REQUESTQ_SIZE 8
+
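+/*
+ * Runtime state for one AXI output stream: assigned write masters,
+ * per-plane configuration, buffer queue handles, framedrop/burst
+ * bookkeeping and the bandwidth hints used for bus voting.
+ */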
+struct msm_vfe_axi_stream {
+ uint32_t frame_id;
+ enum msm_vfe_axi_state state;
+ enum msm_vfe_axi_stream_src stream_src;
+ uint8_t num_planes;
+ uint8_t wm[MAX_PLANES_PER_STREAM];
+	uint32_t output_format; /* Planar/RAW/Misc */
+ struct msm_vfe_axi_plane_cfg plane_cfg[MAX_PLANES_PER_STREAM];
+ uint8_t comp_mask_index;
+ struct msm_isp_buffer *buf[2];
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t bufq_handle[VFE_BUF_QUEUE_MAX];
+ uint8_t controllable_output;
+ uint8_t undelivered_request_cnt;
+ uint8_t request_q_idx;
+ uint32_t request_q_cnt;
+ struct list_head request_q;
+ struct msm_vfe_frame_request_queue
+ request_queue_cmd[MSM_VFE_REQUESTQ_SIZE];
+ uint32_t stream_handle;
+ uint8_t buf_divert;
+ enum msm_vfe_axi_stream_type stream_type;
+ uint32_t frame_based;
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern;
+ uint32_t current_framedrop_period;
+ uint32_t prev_framedrop_period;
+	uint32_t num_burst_capture; /* number of frames to capture */
+ uint32_t init_frame_drop;
+ spinlock_t lock;
+
+ /*Bandwidth calculation info*/
+ uint32_t max_width;
+	/* Based on format plane size in Q2, e.g. NV12 = 1.5 */
+ uint32_t format_factor;
+ uint32_t bandwidth;
+
+ uint32_t runtime_num_burst_capture;
+ uint32_t runtime_output_format;
+ enum msm_stream_memory_input_t memory_input;
+ struct msm_isp_sw_framskip sw_skip;
+ uint8_t sw_ping_pong_bit;
+};
+
+struct msm_vfe_axi_composite_info {
+ uint32_t stream_handle;
+ uint32_t stream_composite_mask;
+};
+
+enum msm_vfe_camif_state {
+ CAMIF_STOPPED,
+ CAMIF_ENABLE,
+ CAMIF_DISABLE,
+ CAMIF_STOPPING,
+};
+
+struct msm_vfe_src_info {
+ uint32_t frame_id;
+ uint32_t reg_update_frame_id;
+ uint8_t active;
+ uint8_t pix_stream_count;
+ uint8_t raw_stream_count;
+ enum msm_vfe_inputmux input_mux;
+ uint32_t width;
+ long pixel_clock;
+	uint32_t input_format; /* V4L2 pix format with bayer pattern */
+ uint32_t last_updt_frm_id;
+ uint32_t sof_counter_step;
+ struct timeval time_stamp;
+ enum msm_vfe_dual_hw_type dual_hw_type;
+ struct msm_vfe_dual_hw_ms_info dual_hw_ms_info;
+};
+
+struct msm_vfe_fetch_engine_info {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t bufq_handle;
+ uint32_t buf_idx;
+ uint8_t is_busy;
+ uint8_t offline_mode;
+ uint32_t fd;
+};
+
+enum msm_wm_ub_cfg_type {
+ MSM_WM_UB_CFG_DEFAULT,
+ MSM_WM_UB_EQUAL_SLICING,
+ MSM_WM_UB_CFG_MAX_NUM
+};
+
+struct msm_vfe_axi_shared_data {
+ struct msm_vfe_axi_hardware_info *hw_info;
+ struct msm_vfe_axi_stream stream_info[VFE_AXI_SRC_MAX];
+ uint32_t free_wm[MAX_NUM_WM];
+ uint32_t wm_image_size[MAX_NUM_WM];
+ enum msm_wm_ub_cfg_type wm_ub_cfg_policy;
+ uint8_t num_used_wm;
+ uint8_t num_active_stream;
+ uint8_t num_rdi_stream;
+ uint8_t num_pix_stream;
+ uint32_t rdi_wm_mask;
+ struct msm_vfe_axi_composite_info
+ composite_info[MAX_NUM_COMPOSITE_MASK];
+ uint8_t num_used_composite_mask;
+ uint32_t stream_update[VFE_SRC_MAX];
+ atomic_t axi_cfg_update[VFE_SRC_MAX];
+ enum msm_isp_camif_update_state pipeline_update;
+ struct msm_vfe_src_info src_info[VFE_SRC_MAX];
+ uint16_t stream_handle_cnt;
+ uint32_t event_mask;
+ uint8_t enable_frameid_recovery;
+ enum msm_vfe_camif_state camif_state;
+};
+
+struct msm_vfe_stats_hardware_info {
+ uint32_t stats_capability_mask;
+ uint8_t *stats_ping_pong_offset;
+ uint8_t num_stats_type;
+ uint8_t num_stats_comp_mask;
+};
+
+enum msm_vfe_stats_state {
+ STATS_AVALIABLE,
+ STATS_INACTIVE,
+ STATS_ACTIVE,
+ STATS_START_PENDING,
+ STATS_STOP_PENDING,
+ STATS_STARTING,
+ STATS_STOPPING,
+};
+
+struct msm_vfe_stats_stream {
+ uint32_t session_id;
+ uint32_t stream_id;
+ uint32_t stream_handle;
+ uint32_t composite_flag;
+ enum msm_isp_stats_type stats_type;
+ enum msm_vfe_stats_state state;
+ uint32_t framedrop_pattern;
+ uint32_t framedrop_period;
+ uint32_t irq_subsample_pattern;
+ uint32_t init_stats_frame_drop;
+ struct msm_isp_sw_framskip sw_skip;
+
+ uint32_t buffer_offset;
+ struct msm_isp_buffer *buf[2];
+ uint32_t bufq_handle;
+};
+
+struct msm_vfe_stats_shared_data {
+ struct msm_vfe_stats_stream stream_info[MSM_ISP_STATS_MAX];
+ uint8_t num_active_stream;
+ atomic_t stats_comp_mask[MAX_NUM_STATS_COMP_MASK];
+ uint16_t stream_handle_cnt;
+ atomic_t stats_update;
+};
+
+struct msm_vfe_tasklet_queue_cmd {
+ struct list_head list;
+ uint32_t vfeInterruptStatus0;
+ uint32_t vfeInterruptStatus1;
+ struct msm_isp_timestamp ts;
+ uint8_t cmd_used;
+ uint8_t iommu_page_fault;
+};
+
+#define MSM_VFE_TASKLETQ_SIZE 200
+
+enum msm_vfe_overflow_state {
+ NO_OVERFLOW,
+ OVERFLOW_DETECTED,
+ HALT_ENFORCED,
+};
+
+struct msm_vfe_error_info {
+ atomic_t overflow_state;
+ uint32_t overflow_recover_irq_mask0;
+ uint32_t overflow_recover_irq_mask1;
+ uint32_t error_mask0;
+ uint32_t error_mask1;
+ uint32_t violation_status;
+ uint32_t camif_status;
+ uint8_t stream_framedrop_count[BUF_MGR_NUM_BUF_Q];
+ uint8_t stats_framedrop_count[MSM_ISP_STATS_MAX];
+ uint32_t info_dump_frame_count;
+ uint32_t error_count;
+ uint32_t framedrop_flag;
+};
+
+struct msm_isp_statistics {
+ int64_t imagemaster0_overflow;
+ int64_t imagemaster1_overflow;
+ int64_t imagemaster2_overflow;
+ int64_t imagemaster3_overflow;
+ int64_t imagemaster4_overflow;
+ int64_t imagemaster5_overflow;
+ int64_t imagemaster6_overflow;
+ int64_t be_overflow;
+ int64_t bg_overflow;
+ int64_t bf_overflow;
+ int64_t awb_overflow;
+ int64_t rs_overflow;
+ int64_t cs_overflow;
+ int64_t ihist_overflow;
+ int64_t skinbhist_overflow;
+ int64_t bfscale_overflow;
+
+ int64_t isp_vfe0_active;
+ int64_t isp_vfe0_ab;
+ int64_t isp_vfe0_ib;
+
+ int64_t isp_vfe1_active;
+ int64_t isp_vfe1_ab;
+ int64_t isp_vfe1_ib;
+
+ int64_t isp_cpp_active;
+ int64_t isp_cpp_ab;
+ int64_t isp_cpp_ib;
+
+ int64_t last_overflow_ab;
+ int64_t last_overflow_ib;
+
+ int64_t vfe_clk_rate;
+ int64_t cpp_clk_rate;
+};
+
+enum msm_isp_hw_client {
+ ISP_VFE0,
+ ISP_VFE1,
+ ISP_CPP,
+ MAX_ISP_CLIENT,
+};
+
+struct msm_isp_bandwidth_info {
+ uint32_t active;
+ uint64_t ab;
+ uint64_t ib;
+};
+
+struct msm_isp_bw_req_info {
+ uint32_t client;
+ unsigned long long timestamp;
+ uint64_t total_ab;
+ uint64_t total_ib;
+ struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+#define MSM_ISP_MAX_WM 7
+struct msm_isp_ub_info {
+ enum msm_wm_ub_cfg_type policy;
+ uint8_t num_wm;
+ uint32_t wm_ub;
+ uint32_t data[MSM_ISP_MAX_WM];
+ uint64_t addr[MSM_ISP_MAX_WM];
+};
+
+struct msm_vfe_hw_init_parms {
+ const char *entries;
+ const char *regs;
+ const char *settings;
+};
+
+struct dual_vfe_resource {
+ struct vfe_device *vfe_dev[MAX_VFE];
+ void __iomem *vfe_base[MAX_VFE];
+ uint32_t reg_update_mask[MAX_VFE];
+ struct msm_vfe_stats_shared_data *stats_data[MAX_VFE];
+ struct msm_vfe_axi_shared_data *axi_data[MAX_VFE];
+ uint32_t wm_reload_mask[MAX_VFE];
+};
+
+struct master_slave_resource_info {
+ enum msm_vfe_dual_hw_type dual_hw_type;
+ struct msm_vfe_sof_info master_sof_info;
+ uint8_t master_active;
+ uint32_t sof_delta_threshold; /* Updated by Master */
+ uint32_t num_slave;
+ uint32_t reserved_slave_mask;
+ uint32_t slave_active_mask;
+ struct msm_vfe_sof_info slave_sof_info[MS_NUM_SLAVE_MAX];
+};
+
+struct msm_vfe_common_dev_data {
+ spinlock_t common_dev_data_lock;
+ struct dual_vfe_resource *dual_vfe_res;
+ struct master_slave_resource_info ms_resource;
+};
+
+struct msm_vfe_common_subdev {
+ /* parent reference */
+ struct vfe_parent_device *parent;
+
+ /* Media Subdevice */
+ struct msm_sd_subdev *subdev;
+
+ /* Buf Mgr */
+ struct msm_isp_buf_mgr *buf_mgr;
+
+ /* Common Data */
+ struct msm_vfe_common_dev_data *common_data;
+};
+
+struct vfe_device {
+ /* Driver private data */
+ struct platform_device *pdev;
+ struct msm_vfe_common_dev_data *common_data;
+ struct msm_sd_subdev subdev;
+ struct msm_isp_buf_mgr *buf_mgr;
+
+ /* Resource info */
+ struct resource *vfe_irq;
+ struct resource *vfe_mem;
+ struct resource *vfe_vbif_mem;
+ struct resource *vfe_io;
+ struct resource *vfe_vbif_io;
+ void __iomem *vfe_base;
+ void __iomem *vfe_vbif_base;
+ struct device *iommu_ctx[MAX_IOMMU_CTX];
+ struct regulator *fs_vfe;
+ struct regulator *fs_camss;
+ struct regulator *fs_mmagic_camss;
+ struct clk **vfe_clk;
+ uint32_t num_clk;
+
+ /* Sync variables*/
+ struct completion reset_complete;
+ struct completion halt_complete;
+ struct completion stream_config_complete;
+ struct completion stats_config_complete;
+ struct mutex realtime_mutex;
+ struct mutex core_mutex;
+ spinlock_t shared_data_lock;
+ spinlock_t reg_update_lock;
+ spinlock_t tasklet_lock;
+
+ /* Tasklet info */
+ atomic_t irq_cnt;
+ uint8_t taskletq_idx;
+ struct list_head tasklet_q;
+ struct tasklet_struct vfe_tasklet;
+ struct msm_vfe_tasklet_queue_cmd
+ tasklet_queue_cmd[MSM_VFE_TASKLETQ_SIZE];
+
+ /* Data structures */
+ struct msm_vfe_hardware_info *hw_info;
+ struct msm_vfe_axi_shared_data axi_data;
+ struct msm_vfe_stats_shared_data stats_data;
+ struct msm_vfe_error_info error_info;
+ struct msm_vfe_fetch_engine_info fetch_engine_info;
+ enum msm_vfe_hvx_streaming_cmd hvx_cmd;
+
+ /* State variables */
+ uint32_t vfe_hw_version;
+ int vfe_clk_idx;
+ uint32_t vfe_open_cnt;
+ uint8_t vt_enable;
+ uint8_t ignore_error;
+ uint32_t vfe_ub_policy;
+ uint8_t reset_pending;
+ uint8_t reg_update_requested;
+ uint8_t reg_updated;
+ uint32_t is_split;
+ uint32_t dual_vfe_enable;
+ unsigned long page_fault_addr;
+
+ /* Debug variables */
+ int dump_reg;
+ struct msm_isp_statistics *stats;
+ uint64_t msm_isp_last_overflow_ab;
+ uint64_t msm_isp_last_overflow_ib;
+ uint64_t msm_isp_vfe_clk_rate;
+ struct msm_isp_ub_info *ub_info;
+ uint32_t isp_sof_debug;
+ uint32_t isp_raw0_debug;
+ uint32_t isp_raw1_debug;
+ uint32_t isp_raw2_debug;
+ uint8_t is_camif_raw_crop_supported;
+};
+
+struct vfe_parent_device {
+ struct platform_device *pdev;
+ uint32_t num_sd;
+ uint32_t num_hw_sd;
+ struct platform_device *child_list[VFE_SD_HW_MAX];
+ struct msm_vfe_common_subdev *common_sd;
+};
+
+int vfe_hw_probe(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
new file mode 100644
index 000000000000..062ff3bea7e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.c
@@ -0,0 +1,1576 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "msm_isp32.h"
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_isp.h"
+#include "msm.h"
+#include "msm_camera_io_util.h"
+
+static const struct platform_device_id msm_vfe32_dev_id[] = {
+ {"msm_vfe32", (kernel_ulong_t) &vfe32_hw_info},
+ {}
+};
+
+#define VFE32_BURST_LEN 2
+#define VFE32_UB_SIZE 1024
+#define VFE32_UB_SIZE_32KB 2048
+#define VFE32_EQUAL_SLICE_UB 194
+#define VFE32_AXI_SLICE_UB 792
+#define VFE32_WM_BASE(idx) (0x4C + 0x18 * idx)
+#define VFE32_RDI_BASE(idx) (idx ? 0x734 + 0x4 * (idx - 1) : 0x06FC)
+#define VFE32_XBAR_BASE(idx) (0x40 + 0x4 * (idx / 4))
+#define VFE32_XBAR_SHIFT(idx) ((idx % 4) * 8)
+#define VFE32_PING_PONG_BASE(wm, ping_pong) \
+ (VFE32_WM_BASE(wm) + 0x4 * (1 + ((~ping_pong) & 0x1)))
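+/*
+ * For example, VFE32_PING_PONG_BASE(0, ping_pong) evaluates to
+ * VFE32_WM_BASE(0) + 0x8 when the LSB of ping_pong is 0, and to
+ * VFE32_WM_BASE(0) + 0x4 when the LSB is 1.
+ */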
+
+static uint8_t stats_pingpong_offset_map[] = {
+ 7, 8, 9, 10, 11, 12, 13};
+
+#define VFE32_NUM_STATS_TYPE 7
+#define VFE32_STATS_BASE(idx) (0xF4 + 0xC * idx)
+#define VFE32_STATS_PING_PONG_BASE(idx, ping_pong) \
+ (VFE32_STATS_BASE(idx) + 0x4 * \
+ (~(ping_pong >> (stats_pingpong_offset_map[idx])) & 0x1))
+
+#define VFE32_CLK_IDX 1
+#define MSM_ISP32_TOTAL_WM_UB 792
+/* 792 double words */
+
+static struct msm_cam_clk_info msm_vfe32_1_clk_info[VFE_CLK_INFO_MAX];
+
+static struct msm_cam_clk_info msm_vfe32_2_clk_info[] = {
+ /*vfe32 clock info for A-family: 8960 */
+ {"vfe_clk", 266667000},
+ {"vfe_pclk", -1},
+ {"csi_vfe_clk", -1},
+};
+
+static int32_t msm_vfe32_init_qos_parms(struct vfe_device *vfe_dev,
+ struct msm_vfe_hw_init_parms *qos_parms,
+ struct msm_vfe_hw_init_parms *ds_parms)
+{
+ void __iomem *vfebase = vfe_dev->vfe_base;
+ struct device_node *of_node;
+ uint32_t *ds_settings = NULL, *ds_regs = NULL, ds_entries = 0;
+ int32_t i = 0 , rc = 0;
+ uint32_t *qos_settings = NULL, *qos_regs = NULL, qos_entries = 0;
+ of_node = vfe_dev->pdev->dev.of_node;
+
+ rc = of_property_read_u32(of_node, qos_parms->entries,
+ &qos_entries);
+ if (rc < 0 || !qos_entries) {
+ pr_err("%s: NO QOS entries found\n", __func__);
+ } else {
+ qos_settings = kzalloc(sizeof(uint32_t) * qos_entries,
+ GFP_KERNEL);
+ if (!qos_settings) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ qos_regs = kzalloc(sizeof(uint32_t) * qos_entries,
+ GFP_KERNEL);
+ if (!qos_regs) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ kfree(qos_settings);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, qos_parms->regs,
+ qos_regs, qos_entries);
+ if (rc < 0) {
+ pr_err("%s: NO QOS BUS BDG info\n", __func__);
+ kfree(qos_settings);
+ kfree(qos_regs);
+ } else {
+ if (qos_parms->settings) {
+ rc = of_property_read_u32_array(of_node,
+ qos_parms->settings,
+ qos_settings, qos_entries);
+ if (rc < 0) {
+ pr_err("%s: NO QOS settings\n",
+ __func__);
+ kfree(qos_settings);
+ kfree(qos_regs);
+ } else {
+ for (i = 0; i < qos_entries; i++)
+ msm_camera_io_w(qos_settings[i],
+ vfebase + qos_regs[i]);
+ kfree(qos_settings);
+ kfree(qos_regs);
+ }
+ } else {
+ kfree(qos_settings);
+ kfree(qos_regs);
+ }
+ }
+ }
+ rc = of_property_read_u32(of_node, ds_parms->entries,
+ &ds_entries);
+ if (rc < 0 || !ds_entries) {
+ pr_err("%s: NO D/S entries found\n", __func__);
+ } else {
+ ds_settings = kzalloc(sizeof(uint32_t) * ds_entries,
+ GFP_KERNEL);
+ if (!ds_settings) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ ds_regs = kzalloc(sizeof(uint32_t) * ds_entries,
+ GFP_KERNEL);
+ if (!ds_regs) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ kfree(ds_settings);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, ds_parms->regs,
+ ds_regs, ds_entries);
+ if (rc < 0) {
+ pr_err("%s: NO D/S register info\n", __func__);
+ kfree(ds_settings);
+ kfree(ds_regs);
+ } else {
+ if (ds_parms->settings) {
+ rc = of_property_read_u32_array(of_node,
+ ds_parms->settings, ds_settings,
+ ds_entries);
+ if (rc < 0) {
+ pr_err("%s: NO D/S settings\n",
+ __func__);
+ kfree(ds_settings);
+ kfree(ds_regs);
+ } else {
+ for (i = 0; i < ds_entries; i++)
+ msm_camera_io_w(ds_settings[i],
+ vfebase + ds_regs[i]);
+ kfree(ds_regs);
+ kfree(ds_settings);
+ }
+ } else {
+ kfree(ds_regs);
+ kfree(ds_settings);
+ }
+ }
+ }
+ return 0;
+}
+
+static int32_t msm_vfe32_init_vbif_parms(struct vfe_device *vfe_dev,
+ struct msm_vfe_hw_init_parms *vbif_parms)
+{
+ void __iomem *vfe_vbif_base = vfe_dev->vfe_vbif_base;
+ struct device_node *of_node;
+ int32_t i = 0 , rc = 0;
+ uint32_t *vbif_settings = NULL, *vbif_regs = NULL, vbif_entries = 0;
+ of_node = vfe_dev->pdev->dev.of_node;
+
+ rc = of_property_read_u32(of_node, vbif_parms->entries,
+ &vbif_entries);
+ if (rc < 0 || !vbif_entries) {
+ pr_err("%s: NO VBIF entries found\n", __func__);
+ } else {
+ vbif_settings = kzalloc(sizeof(uint32_t) * vbif_entries,
+ GFP_KERNEL);
+ if (!vbif_settings) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ vbif_regs = kzalloc(sizeof(uint32_t) * vbif_entries,
+ GFP_KERNEL);
+ if (!vbif_regs) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ kfree(vbif_settings);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, vbif_parms->regs,
+ vbif_regs, vbif_entries);
+ if (rc < 0) {
+ pr_err("%s: NO VBIF info\n", __func__);
+ kfree(vbif_settings);
+ kfree(vbif_regs);
+ } else {
+ rc = of_property_read_u32_array(of_node,
+ vbif_parms->settings,
+ vbif_settings, vbif_entries);
+ if (rc < 0) {
+ pr_err("%s: NO VBIF settings\n",
+ __func__);
+ kfree(vbif_settings);
+ kfree(vbif_regs);
+ } else {
+ for (i = 0; i < vbif_entries; i++)
+ msm_camera_io_w(
+ vbif_settings[i],
+ vfe_vbif_base + vbif_regs[i]);
+ kfree(vbif_settings);
+ kfree(vbif_regs);
+ }
+ }
+ }
+ return 0;
+}
+
+static int msm_vfe32_init_hardware(struct vfe_device *vfe_dev)
+{
+ int rc = -1;
+ vfe_dev->vfe_clk_idx = 0;
+ rc = msm_isp_init_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
+ goto bus_scale_register_failed;
+ }
+
+ if (vfe_dev->fs_vfe) {
+ rc = regulator_enable(vfe_dev->fs_vfe);
+ if (rc) {
+ pr_err("%s: Regulator enable failed\n", __func__);
+ goto fs_failed;
+ }
+ }
+
+ rc = msm_isp_get_clk_info(vfe_dev, vfe_dev->pdev,
+ &msm_vfe32_1_clk_info[0]);
+ if (rc < 0) {
+ pr_err("msm_isp_get_clk_info() failed\n");
+ goto fs_failed;
+ }
+
+ if (vfe_dev->num_clk <= 0) {
+ pr_err("%s: Invalid num of clock\n", __func__);
+ goto fs_failed;
+ } else {
+ vfe_dev->vfe_clk =
+ kzalloc(sizeof(struct clk *) * vfe_dev->num_clk,
+ GFP_KERNEL);
+ if (!vfe_dev->vfe_clk) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+ rc = msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe32_1_clk_info,
+ vfe_dev->vfe_clk, ARRAY_SIZE(msm_vfe32_1_clk_info), 1);
+ if (rc < 0) {
+ rc = msm_cam_clk_enable(&vfe_dev->pdev->dev,
+ msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
+ ARRAY_SIZE(msm_vfe32_2_clk_info), 1);
+ if (rc < 0)
+ goto clk_enable_failed;
+ else
+ vfe_dev->vfe_clk_idx = 2;
+ } else
+ vfe_dev->vfe_clk_idx = 1;
+
+ vfe_dev->vfe_base = ioremap(vfe_dev->vfe_mem->start,
+ resource_size(vfe_dev->vfe_mem));
+ if (!vfe_dev->vfe_base) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto vfe_remap_failed;
+ }
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
+ vfe_dev->vfe_base;
+
+ vfe_dev->vfe_vbif_base = ioremap(vfe_dev->vfe_vbif_mem->start,
+ resource_size(vfe_dev->vfe_vbif_mem));
+ if (!vfe_dev->vfe_vbif_base) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto vbif_remap_failed;
+ }
+
+ rc = request_irq(vfe_dev->vfe_irq->start, msm_isp_process_irq,
+ IRQF_TRIGGER_RISING, "vfe", vfe_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request failed\n", __func__);
+ goto irq_req_failed;
+ }
+
+ return rc;
+irq_req_failed:
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+vbif_remap_failed:
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+vfe_remap_failed:
+ if (vfe_dev->vfe_clk_idx == 1)
+ msm_cam_clk_enable(&vfe_dev->pdev->dev,
+ msm_vfe32_1_clk_info, vfe_dev->vfe_clk,
+ ARRAY_SIZE(msm_vfe32_1_clk_info), 0);
+ if (vfe_dev->vfe_clk_idx == 2)
+ msm_cam_clk_enable(&vfe_dev->pdev->dev,
+ msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
+ ARRAY_SIZE(msm_vfe32_2_clk_info), 0);
+clk_enable_failed:
+ if (vfe_dev->fs_vfe)
+ regulator_disable(vfe_dev->fs_vfe);
+ kfree(vfe_dev->vfe_clk);
+fs_failed:
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+bus_scale_register_failed:
+ return rc;
+}
+
+static void msm_vfe32_release_hardware(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x1C);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x20);
+ disable_irq(vfe_dev->vfe_irq->start);
+ free_irq(vfe_dev->vfe_irq->start, vfe_dev);
+ tasklet_kill(&vfe_dev->vfe_tasklet);
+ msm_isp_flush_tasklet(vfe_dev);
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+ if (vfe_dev->vfe_clk_idx == 1)
+ msm_cam_clk_enable(&vfe_dev->pdev->dev,
+ msm_vfe32_1_clk_info, vfe_dev->vfe_clk,
+ ARRAY_SIZE(msm_vfe32_1_clk_info), 0);
+ if (vfe_dev->vfe_clk_idx == 2)
+ msm_cam_clk_enable(&vfe_dev->pdev->dev,
+ msm_vfe32_2_clk_info, vfe_dev->vfe_clk,
+ ARRAY_SIZE(msm_vfe32_2_clk_info), 0);
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+ kfree(vfe_dev->vfe_clk);
+ regulator_disable(vfe_dev->fs_vfe);
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+}
+
+static void msm_vfe32_init_hardware_reg(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_hw_init_parms qos_parms;
+ struct msm_vfe_hw_init_parms vbif_parms;
+ struct msm_vfe_hw_init_parms ds_parms;
+
+ qos_parms.entries = "qos-entries";
+ qos_parms.regs = "qos-regs";
+ qos_parms.settings = "qos-settings";
+ vbif_parms.entries = "vbif-entries";
+ vbif_parms.regs = "vbif-regs";
+ vbif_parms.settings = "vbif-settings";
+ ds_parms.entries = "ds-entries";
+ ds_parms.regs = "ds-regs";
+ ds_parms.settings = "ds-settings";
+
+ msm_vfe32_init_qos_parms(vfe_dev, &qos_parms, &ds_parms);
+ msm_vfe32_init_vbif_parms(vfe_dev, &vbif_parms);
+
+ /* CGC_OVERRIDE */
+ msm_camera_io_w(0x07FFFFFF, vfe_dev->vfe_base + 0xC);
+ /* BUS_CFG */
+ msm_camera_io_w(0x00000009, vfe_dev->vfe_base + 0x3C);
+ msm_camera_io_w(0x01000021, vfe_dev->vfe_base + 0x1C);
+ msm_camera_io_w_mb(0x1CFFFFFF, vfe_dev->vfe_base + 0x20);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
+ msm_camera_io_w_mb(0x1FFFFFFF, vfe_dev->vfe_base + 0x28);
+
+}
+
+static void msm_vfe32_clear_status_reg(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w((1 << 23), vfe_dev->vfe_base + 0x1C);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x20);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x24);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x18);
+}
+
+static void msm_vfe32_process_reset_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ if (irq_status1 & BIT(23))
+ complete(&vfe_dev->reset_complete);
+}
+
+static void msm_vfe32_process_halt_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+}
+
+static void msm_vfe32_process_camif_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0x1F))
+ return;
+
+ if (irq_status0 & BIT(0)) {
+ ISP_DBG("%s: SOF IRQ\n", __func__);
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
+ && vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count == 0) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
+ if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
+ msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
+ }
+ }
+}
+
+static void msm_vfe32_process_violation_status(struct vfe_device *vfe_dev)
+{
+ uint32_t violation_status = vfe_dev->error_info.violation_status;
+ if (!violation_status)
+ return;
+
+ if (violation_status & BIT(0))
+ pr_err("%s: black violation\n", __func__);
+ if (violation_status & BIT(1))
+ pr_err("%s: rolloff violation\n", __func__);
+ if (violation_status & BIT(2))
+ pr_err("%s: demux violation\n", __func__);
+ if (violation_status & BIT(3))
+ pr_err("%s: demosaic violation\n", __func__);
+ if (violation_status & BIT(4))
+ pr_err("%s: crop violation\n", __func__);
+ if (violation_status & BIT(5))
+ pr_err("%s: scale violation\n", __func__);
+ if (violation_status & BIT(6))
+ pr_err("%s: wb violation\n", __func__);
+ if (violation_status & BIT(7))
+ pr_err("%s: clf violation\n", __func__);
+ if (violation_status & BIT(8))
+ pr_err("%s: matrix violation\n", __func__);
+ if (violation_status & BIT(9))
+ pr_err("%s: rgb lut violation\n", __func__);
+ if (violation_status & BIT(10))
+ pr_err("%s: la violation\n", __func__);
+ if (violation_status & BIT(11))
+ pr_err("%s: chroma enhance violation\n", __func__);
+ if (violation_status & BIT(12))
+		pr_err("%s: chroma suppress mce violation\n", __func__);
+ if (violation_status & BIT(13))
+ pr_err("%s: skin enhance violation\n", __func__);
+ if (violation_status & BIT(14))
+ pr_err("%s: asf violation\n", __func__);
+ if (violation_status & BIT(15))
+ pr_err("%s: scale y violation\n", __func__);
+ if (violation_status & BIT(16))
+ pr_err("%s: scale cbcr violation\n", __func__);
+ if (violation_status & BIT(17))
+ pr_err("%s: chroma subsample violation\n", __func__);
+ if (violation_status & BIT(18))
+ pr_err("%s: framedrop enc y violation\n", __func__);
+ if (violation_status & BIT(19))
+ pr_err("%s: framedrop enc cbcr violation\n", __func__);
+ if (violation_status & BIT(20))
+ pr_err("%s: framedrop view y violation\n", __func__);
+ if (violation_status & BIT(21))
+ pr_err("%s: framedrop view cbcr violation\n", __func__);
+ if (violation_status & BIT(22))
+ pr_err("%s: realign buf y violation\n", __func__);
+ if (violation_status & BIT(23))
+ pr_err("%s: realign buf cb violation\n", __func__);
+ if (violation_status & BIT(24))
+ pr_err("%s: realign buf cr violation\n", __func__);
+}
+
+static void msm_vfe32_get_overflow_mask(uint32_t *overflow_mask)
+{
+ *overflow_mask = 0x0;
+}
+
+static void msm_vfe32_process_error_status(struct vfe_device *vfe_dev)
+{
+ uint32_t error_status1 = vfe_dev->error_info.error_mask1;
+
+ if (error_status1 & BIT(0))
+ pr_err("%s: camif error status: 0x%x\n",
+ __func__, vfe_dev->error_info.camif_status);
+ if (error_status1 & BIT(1))
+ pr_err("%s: stats bhist overwrite\n", __func__);
+ if (error_status1 & BIT(2))
+ pr_err("%s: stats cs overwrite\n", __func__);
+ if (error_status1 & BIT(3))
+ pr_err("%s: stats ihist overwrite\n", __func__);
+ if (error_status1 & BIT(4))
+ pr_err("%s: realign buf y overflow\n", __func__);
+ if (error_status1 & BIT(5))
+ pr_err("%s: realign buf cb overflow\n", __func__);
+ if (error_status1 & BIT(6))
+ pr_err("%s: realign buf cr overflow\n", __func__);
+ if (error_status1 & BIT(7)) {
+ pr_err("%s: violation\n", __func__);
+ msm_vfe32_process_violation_status(vfe_dev);
+ }
+ if (error_status1 & BIT(8)) {
+ vfe_dev->stats->imagemaster0_overflow++;
+ pr_err("%s: image master 0 bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(9)) {
+ vfe_dev->stats->imagemaster1_overflow++;
+ pr_err("%s: image master 1 bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(10)) {
+ vfe_dev->stats->imagemaster2_overflow++;
+ pr_err("%s: image master 2 bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(11)) {
+ vfe_dev->stats->imagemaster3_overflow++;
+ pr_err("%s: image master 3 bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(12)) {
+ vfe_dev->stats->imagemaster4_overflow++;
+ pr_err("%s: image master 4 bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(13)) {
+ vfe_dev->stats->imagemaster5_overflow++;
+ pr_err("%s: image master 5 bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(14)) {
+ vfe_dev->stats->imagemaster6_overflow++;
+ pr_err("%s: image master 6 bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(15)) {
+ vfe_dev->stats->bg_overflow++;
+ pr_err("%s: status ae/bg bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(16)) {
+ vfe_dev->stats->bf_overflow++;
+ pr_err("%s: status af/bf bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(17)) {
+ vfe_dev->stats->awb_overflow++;
+ pr_err("%s: status awb bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(18)) {
+ vfe_dev->stats->rs_overflow++;
+ pr_err("%s: status rs bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(19)) {
+ vfe_dev->stats->cs_overflow++;
+ pr_err("%s: status cs bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(20)) {
+ vfe_dev->stats->ihist_overflow++;
+ pr_err("%s: status ihist bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(21)) {
+ vfe_dev->stats->skinbhist_overflow++;
+ pr_err("%s: status skin bhist bus overflow\n", __func__);
+ }
+ if (error_status1 & BIT(22))
+ pr_err("%s: axi error\n", __func__);
+}
+
+static void msm_vfe32_read_irq_status(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1)
+{
+ *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
+ *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x24);
+ msm_camera_io_w_mb(*irq_status1, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x18);
+
+ if (*irq_status1 & BIT(0))
+ vfe_dev->error_info.camif_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x204);
+
+ if (*irq_status1 & BIT(7))
+ vfe_dev->error_info.violation_status |=
+ msm_camera_io_r(vfe_dev->vfe_base + 0x7B4);
+}
+
+static void msm_vfe32_process_reg_update(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ uint32_t rdi_status;
+ enum msm_vfe_input_src i;
+
+ if (!(irq_status0 & 0x20) && !(irq_status1 & 0x1C000000))
+ return;
+
+ if (irq_status0 & BIT(5)) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
+ VFE_PIX_0);
+ if (vfe_dev->axi_data.stream_update[VFE_PIX_0]) {
+ rdi_status = msm_camera_io_r(vfe_dev->vfe_base +
+ VFE32_XBAR_BASE(0));
+ rdi_status |= msm_camera_io_r(vfe_dev->vfe_base +
+ VFE32_XBAR_BASE(4));
+
+ if ((rdi_status & BIT(7)) && (!(irq_status0 & 0x20)))
+ return;
+ }
+ if (atomic_read(&vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ }
+
+ for (i = VFE_RAW_0; i <= VFE_RAW_2; i++) {
+ if (irq_status1 & BIT(26 + (i - VFE_RAW_0))) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ msm_isp_update_framedrop_reg(vfe_dev, i);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
+ i);
+ }
+ }
+
+ msm_isp_update_error_frame_count(vfe_dev);
+
+ return;
+}
+
+static void msm_vfe32_process_epoch_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ /* Not supported */
+}
+
+static void msm_vfe32_reg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
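+	/* In split (dual VFE) mode, VFE1 drives the reg update on both ISPs */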
+ if (vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) {
+ msm_camera_io_w_mb(0xF,
+ vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
+ + 0x260);
+ msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
+ } else if (!vfe_dev->is_split) {
+ msm_camera_io_w_mb(0xF, vfe_dev->vfe_base + 0x260);
+ }
+}
+
+static long msm_vfe32_reset_hardware(struct vfe_device *vfe_dev,
+ uint32_t first_start, uint32_t blocking)
+{
+ init_completion(&vfe_dev->reset_complete);
+ msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x4);
+ return wait_for_completion_timeout(
+ &vfe_dev->reset_complete, msecs_to_jiffies(50));
+}
+
+static void msm_vfe32_axi_reload_wm(
+ struct vfe_device *vfe_dev, void __iomem *vfe_base,
+ uint32_t reload_mask)
+{
+ if (!vfe_dev->pdev->dev.of_node) {
+ /*vfe32 A-family: 8960*/
+ msm_camera_io_w_mb(reload_mask, vfe_base + 0x38);
+ } else {
+ /*vfe32 B-family: 8610*/
+ msm_camera_io_w(0x0, vfe_base + 0x24);
+ msm_camera_io_w(0x0, vfe_base + 0x28);
+ msm_camera_io_w(0x0, vfe_base + 0x20);
+ msm_camera_io_w_mb(0x1, vfe_base + 0x18);
+		msm_camera_io_w(0x9AAAAAAA, vfe_base + 0x600);
+ msm_camera_io_w(reload_mask, vfe_base + 0x38);
+ }
+}
+
+static void msm_vfe32_axi_enable_wm(void __iomem *vfe_base,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val = msm_camera_io_r(
+ vfe_base + VFE32_WM_BASE(wm_idx));
+ if (enable)
+ val |= 0x1;
+ else
+ val &= ~0x1;
+ msm_camera_io_w_mb(val,
+ vfe_base + VFE32_WM_BASE(wm_idx));
+}
+
+static void msm_vfe32_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t comp_mask, comp_mask_index =
+ stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ comp_mask |= (axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x34);
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ irq_mask |= BIT(comp_mask_index + 21);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+}
+
+static void msm_vfe32_axi_clear_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x34);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x34);
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ irq_mask &= ~BIT(comp_mask_index + 21);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+}
+
+static void msm_vfe32_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ irq_mask |= BIT(stream_info->wm[0] + 6);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+}
+
+static void msm_vfe32_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ irq_mask &= ~BIT(stream_info->wm[0] + 6);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+}
+
+static void msm_vfe32_cfg_framedrop(void __iomem *vfe_base,
+ struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
+ uint32_t framedrop_period)
+{
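+	/*
+	 * Encoder and viewfinder outputs have separate Y/CbCr framedrop
+	 * period and pattern registers; the period is programmed as N - 1.
+	 */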
+ if (stream_info->stream_src == PIX_ENCODER) {
+ msm_camera_io_w(framedrop_period - 1, vfe_base + 0x504);
+ msm_camera_io_w(framedrop_period - 1, vfe_base + 0x508);
+ msm_camera_io_w(framedrop_pattern, vfe_base + 0x50C);
+ msm_camera_io_w(framedrop_pattern, vfe_base + 0x510);
+ } else if (stream_info->stream_src == PIX_VIEWFINDER) {
+ msm_camera_io_w(framedrop_period - 1, vfe_base + 0x514);
+ msm_camera_io_w(framedrop_period - 1, vfe_base + 0x518);
+ msm_camera_io_w(framedrop_pattern, vfe_base + 0x51C);
+ msm_camera_io_w(framedrop_pattern, vfe_base + 0x520);
+ }
+ msm_camera_io_w_mb(0x1, vfe_base + 0x260);
+}
+
+static void msm_vfe32_clear_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ if (stream_info->stream_src == PIX_ENCODER) {
+ msm_camera_io_w(0, vfe_dev->vfe_base + 0x50C);
+ msm_camera_io_w(0, vfe_dev->vfe_base + 0x510);
+ } else if (stream_info->stream_src == PIX_VIEWFINDER) {
+ msm_camera_io_w(0, vfe_dev->vfe_base + 0x51C);
+ msm_camera_io_w(0, vfe_dev->vfe_base + 0x520);
+ }
+}
+
+static int32_t msm_vfe32_cfg_io_format(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
+{
+ int bpp, bpp_reg = 0, pack_fmt = 0, pack_reg = 0;
+ uint32_t io_format_reg;
+ bpp = msm_isp_get_bit_per_pixel(io_format);
+ if (bpp < 0) {
+		pr_err("%s:%d invalid io_format %d bpp %d\n", __func__, __LINE__,
+ io_format, bpp);
+ return -EINVAL;
+ }
+
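+	/* Map bits per pixel to the 2-bit register encoding */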
+ switch (bpp) {
+ case 8:
+ bpp_reg = 0;
+ break;
+ case 10:
+ bpp_reg = 1 << 0;
+ break;
+ case 12:
+ bpp_reg = 1 << 1;
+ break;
+ default:
+		pr_err("%s:%d invalid bpp %d\n", __func__, __LINE__, bpp);
+ return -EINVAL;
+ }
+
+ if (stream_src == IDEAL_RAW) {
+ pack_fmt = msm_isp_get_pack_format(io_format);
+ switch (pack_fmt) {
+ case QCOM:
+ pack_reg = 0x0;
+ break;
+ case MIPI:
+ pack_reg = 0x1;
+ break;
+ case DPCM6:
+ pack_reg = 0x2;
+ break;
+ case DPCM8:
+ pack_reg = 0x3;
+ break;
+ case PLAIN8:
+ pack_reg = 0x4;
+ break;
+ case PLAIN16:
+ pack_reg = 0x5;
+ break;
+ default:
+ pr_err("%s: invalid pack fmt!\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x6F8);
+ switch (stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
+ case CAMIF_RAW:
+ io_format_reg &= 0xFFFFCFFF;
+ io_format_reg |= bpp_reg << 12;
+ break;
+ case IDEAL_RAW:
+ io_format_reg &= 0xFFFFFFC8;
+ io_format_reg |= bpp_reg << 4 | pack_reg;
+ break;
+ case RDI_INTF_0:
+ case RDI_INTF_1:
+ case RDI_INTF_2:
+ default:
+ pr_err("%s: Invalid stream source\n", __func__);
+ return -EINVAL;
+ }
+ msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x6F8);
+ return 0;
+}
+
+static int msm_vfe32_start_fetch_engine(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ return 0;
+}
+
+static void msm_vfe32_cfg_fetch_engine(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ pr_err("%s: Fetch engine not supported\n", __func__);
+ return;
+}
+
+static void msm_vfe32_cfg_camif(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint16_t first_pixel, last_pixel, first_line, last_line;
+ struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg;
+ uint32_t val;
+
+ first_pixel = camif_cfg->first_pixel;
+ last_pixel = camif_cfg->last_pixel;
+ first_line = camif_cfg->first_line;
+ last_line = camif_cfg->last_line;
+
+ msm_camera_io_w(pix_cfg->input_mux << 16 | pix_cfg->pixel_pattern,
+ vfe_dev->vfe_base + 0x14);
+
+ msm_camera_io_w(camif_cfg->lines_per_frame << 16 |
+ camif_cfg->pixels_per_line,
+ vfe_dev->vfe_base + 0x1EC);
+
+ msm_camera_io_w(first_pixel << 16 | last_pixel,
+ vfe_dev->vfe_base + 0x1F0);
+
+ msm_camera_io_w(first_line << 16 | last_line,
+ vfe_dev->vfe_base + 0x1F4);
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x6FC);
+ val &= 0xFFFFFFFC;
+ val |= camif_cfg->camif_input;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x6FC);
+}
+
+static void msm_vfe32_cfg_input_mux(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ switch (pix_cfg->input_mux) {
+ case CAMIF:
+ msm_vfe32_cfg_camif(vfe_dev, pix_cfg);
+ break;
+ case EXTERNAL_READ:
+ msm_vfe32_cfg_fetch_engine(vfe_dev, pix_cfg);
+ break;
+ default:
+ pr_err("%s: Unsupported input mux %d\n",
+ __func__, pix_cfg->input_mux);
+ }
+ return;
+}
+
+static void msm_vfe32_update_camif_state(
+ struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state)
+{
+ uint32_t val;
+ bool bus_en, vfe_en;
+ if (update_state == NO_UPDATE)
+ return;
+
+ if (update_state == ENABLE_CAMIF) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ val |= 0x1;
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x1C);
+
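+		/*
+		 * bus_en/vfe_en gate CAMIF output to the bus (raw streams)
+		 * and to the VFE pixel pipeline (pix streams) respectively.
+		 */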
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x1E4);
+ bus_en =
+ ((vfe_dev->axi_data.src_info[
+ VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
+ vfe_en =
+ ((vfe_dev->axi_data.src_info[
+ VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ val &= 0xFFFFFF3F;
+ val = val | bus_en << 7 | vfe_en << 6;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x1E4);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1E0);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
+ } else if (update_state == DISABLE_CAMIF) {
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x1E0);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ } else if (update_state == DISABLE_CAMIF_IMMEDIATELY) {
+ msm_camera_io_w_mb(0x6, vfe_dev->vfe_base + 0x1E0);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ }
+}
+
+static void msm_vfe32_cfg_rdi_reg(struct vfe_device *vfe_dev,
+ struct msm_vfe_rdi_cfg *rdi_cfg, enum msm_vfe_input_src input_src)
+{
+ uint8_t rdi = input_src - VFE_RAW_0;
+ uint32_t rdi_reg_cfg;
+ rdi_reg_cfg = msm_camera_io_r(
+ vfe_dev->vfe_base + VFE32_RDI_BASE(0));
+ rdi_reg_cfg &= ~(BIT(16 + rdi));
+ rdi_reg_cfg |= rdi_cfg->frame_based << (16 + rdi);
+ msm_camera_io_w(rdi_reg_cfg,
+ vfe_dev->vfe_base + VFE32_RDI_BASE(0));
+
+ rdi_reg_cfg = msm_camera_io_r(
+ vfe_dev->vfe_base + VFE32_RDI_BASE(rdi));
+ rdi_reg_cfg &= 0x70003;
+ rdi_reg_cfg |= (rdi * 3) << 28 | rdi_cfg->cid << 4 | 0x4;
+ msm_camera_io_w(
+ rdi_reg_cfg, vfe_dev->vfe_base + VFE32_RDI_BASE(rdi));
+
+}
+
+static void msm_vfe32_axi_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ uint32_t val;
+ uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
+
+ if (!stream_info->frame_based) {
+ /*WR_IMAGE_SIZE*/
+ val =
+ ((msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[plane_idx].
+ output_width)+1)/2 - 1) << 16 |
+ (stream_info->plane_cfg[plane_idx].
+ output_height - 1);
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
+
+ /*WR_BUFFER_CFG*/
+ val =
+ msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[plane_idx].
+ output_stride) << 16 |
+ (stream_info->plane_cfg[plane_idx].
+ output_height - 1) << 4 | VFE32_BURST_LEN;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ } else {
+ msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
+ val =
+ msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[plane_idx].
+ output_width) << 16 |
+ (stream_info->plane_cfg[plane_idx].
+ output_height - 1) << 4 | VFE32_BURST_LEN;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ }
+ return;
+}
+
+static void msm_vfe32_axi_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint32_t val = 0;
+ uint32_t wm_base = VFE32_WM_BASE(stream_info->wm[plane_idx]);
+ /*WR_IMAGE_SIZE*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x10);
+ /*WR_BUFFER_CFG*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ return;
+}
+
+static void msm_vfe32_axi_cfg_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ struct msm_vfe_axi_plane_cfg *plane_cfg =
+ &stream_info->plane_cfg[plane_idx];
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_cfg = 0;
+ uint32_t xbar_reg_cfg = 0;
+
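+	/* Build the crossbar field that routes the stream source to this WM */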
+ switch (stream_info->stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER: {
+ if (plane_cfg->output_plane_format != CRCB_PLANE &&
+ plane_cfg->output_plane_format != CBCR_PLANE) {
+ /*SINGLE_STREAM_SEL*/
+ xbar_cfg |= plane_cfg->output_plane_format << 5;
+ } else {
+ switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV16:
+ xbar_cfg |= 0x3 << 3; /*PAIR_STREAM_SWAP_CTRL*/
+ break;
+ }
+ xbar_cfg |= BIT(1); /*PAIR_STREAM_EN*/
+ }
+ if (stream_info->stream_src == PIX_VIEWFINDER)
+ xbar_cfg |= 0x1; /*VIEW_STREAM_EN*/
+ break;
+ }
+ case CAMIF_RAW:
+ xbar_cfg = 0x60;
+ break;
+ case IDEAL_RAW:
+ xbar_cfg = 0x80;
+ break;
+ case RDI_INTF_0:
+ xbar_cfg = 0xA0;
+ break;
+ case RDI_INTF_1:
+ xbar_cfg = 0xC0;
+ break;
+ case RDI_INTF_2:
+ xbar_cfg = 0xE0;
+ break;
+ default:
+ pr_err("%s: Invalid stream src\n", __func__);
+ }
+ xbar_reg_cfg = msm_camera_io_r(vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFF << VFE32_XBAR_SHIFT(wm));
+ xbar_reg_cfg |= (xbar_cfg << VFE32_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
+ return;
+}
+
+static void msm_vfe32_axi_clear_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_reg_cfg = 0;
+
+ xbar_reg_cfg = msm_camera_io_r(vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFF << VFE32_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg, vfe_dev->vfe_base + VFE32_XBAR_BASE(wm));
+}
+
+static void msm_vfe32_cfg_axi_ub_equal_default(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t total_image_size = 0;
+ uint32_t num_used_wms = 0;
+ uint32_t prop_size = 0;
+ uint32_t wm_ub_size;
+ uint64_t delta;
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i] > 0) {
+ num_used_wms++;
+ total_image_size += axi_data->wm_image_size[i];
+ }
+ }
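+	/*
+	 * Reserve the per-WM minimum UB, then share the remaining space in
+	 * proportion to each active write master's image size.
+	 */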
+ prop_size = MSM_ISP32_TOTAL_WM_UB -
+ axi_data->hw_info->min_wm_ub * num_used_wms;
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i]) {
+ delta =
+ (uint64_t)(axi_data->wm_image_size[i] *
+ prop_size);
+ do_div(delta, total_image_size);
+ wm_ub_size = axi_data->hw_info->min_wm_ub +
+ (uint32_t)delta;
+ msm_camera_io_w(ub_offset << 16 |
+ (wm_ub_size - 1), vfe_dev->vfe_base +
+ VFE32_WM_BASE(i) + 0xC);
+ ub_offset += wm_ub_size;
+ } else {
+ msm_camera_io_w(0,
+ vfe_dev->vfe_base + VFE32_WM_BASE(i) + 0xC);
+ }
+ }
+}
+
+static void msm_vfe32_cfg_axi_ub_equal_slicing(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ uint32_t final_ub_slice_size;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
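+	/* Equal UB slice per WM; the last slice is clamped to the space left */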
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (ub_offset + VFE32_EQUAL_SLICE_UB > VFE32_AXI_SLICE_UB) {
+ final_ub_slice_size = VFE32_AXI_SLICE_UB - ub_offset;
+ msm_camera_io_w(ub_offset << 16 |
+ (final_ub_slice_size - 1), vfe_dev->vfe_base +
+ VFE32_WM_BASE(i) + 0xC);
+ ub_offset += final_ub_slice_size;
+ } else {
+ msm_camera_io_w(ub_offset << 16 |
+ (VFE32_EQUAL_SLICE_UB - 1), vfe_dev->vfe_base +
+ VFE32_WM_BASE(i) + 0xC);
+ ub_offset += VFE32_EQUAL_SLICE_UB;
+ }
+ }
+}
+
+static void msm_vfe32_cfg_axi_ub(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
+ if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
+ msm_vfe32_cfg_axi_ub_equal_slicing(vfe_dev);
+ else
+ msm_vfe32_cfg_axi_ub_equal_default(vfe_dev);
+}
+
+static void msm_vfe32_update_ping_pong_addr(void __iomem *vfe_base,
+ uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
+ int32_t buf_size)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE32_PING_PONG_BASE(wm_idx, pingpong_bit));
+}
+
+static int msm_vfe32_axi_halt(struct vfe_device *vfe_dev, uint32_t blocking)
+{
+ uint32_t halt_mask;
+ uint32_t axi_busy_flag = true;
+
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x1D8);
+ while (axi_busy_flag) {
+ if (msm_camera_io_r(
+ vfe_dev->vfe_base + 0x1DC) & 0x1)
+ axi_busy_flag = false;
+ }
+ msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x1D8);
+ halt_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x20);
+ halt_mask &= 0xFEFFFFFF;
+ /* Disable AXI IRQ */
+ msm_camera_io_w_mb(halt_mask, vfe_dev->vfe_base + 0x20);
+ return 0;
+}
+
+static uint32_t msm_vfe32_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 6) & 0x7F;
+}
+
+static uint32_t msm_vfe32_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 21) & 0x7;
+}
+
+static uint32_t msm_vfe32_get_pingpong_status(struct vfe_device *vfe_dev)
+{
+ return msm_camera_io_r(vfe_dev->vfe_base + 0x180);
+}
+
+static int msm_vfe32_get_stats_idx(enum msm_isp_stats_type stats_type)
+{
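+	/*
+	 * Map each stats type to its write master index; AEC/BG, AF/BF and
+	 * SKIN/BHIST pairs share an index on VFE3.x.
+	 */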
+ switch (stats_type) {
+ case MSM_ISP_STATS_AEC:
+ case MSM_ISP_STATS_BG:
+ return 0;
+ case MSM_ISP_STATS_AF:
+ case MSM_ISP_STATS_BF:
+ return 1;
+ case MSM_ISP_STATS_AWB:
+ return 2;
+ case MSM_ISP_STATS_RS:
+ return 3;
+ case MSM_ISP_STATS_CS:
+ return 4;
+ case MSM_ISP_STATS_IHIST:
+ return 5;
+ case MSM_ISP_STATS_SKIN:
+ case MSM_ISP_STATS_BHIST:
+ return 6;
+ default:
+ pr_err("%s: Invalid stats type\n", __func__);
+ return -EINVAL;
+ }
+}
+
+static int msm_vfe32_stats_check_streams(
+ struct msm_vfe_stats_stream *stream_info)
+{
+ return 0;
+}
+
+static void msm_vfe32_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t comp_idx, uint8_t enable)
+{
+ return;
+}
+
+static void msm_vfe32_stats_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ irq_mask |= BIT(STATS_IDX(stream_info->stream_handle) + 13);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+ return;
+}
+
+static void msm_vfe32_stats_clear_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ irq_mask &= ~(BIT(STATS_IDX(stream_info->stream_handle) + 13));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x1C);
+ return;
+}
+
+static void msm_vfe32_stats_cfg_wm_reg(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ /*Nothing to configure for VFE3.x*/
+ return;
+}
+
+static void msm_vfe32_stats_clear_wm_reg(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ /*Nothing to configure for VFE3.x*/
+ return;
+}
+
+static void msm_vfe32_stats_cfg_ub(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = VFE32_UB_SIZE;
+ uint32_t ub_size[VFE32_NUM_STATS_TYPE] = {
+ 107, /*MSM_ISP_STATS_BG*/
+ 92, /*MSM_ISP_STATS_BF*/
+ 2, /*MSM_ISP_STATS_AWB*/
+ 7, /*MSM_ISP_STATS_RS*/
+ 16, /*MSM_ISP_STATS_CS*/
+ 2, /*MSM_ISP_STATS_IHIST*/
+ 7, /*MSM_ISP_STATS_BHIST*/
+ };
+
+ if (vfe_dev->vfe_hw_version == VFE32_8909_VERSION)
+ ub_offset = VFE32_UB_SIZE_32KB;
+
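+	/* Carve each stats UB region downward from the top of the UB */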
+ for (i = 0; i < VFE32_NUM_STATS_TYPE; i++) {
+ ub_offset -= ub_size[i];
+ msm_camera_io_w(ub_offset << 16 | (ub_size[i] - 1),
+ vfe_dev->vfe_base + VFE32_STATS_BASE(i) + 0x8);
+ }
+ return;
+}
+
+static bool msm_vfe32_is_module_cfg_lock_needed(
+ uint32_t reg_offset)
+{
+ return false;
+}
+
+static void msm_vfe32_stats_enable_module(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, module_cfg_mask = 0;
+
+ for (i = 0; i < VFE32_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ module_cfg_mask |= 1 << (5 + i);
+ break;
+ case 5:
+ module_cfg_mask |= 1 << 16;
+ break;
+ case 6:
+ module_cfg_mask |= 1 << 19;
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x10);
+ if (enable)
+ module_cfg |= module_cfg_mask;
+ else
+ module_cfg &= ~module_cfg_mask;
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x10);
+}
+
+static void msm_vfe32_stats_update_ping_pong_addr(void __iomem *vfe_base,
+ struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
+ dma_addr_t paddr)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE32_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
+}
+
+static uint32_t msm_vfe32_stats_get_wm_mask(uint32_t irq_status0,
+ uint32_t irq_status1)
+{
+ return (irq_status0 >> 13) & 0x7F;
+}
+
+static uint32_t msm_vfe32_stats_get_comp_mask(uint32_t irq_status0,
+ uint32_t irq_status1)
+{
+ return (irq_status0 >> 24) & 0x1;
+}
+
+static uint32_t msm_vfe32_stats_get_frame_id(struct vfe_device *vfe_dev)
+{
+ return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+}
+
+static int msm_vfe32_get_platform_data(struct vfe_device *vfe_dev)
+{
+ int rc = 0;
+ vfe_dev->vfe_mem = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe");
+ if (!vfe_dev->vfe_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_vbif_mem = platform_get_resource_byname(
+ vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe_vbif");
+ if (!vfe_dev->vfe_vbif_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_irq = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_IRQ, "vfe");
+ if (!vfe_dev->vfe_irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->fs_vfe = regulator_get(&vfe_dev->pdev->dev, "vdd");
+ if (IS_ERR(vfe_dev->fs_vfe)) {
+ pr_err("%s: Regulator get failed %ld\n", __func__,
+ PTR_ERR(vfe_dev->fs_vfe));
+ vfe_dev->fs_vfe = NULL;
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ if (!vfe_dev->pdev->dev.of_node)
+ vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe_imgwr");
+ else
+ vfe_dev->iommu_ctx[0] = msm_iommu_get_ctx("vfe");
+
+ if (!vfe_dev->iommu_ctx[0]) {
+		pr_err("%s: no iommu ctx resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ if (!vfe_dev->pdev->dev.of_node)
+ vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe_misc");
+ else
+ vfe_dev->iommu_ctx[1] = msm_iommu_get_ctx("vfe");
+
+ if (!vfe_dev->iommu_ctx[1]) {
+		pr_err("%s: no iommu ctx resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+vfe_no_resource:
+ return rc;
+}
+
+static void msm_vfe32_get_error_mask(uint32_t *error_mask0,
+ uint32_t *error_mask1)
+{
+ *error_mask0 = 0x00000000;
+ *error_mask1 = 0x007FFFFF;
+}
+
+struct msm_vfe_axi_hardware_info msm_vfe32_axi_hw_info = {
+ .num_wm = 5,
+ .num_comp_mask = 3,
+ .num_rdi = 3,
+ .num_rdi_master = 3,
+ .min_wm_ub = 64,
+ .scratch_buf_range = SZ_32M,
+};
+
+static struct msm_vfe_stats_hardware_info msm_vfe32_stats_hw_info = {
+ .stats_capability_mask =
+ 1 << MSM_ISP_STATS_AEC | 1 << MSM_ISP_STATS_BG |
+ 1 << MSM_ISP_STATS_AF | 1 << MSM_ISP_STATS_BF |
+ 1 << MSM_ISP_STATS_AWB | 1 << MSM_ISP_STATS_IHIST |
+ 1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS |
+ 1 << MSM_ISP_STATS_SKIN | 1 << MSM_ISP_STATS_BHIST,
+ .stats_ping_pong_offset = stats_pingpong_offset_map,
+ .num_stats_type = VFE32_NUM_STATS_TYPE,
+ .num_stats_comp_mask = 0,
+};
+
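+/*
+ * VFE3.2 hardware description: IRQ/AXI/core/stats ops plus the AXI and
+ * stats capability tables consumed by the common msm_isp code.
+ */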
+struct msm_vfe_hardware_info vfe32_hw_info = {
+ .num_iommu_ctx = 2,
+ .num_iommu_secure_ctx = 0,
+ .vfe_clk_idx = VFE32_CLK_IDX,
+ .vfe_ops = {
+ .irq_ops = {
+ .read_irq_status = msm_vfe32_read_irq_status,
+ .process_camif_irq = msm_vfe32_process_camif_irq,
+ .process_reset_irq = msm_vfe32_process_reset_irq,
+ .process_halt_irq = msm_vfe32_process_halt_irq,
+ .process_reg_update = msm_vfe32_process_reg_update,
+ .process_axi_irq = msm_isp_process_axi_irq,
+ .process_stats_irq = msm_isp_process_stats_irq,
+ .process_epoch_irq = msm_vfe32_process_epoch_irq,
+ },
+ .axi_ops = {
+ .reload_wm = msm_vfe32_axi_reload_wm,
+ .enable_wm = msm_vfe32_axi_enable_wm,
+ .cfg_io_format = msm_vfe32_cfg_io_format,
+ .cfg_comp_mask = msm_vfe32_axi_cfg_comp_mask,
+ .clear_comp_mask = msm_vfe32_axi_clear_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe32_axi_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe32_axi_clear_wm_irq_mask,
+ .cfg_framedrop = msm_vfe32_cfg_framedrop,
+ .clear_framedrop = msm_vfe32_clear_framedrop,
+ .cfg_wm_reg = msm_vfe32_axi_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe32_axi_clear_wm_reg,
+ .cfg_wm_xbar_reg = msm_vfe32_axi_cfg_wm_xbar_reg,
+ .clear_wm_xbar_reg = msm_vfe32_axi_clear_wm_xbar_reg,
+ .cfg_ub = msm_vfe32_cfg_axi_ub,
+ .update_ping_pong_addr =
+ msm_vfe32_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe32_get_comp_mask,
+ .get_wm_mask = msm_vfe32_get_wm_mask,
+ .get_pingpong_status = msm_vfe32_get_pingpong_status,
+ .halt = msm_vfe32_axi_halt,
+ },
+ .core_ops = {
+ .reg_update = msm_vfe32_reg_update,
+ .cfg_input_mux = msm_vfe32_cfg_input_mux,
+ .update_camif_state = msm_vfe32_update_camif_state,
+ .start_fetch_eng = msm_vfe32_start_fetch_engine,
+ .cfg_rdi_reg = msm_vfe32_cfg_rdi_reg,
+ .reset_hw = msm_vfe32_reset_hardware,
+ .init_hw = msm_vfe32_init_hardware,
+ .init_hw_reg = msm_vfe32_init_hardware_reg,
+ .clear_status_reg = msm_vfe32_clear_status_reg,
+ .release_hw = msm_vfe32_release_hardware,
+ .get_platform_data = msm_vfe32_get_platform_data,
+ .get_error_mask = msm_vfe32_get_error_mask,
+ .process_error_status = msm_vfe32_process_error_status,
+ .get_overflow_mask = msm_vfe32_get_overflow_mask,
+ .is_module_cfg_lock_needed =
+ msm_vfe32_is_module_cfg_lock_needed,
+ },
+ .stats_ops = {
+ .get_stats_idx = msm_vfe32_get_stats_idx,
+ .check_streams = msm_vfe32_stats_check_streams,
+ .cfg_comp_mask = msm_vfe32_stats_cfg_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe32_stats_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe32_stats_clear_wm_irq_mask,
+ .cfg_wm_reg = msm_vfe32_stats_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe32_stats_clear_wm_reg,
+ .cfg_ub = msm_vfe32_stats_cfg_ub,
+ .enable_module = msm_vfe32_stats_enable_module,
+ .update_ping_pong_addr =
+ msm_vfe32_stats_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe32_stats_get_comp_mask,
+ .get_wm_mask = msm_vfe32_stats_get_wm_mask,
+ .get_frame_id = msm_vfe32_stats_get_frame_id,
+ .get_pingpong_status = msm_vfe32_get_pingpong_status,
+ },
+ },
+ .dmi_reg_offset = 0x5A0,
+ .axi_hw_info = &msm_vfe32_axi_hw_info,
+ .stats_hw_info = &msm_vfe32_stats_hw_info,
+};
+EXPORT_SYMBOL(vfe32_hw_info);
+
+static const struct of_device_id msm_vfe32_dt_match[] = {
+ {
+ .compatible = "qcom,vfe32",
+ .data = &vfe32_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe32_dt_match);
+
+static struct platform_driver vfe32_driver = {
+ .probe = vfe_hw_probe,
+ .driver = {
+ .name = "msm_vfe32",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe32_dt_match,
+ },
+ .id_table = msm_vfe32_dev_id,
+};
+
+static int __init msm_vfe32_init_module(void)
+{
+ return platform_driver_register(&vfe32_driver);
+}
+
+static void __exit msm_vfe32_exit_module(void)
+{
+ platform_driver_unregister(&vfe32_driver);
+}
+
+module_init(msm_vfe32_init_module);
+module_exit(msm_vfe32_exit_module);
+MODULE_DESCRIPTION("MSM VFE32 driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp32.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.h
new file mode 100644
index 000000000000..523f0340662a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp32.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISP32_H__
+#define __MSM_ISP32_H__
+
+extern struct msm_vfe_hardware_info vfe32_hw_info;
+#endif /* __MSM_ISP32_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
new file mode 100644
index 000000000000..10cddddc54cd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -0,0 +1,2451 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <asm/div64.h>
+#include "msm_isp40.h"
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_isp.h"
+#include "msm.h"
+#include "msm_camera_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define VFE40_BURST_LEN 1
+#define VFE40_BURST_LEN_8916_VERSION 2
+#define VFE40_BURST_LEN_8952_VERSION 3
+#define VFE40_WM_BIT_SHIFT 4
+#define VFE40_WM_BIT_SHIFT_8976_VERSION 3
+#define VFE40_STATS_BURST_LEN 1
+#define VFE40_STATS_BURST_LEN_8916_VERSION 2
+#define VFE40_FETCH_BURST_LEN 3
+#define VFE40_UB_SIZE 1536 /* 1536 * 128 bits = 24KB */
+#define VFE40_UB_SIZE_8952 2048 /* 2048 * 128 bits = 32KB */
+#define VFE40_UB_SIZE_8916 3072 /* 3072 * 128 bits = 48KB */
+#define VFE40_EQUAL_SLICE_UB 190 /* (UB_SIZE - STATS SIZE)/6 */
+#define VFE40_EQUAL_SLICE_UB_8916 236
+#define VFE40_TOTAL_WM_UB 1144 /* UB_SIZE - STATS SIZE */
+#define VFE40_TOTAL_WM_UB_8916 1656
+#define VFE40_WM_BASE(idx) (0x6C + 0x24 * idx)
+#define VFE40_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
+#define VFE40_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
+#define VFE40_XBAR_SHIFT(idx) ((idx%2) ? 16 : 0)
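+/*
+ * Ping/pong address registers live at WM base + 0x4 and + 0x8; the
+ * inverted ping_pong bit selects which of the two gets programmed.
+ */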
+#define VFE40_PING_PONG_BASE(wm, ping_pong) \
+ (VFE40_WM_BASE(wm) + 0x4 * (1 + ((~ping_pong) & 0x1)))
+
+#define VFE40_BUS_RD_CGC_OVERRIDE_BIT 16
+
+#define STATS_IDX_BE 0
+#define STATS_IDX_BG 1
+#define STATS_IDX_BF 2
+#define STATS_IDX_AWB 3
+#define STATS_IDX_RS 4
+#define STATS_IDX_CS 5
+#define STATS_IDX_IHIST 6
+#define STATS_IDX_BHIST 7
+
+static uint8_t stats_pingpong_offset_map[] = {
+ 8, 9, 10, 11, 12, 13, 14, 15};
+
+#define VFE40_NUM_STATS_TYPE 8
+#define VFE40_STATS_BASE(idx) (0x168 + 0x18 * idx)
+#define VFE40_STATS_PING_PONG_BASE(idx, ping_pong) \
+ (VFE40_STATS_BASE(idx) + 0x4 * \
+ (~(ping_pong >> (stats_pingpong_offset_map[idx])) & 0x1))
+
+#define VFE40_VBIF_CLKON 0x4
+#define VFE40_VBIF_IN_RD_LIM_CONF0 0xB0
+#define VFE40_VBIF_IN_RD_LIM_CONF1 0xB4
+#define VFE40_VBIF_IN_RD_LIM_CONF2 0xB8
+#define VFE40_VBIF_IN_WR_LIM_CONF0 0xC0
+#define VFE40_VBIF_IN_WR_LIM_CONF1 0xC4
+#define VFE40_VBIF_IN_WR_LIM_CONF2 0xC8
+#define VFE40_VBIF_OUT_RD_LIM_CONF0 0xD0
+#define VFE40_VBIF_OUT_WR_LIM_CONF0 0xD4
+#define VFE40_VBIF_DDR_OUT_MAX_BURST 0xD8
+#define VFE40_VBIF_OCMEM_OUT_MAX_BURST 0xDC
+#define VFE40_VBIF_ARB_CTL 0xF0
+#define VFE40_VBIF_ROUND_ROBIN_QOS_ARB 0x124
+#define VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x160
+#define VFE40_VBIF_OUT_AXI_AMEMTYPE_CONF1 0x164
+#define VFE40_VBIF_OUT_AXI_AOOO_EN 0x178
+#define VFE40_VBIF_OUT_AXI_AOOO 0x17C
+
+#define VFE40_BUS_BDG_QOS_CFG_0 0x000002C4
+#define VFE40_BUS_BDG_QOS_CFG_1 0x000002C8
+#define VFE40_BUS_BDG_QOS_CFG_2 0x000002CC
+#define VFE40_BUS_BDG_QOS_CFG_3 0x000002D0
+#define VFE40_BUS_BDG_QOS_CFG_4 0x000002D4
+#define VFE40_BUS_BDG_QOS_CFG_5 0x000002D8
+#define VFE40_BUS_BDG_QOS_CFG_6 0x000002DC
+#define VFE40_BUS_BDG_QOS_CFG_7 0x000002E0
+
+#define VFE40_CLK_IDX 2
+static struct msm_cam_clk_info msm_vfe40_clk_info[VFE_CLK_INFO_MAX];
+
+static int32_t msm_vfe40_init_qos_parms(struct vfe_device *vfe_dev,
+ struct msm_vfe_hw_init_parms *qos_parms,
+ struct msm_vfe_hw_init_parms *ds_parms)
+{
+ void __iomem *vfebase = vfe_dev->vfe_base;
+ struct device_node *of_node;
+ uint32_t *ds_settings = NULL, *ds_regs = NULL, ds_entries = 0;
+	int32_t i = 0, rc = 0;
+ uint32_t *qos_settings = NULL, *qos_regs = NULL, qos_entries = 0;
+ of_node = vfe_dev->pdev->dev.of_node;
+
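+	/* QoS and D/S register/value tables come from DT and are written verbatim */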
+ rc = of_property_read_u32(of_node, qos_parms->entries,
+ &qos_entries);
+ if (rc < 0 || !qos_entries) {
+ pr_err("%s: NO QOS entries found\n", __func__);
+ } else {
+ qos_settings = kzalloc(sizeof(uint32_t) * qos_entries,
+ GFP_KERNEL);
+ if (!qos_settings) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ qos_regs = kzalloc(sizeof(uint32_t) * qos_entries,
+ GFP_KERNEL);
+ if (!qos_regs) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ kfree(qos_settings);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, qos_parms->regs,
+ qos_regs, qos_entries);
+ if (rc < 0) {
+ pr_err("%s: NO QOS BUS BDG info\n", __func__);
+ kfree(qos_settings);
+ kfree(qos_regs);
+ } else {
+ if (qos_parms->settings) {
+ rc = of_property_read_u32_array(of_node,
+ qos_parms->settings,
+ qos_settings, qos_entries);
+ if (rc < 0) {
+ pr_err("%s: NO QOS settings\n",
+ __func__);
+ kfree(qos_settings);
+ kfree(qos_regs);
+ } else {
+ for (i = 0; i < qos_entries; i++)
+ msm_camera_io_w(qos_settings[i],
+ vfebase + qos_regs[i]);
+ kfree(qos_settings);
+ kfree(qos_regs);
+ }
+ } else {
+ kfree(qos_settings);
+ kfree(qos_regs);
+ }
+ }
+ }
+ rc = of_property_read_u32(of_node, ds_parms->entries,
+ &ds_entries);
+ if (rc < 0 || !ds_entries) {
+ pr_err("%s: NO D/S entries found\n", __func__);
+ } else {
+ ds_settings = kzalloc(sizeof(uint32_t) * ds_entries,
+ GFP_KERNEL);
+ if (!ds_settings) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ ds_regs = kzalloc(sizeof(uint32_t) * ds_entries,
+ GFP_KERNEL);
+ if (!ds_regs) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ kfree(ds_settings);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, ds_parms->regs,
+ ds_regs, ds_entries);
+ if (rc < 0) {
+ pr_err("%s: NO D/S register info\n", __func__);
+ kfree(ds_settings);
+ kfree(ds_regs);
+ } else {
+ if (ds_parms->settings) {
+ rc = of_property_read_u32_array(of_node,
+ ds_parms->settings, ds_settings,
+ ds_entries);
+ if (rc < 0) {
+ pr_err("%s: NO D/S settings\n",
+ __func__);
+ kfree(ds_settings);
+ kfree(ds_regs);
+ } else {
+ for (i = 0; i < ds_entries; i++)
+ msm_camera_io_w(ds_settings[i],
+ vfebase + ds_regs[i]);
+ kfree(ds_regs);
+ kfree(ds_settings);
+ }
+ } else {
+ kfree(ds_regs);
+ kfree(ds_settings);
+ }
+ }
+ }
+ return 0;
+}
+
+static int32_t msm_vfe40_init_vbif_parms(struct vfe_device *vfe_dev,
+ struct msm_vfe_hw_init_parms *vbif_parms)
+{
+ void __iomem *vfe_vbif_base = vfe_dev->vfe_vbif_base;
+ struct device_node *of_node;
+	int32_t i = 0, rc = 0;
+ uint32_t *vbif_settings = NULL, *vbif_regs = NULL, vbif_entries = 0;
+ of_node = vfe_dev->pdev->dev.of_node;
+
+ rc = of_property_read_u32(of_node, vbif_parms->entries,
+ &vbif_entries);
+ if (rc < 0 || !vbif_entries) {
+ pr_err("%s: NO VBIF entries found\n", __func__);
+ } else {
+ vbif_settings = kzalloc(sizeof(uint32_t) * vbif_entries,
+ GFP_KERNEL);
+ if (!vbif_settings) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ vbif_regs = kzalloc(sizeof(uint32_t) * vbif_entries,
+ GFP_KERNEL);
+ if (!vbif_regs) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ kfree(vbif_settings);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, vbif_parms->regs,
+ vbif_regs, vbif_entries);
+ if (rc < 0) {
+ pr_err("%s: NO VBIF info\n", __func__);
+ kfree(vbif_settings);
+ kfree(vbif_regs);
+ } else {
+ rc = of_property_read_u32_array(of_node,
+ vbif_parms->settings,
+ vbif_settings, vbif_entries);
+ if (rc < 0) {
+ pr_err("%s: NO VBIF settings\n",
+ __func__);
+ kfree(vbif_settings);
+ kfree(vbif_regs);
+ } else {
+ for (i = 0; i < vbif_entries; i++)
+ msm_camera_io_w(
+ vbif_settings[i],
+ vfe_vbif_base + vbif_regs[i]);
+ kfree(vbif_settings);
+ kfree(vbif_regs);
+ }
+ }
+ }
+ return 0;
+}
+
+static int msm_vfe40_init_hardware(struct vfe_device *vfe_dev)
+{
+ int rc = -1;
+ rc = msm_isp_init_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
+ goto bus_scale_register_failed;
+ }
+
+ if (vfe_dev->fs_vfe) {
+ rc = regulator_enable(vfe_dev->fs_vfe);
+ if (rc) {
+ pr_err("%s: Regulator enable failed\n", __func__);
+ goto fs_failed;
+ }
+ }
+
+ rc = msm_isp_get_clk_info(vfe_dev, vfe_dev->pdev,
+ &msm_vfe40_clk_info[0]);
+ if (rc < 0) {
+ pr_err("msm_isp_get_clk_info() failed\n");
+ goto fs_failed;
+ }
+	if (vfe_dev->num_clk <= 0) {
+		pr_err("%s: Invalid num of clock\n", __func__);
+		rc = -EINVAL;
+		goto fs_failed;
+	} else {
+ vfe_dev->vfe_clk =
+ kzalloc(sizeof(struct clk *) * vfe_dev->num_clk,
+ GFP_KERNEL);
+ if (!vfe_dev->vfe_clk) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ }
+ rc = msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe40_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 1);
+ if (rc < 0)
+ goto clk_enable_failed;
+
+ vfe_dev->vfe_base = ioremap(vfe_dev->vfe_mem->start,
+ resource_size(vfe_dev->vfe_mem));
+ if (!vfe_dev->vfe_base) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto vfe_remap_failed;
+ }
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
+ vfe_dev->vfe_base;
+
+ vfe_dev->vfe_vbif_base = ioremap(vfe_dev->vfe_vbif_mem->start,
+ resource_size(vfe_dev->vfe_vbif_mem));
+ if (!vfe_dev->vfe_vbif_base) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto vbif_remap_failed;
+ }
+
+ rc = request_irq(vfe_dev->vfe_irq->start, msm_isp_process_irq,
+ IRQF_TRIGGER_RISING, "vfe", vfe_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request failed\n", __func__);
+ goto irq_req_failed;
+ }
+ return rc;
+irq_req_failed:
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+vbif_remap_failed:
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+vfe_remap_failed:
+ msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe40_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 0);
+clk_enable_failed:
+ if (vfe_dev->fs_vfe)
+ regulator_disable(vfe_dev->fs_vfe);
+ kfree(vfe_dev->vfe_clk);
+fs_failed:
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+bus_scale_register_failed:
+ return rc;
+}
+
+static void msm_vfe40_release_hardware(struct vfe_device *vfe_dev)
+{
+ /* disable all mask before tasklet kill */
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x2C);
+
+ disable_irq(vfe_dev->vfe_irq->start);
+ free_irq(vfe_dev->vfe_irq->start, vfe_dev);
+ tasklet_kill(&vfe_dev->vfe_tasklet);
+ msm_isp_flush_tasklet(vfe_dev);
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+ msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe40_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 0);
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+ kfree(vfe_dev->vfe_clk);
+ regulator_disable(vfe_dev->fs_vfe);
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+}
+
+static void msm_vfe40_init_hardware_reg(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_hw_init_parms qos_parms;
+ struct msm_vfe_hw_init_parms vbif_parms;
+ struct msm_vfe_hw_init_parms ds_parms;
+
+ qos_parms.entries = "qos-entries";
+ qos_parms.regs = "qos-regs";
+ qos_parms.settings = "qos-settings";
+ vbif_parms.entries = "vbif-entries";
+ vbif_parms.regs = "vbif-regs";
+ vbif_parms.settings = "vbif-settings";
+ ds_parms.entries = "ds-entries";
+ ds_parms.regs = "ds-regs";
+ ds_parms.settings = "ds-settings";
+
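+	/*
+	 * Some VFE4.0 revisions ship alternate QoS/VBIF tables in DT; pick
+	 * the matching property names before programming the registers.
+	 */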
+ switch (vfe_dev->vfe_hw_version) {
+ case VFE40_8974V1_VERSION:
+ case VFE40_8x26_VERSION:
+ case VFE40_8916_VERSION:
+ case VFE40_8939_VERSION:
+ break;
+ case VFE40_8x26V2_VERSION:
+ qos_parms.settings = "qos-v2-settings";
+ break;
+ case VFE40_8974V2_VERSION:
+ case VFE40_8974V3_VERSION:
+ if (vfe_dev->vfe_hw_version == VFE40_8974V2_VERSION)
+ qos_parms.settings = "qos-v2-settings";
+ else
+ qos_parms.settings = "qos-v3-settings";
+ vbif_parms.entries = "vbif-v2-entries";
+ vbif_parms.regs = "vbif-v2-regs";
+ vbif_parms.settings = "vbif-v2-settings";
+ break;
+ case VFE40_8937_VERSION:
+ case VFE40_8953_VERSION:
+ vfe_dev->is_camif_raw_crop_supported = 1;
+ break;
+ default:
+ pr_err("%s: QOS and VBIF is NOT configured for HW Version %x\n",
+ __func__, vfe_dev->vfe_hw_version);
+ }
+
+ msm_vfe40_init_qos_parms(vfe_dev, &qos_parms, &ds_parms);
+ msm_vfe40_init_vbif_parms(vfe_dev, &vbif_parms);
+ /* BUS_CFG */
+ msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50);
+ msm_camera_io_w(0xE00000F1, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x2C);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
+ msm_camera_io_w(0, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
+}
+
+static void msm_vfe40_clear_status_reg(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w((1 << 31), vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x2C);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
+}
+
+static void msm_vfe40_process_reset_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ if (irq_status0 & (1 << 31))
+ complete(&vfe_dev->reset_complete);
+}
+
+static void msm_vfe40_process_halt_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ if (irq_status1 & (1 << 8)) {
+ complete(&vfe_dev->halt_complete);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x2C0);
+ }
+}
+
+static void msm_vfe40_process_input_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0x1000003))
+ return;
+
+ if (irq_status0 & (1 << 0)) {
+ ISP_DBG("%s: SOF IRQ\n", __func__);
+ msm_isp_increment_frame_id(vfe_dev, VFE_PIX_0, ts);
+ }
+
+ if (irq_status0 & (1 << 24)) {
+ ISP_DBG("%s: Fetch Engine Read IRQ\n", __func__);
+ msm_isp_fetch_engine_done_notify(vfe_dev,
+ &vfe_dev->fetch_engine_info);
+ }
+
+ if (irq_status0 & (1 << 1))
+ ISP_DBG("%s: EOF IRQ\n", __func__);
+}
+
+static void msm_vfe40_process_violation_status(
+ struct vfe_device *vfe_dev)
+{
+ uint32_t violation_status = vfe_dev->error_info.violation_status;
+ if (!violation_status)
+ return;
+
+ if (violation_status & (1 << 0))
+ pr_err("%s: vfe %d camif violation\n", __func__,
+ vfe_dev->pdev->id);
+ if (violation_status & (1 << 1))
+ pr_err("%s: vfe %d black violation\n", __func__,
+ vfe_dev->pdev->id);
+ if (violation_status & (1 << 2))
+ pr_err("%s: vfe %d rolloff violation\n", __func__,
+ vfe_dev->pdev->id);
+ if (violation_status & (1 << 3))
+ pr_err("%s: demux violation\n", __func__);
+ if (violation_status & (1 << 4))
+ pr_err("%s: demosaic violation\n", __func__);
+ if (violation_status & (1 << 5))
+ pr_err("%s: wb violation\n", __func__);
+ if (violation_status & (1 << 6))
+ pr_err("%s: clf violation\n", __func__);
+ if (violation_status & (1 << 7))
+ pr_err("%s: color correct violation\n", __func__);
+ if (violation_status & (1 << 8))
+ pr_err("%s: rgb lut violation\n", __func__);
+ if (violation_status & (1 << 9))
+ pr_err("%s: la violation\n", __func__);
+ if (violation_status & (1 << 10))
+ pr_err("%s: chroma enhance violation\n", __func__);
+ if (violation_status & (1 << 11))
+		pr_err("%s: chroma suppress mce violation\n", __func__);
+ if (violation_status & (1 << 12))
+ pr_err("%s: skin enhance violation\n", __func__);
+ if (violation_status & (1 << 13))
+		pr_err("%s: color transform enc violation\n", __func__);
+ if (violation_status & (1 << 14))
+		pr_err("%s: color transform view violation\n", __func__);
+ if (violation_status & (1 << 15))
+ pr_err("%s: scale enc y violation\n", __func__);
+ if (violation_status & (1 << 16))
+ pr_err("%s: scale enc cbcr violation\n", __func__);
+ if (violation_status & (1 << 17))
+ pr_err("%s: scale view y violation\n", __func__);
+ if (violation_status & (1 << 18))
+ pr_err("%s: scale view cbcr violation\n", __func__);
+ if (violation_status & (1 << 19))
+ pr_err("%s: asf enc violation\n", __func__);
+ if (violation_status & (1 << 20))
+ pr_err("%s: asf view violation\n", __func__);
+ if (violation_status & (1 << 21))
+ pr_err("%s: crop enc y violation\n", __func__);
+ if (violation_status & (1 << 22))
+ pr_err("%s: crop enc cbcr violation\n", __func__);
+ if (violation_status & (1 << 23))
+ pr_err("%s: crop view y violation\n", __func__);
+ if (violation_status & (1 << 24))
+ pr_err("%s: crop view cbcr violation\n", __func__);
+ if (violation_status & (1 << 25))
+ pr_err("%s: realign buf y violation\n", __func__);
+ if (violation_status & (1 << 26))
+ pr_err("%s: realign buf cb violation\n", __func__);
+ if (violation_status & (1 << 27))
+ pr_err("%s: realign buf cr violation\n", __func__);
+}
+
+static void msm_vfe40_process_error_status(struct vfe_device *vfe_dev)
+{
+ uint32_t error_status1 = vfe_dev->error_info.error_mask1;
+ if (error_status1 & (1 << 0)) {
+ pr_err_ratelimited("%s: vfe %d camif error status: 0x%x\n",
+ __func__, vfe_dev->pdev->id,
+ vfe_dev->error_info.camif_status);
+ msm_camera_io_dump(vfe_dev->vfe_base + 0x2F4, 0x30, 1);
+ }
+ if (error_status1 & (1 << 1))
+ pr_err_ratelimited("%s: stats bhist overwrite\n", __func__);
+ if (error_status1 & (1 << 2))
+ pr_err_ratelimited("%s: stats cs overwrite\n", __func__);
+ if (error_status1 & (1 << 3))
+ pr_err_ratelimited("%s: stats ihist overwrite\n", __func__);
+ if (error_status1 & (1 << 4))
+ pr_err_ratelimited("%s: realign buf y overflow\n", __func__);
+ if (error_status1 & (1 << 5))
+ pr_err_ratelimited("%s: realign buf cb overflow\n", __func__);
+ if (error_status1 & (1 << 6))
+ pr_err_ratelimited("%s: realign buf cr overflow\n", __func__);
+	if (error_status1 & (1 << 7))
+		msm_vfe40_process_violation_status(vfe_dev);
+ if (error_status1 & (1 << 9)) {
+ vfe_dev->stats->imagemaster0_overflow++;
+ pr_err_ratelimited("%s: image master 0 bus overflow\n",
+ __func__);
+ }
+ if (error_status1 & (1 << 10)) {
+ vfe_dev->stats->imagemaster1_overflow++;
+ pr_err_ratelimited("%s: image master 1 bus overflow\n",
+ __func__);
+ }
+ if (error_status1 & (1 << 11)) {
+ vfe_dev->stats->imagemaster2_overflow++;
+ pr_err_ratelimited("%s: image master 2 bus overflow\n",
+ __func__);
+ }
+ if (error_status1 & (1 << 12)) {
+ vfe_dev->stats->imagemaster3_overflow++;
+ pr_err_ratelimited("%s: image master 3 bus overflow\n",
+ __func__);
+ }
+ if (error_status1 & (1 << 13)) {
+ vfe_dev->stats->imagemaster4_overflow++;
+ pr_err_ratelimited("%s: image master 4 bus overflow\n",
+ __func__);
+ }
+ if (error_status1 & (1 << 14)) {
+ vfe_dev->stats->imagemaster5_overflow++;
+ pr_err_ratelimited("%s: image master 5 bus overflow\n",
+ __func__);
+ }
+ if (error_status1 & (1 << 15)) {
+ vfe_dev->stats->imagemaster6_overflow++;
+ pr_err_ratelimited("%s: image master 6 bus overflow\n",
+ __func__);
+ }
+ if (error_status1 & (1 << 16)) {
+ vfe_dev->stats->be_overflow++;
+ pr_err_ratelimited("%s: status be bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 17)) {
+ vfe_dev->stats->bg_overflow++;
+ pr_err_ratelimited("%s: status bg bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 18)) {
+ vfe_dev->stats->bf_overflow++;
+ pr_err_ratelimited("%s: status bf bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 19)) {
+ vfe_dev->stats->awb_overflow++;
+ pr_err_ratelimited("%s: status awb bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 20)) {
+ vfe_dev->stats->rs_overflow++;
+ pr_err_ratelimited("%s: status rs bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 21)) {
+ vfe_dev->stats->cs_overflow++;
+ pr_err_ratelimited("%s: status cs bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 22)) {
+ vfe_dev->stats->ihist_overflow++;
+ pr_err_ratelimited("%s: status ihist bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 23)) {
+ vfe_dev->stats->skinbhist_overflow++;
+ pr_err_ratelimited("%s: status skin bhist bus overflow\n",
+ __func__);
+ }
+
+ /* Update ab/ib values for any overflow that may have occurred */
+ if ((error_status1 >> 9) & 0x7FFF)
+ msm_isp_util_update_last_overflow_ab_ib(vfe_dev);
+}
+
+static void msm_vfe40_enable_camif_error(struct vfe_device *vfe_dev,
+ int enable)
+{
+ uint32_t val;
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
+ if (enable)
+ msm_camera_io_w_mb(val | BIT(0), vfe_dev->vfe_base + 0x2C);
+ else
+ msm_camera_io_w_mb(val & ~(BIT(0)), vfe_dev->vfe_base + 0x2C);
+}
+
+static void msm_vfe40_read_irq_status(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1)
+{
+ uint32_t irq_mask0, irq_mask1;
+
+ *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x38);
+ *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
+ /*
+ * Ignore composite 2/3 irq which is used for dual VFE only
+ */
+ if (*irq_status0 & 0x6000000)
+ *irq_status0 &= ~(0x18000000);
+ msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w(*irq_status1, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x24);
+ if (*irq_status0 & 0x18000000) {
+ pr_err_ratelimited("%s: Protection triggered\n", __func__);
+ *irq_status0 &= ~(0x18000000);
+ }
+
+ irq_mask0 = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask1 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
+ *irq_status0 &= irq_mask0;
+ *irq_status1 &= irq_mask1;
+
+ if (*irq_status1 & (1 << 0)) {
+ vfe_dev->error_info.camif_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x31C);
+ msm_vfe40_enable_camif_error(vfe_dev, 0);
+ }
+
+ if (*irq_status1 & (1 << 7))
+ vfe_dev->error_info.violation_status |=
+ msm_camera_io_r(vfe_dev->vfe_base + 0x48);
+}
+
+static void msm_vfe40_process_reg_update(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ enum msm_vfe_input_src i;
+ uint32_t shift_irq;
+ uint8_t reg_updated = 0;
+ unsigned long flags;
+
+ if (!(irq_status0 & 0xF0))
+ return;
+ /* Shift status bits so that PIX REG UPDATE is 1st bit */
+ shift_irq = ((irq_status0 & 0xF0) >> 4);
+
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ if (shift_irq & BIT(i)) {
+ reg_updated |= BIT(i);
+ ISP_DBG("%s REG_UPDATE IRQ %x\n", __func__,
+ (uint32_t)BIT(i));
+ switch (i) {
+ case VFE_PIX_0:
+ msm_isp_save_framedrop_values(vfe_dev,
+ VFE_PIX_0);
+ msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
+ VFE_PIX_0, ts);
+ if (atomic_read(
+ &vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ if (vfe_dev->axi_data.camif_state ==
+ CAMIF_STOPPING)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, i);
+ break;
+ case VFE_RAW_0:
+ case VFE_RAW_1:
+ case VFE_RAW_2:
+ msm_isp_increment_frame_id(vfe_dev, i, ts);
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
+ msm_isp_update_framedrop_reg(vfe_dev, i);
+ /*
+ * Reg Update is pseudo SOF for RDI,
+ * so request every frame
+ */
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev, i);
+ break;
+ default:
+ pr_err("%s: Error case\n", __func__);
+ return;
+ }
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(
+ &vfe_dev->axi_data.axi_cfg_update[i]) ==
+ 0)
+ msm_isp_notify(vfe_dev,
+ ISP_EVENT_STREAM_UPDATE_DONE,
+ i, ts);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ if (reg_updated & BIT(VFE_PIX_0))
+ vfe_dev->reg_updated = 1;
+
+ vfe_dev->reg_update_requested &= ~reg_updated;
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+static void msm_vfe40_reg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ uint32_t update_mask = 0;
+ unsigned long flags;
+
+ /* This HW supports up to VFE_RAW_2 */
+ if (frame_src > VFE_RAW_2 && frame_src != VFE_SRC_MAX) {
+ pr_err("%s Error case\n", __func__);
+ return;
+ }
+
+ /*
+ * If frame_src == VFE_SRC_MAX request reg_update on all
+ * supported INTF
+ */
+ if (frame_src == VFE_SRC_MAX)
+ update_mask = 0xF;
+ else
+ update_mask = BIT((uint32_t)frame_src);
+ ISP_DBG("%s update_mask %x\n", __func__, update_mask);
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].reg_update_frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ vfe_dev->reg_update_requested |= update_mask;
+ vfe_dev->common_data->dual_vfe_res->reg_update_mask[vfe_dev->pdev->id] =
+ vfe_dev->reg_update_requested;
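+ /*
+ * In dual VFE (split) mode, VFE1 drives the PIX reg update for both
+ * VFEs so that the two ISP halves stay in sync.
+ */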
+ if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) &&
+ ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
+ + 0x378);
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x378);
+ } else if (!vfe_dev->is_split ||
+ ((frame_src == VFE_PIX_0) &&
+ (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x378);
+ }
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+static void msm_vfe40_process_epoch_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0xc))
+ return;
+
+ if (irq_status0 & BIT(2)) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
+ ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
+ msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
+ msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_update_error_frame_count(vfe_dev);
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
+ && vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count == 0) {
+ ISP_DBG("%s: SOF IRQ\n", __func__);
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
+ if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
+ msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev, VFE_PIX_0);
+ }
+ }
+}
+
+static long msm_vfe40_reset_hardware(struct vfe_device *vfe_dev,
+ uint32_t first_start, uint32_t blocking_call)
+{
+ long rc = 0;
+ init_completion(&vfe_dev->reset_complete);
+
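+ /*
+ * First start issues the full reset command (0x1FF); later resets use
+ * 0x1EF and additionally clear pending IRQ status and reload all
+ * write masters.
+ */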
+ if (first_start) {
+ msm_camera_io_w_mb(0x1FF, vfe_dev->vfe_base + 0xC);
+ } else {
+ msm_camera_io_w_mb(0x1EF, vfe_dev->vfe_base + 0xC);
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ reload_wm(vfe_dev, vfe_dev->vfe_base, 0x0001FFFF);
+ }
+
+ if (blocking_call) {
+ rc = wait_for_completion_timeout(
+ &vfe_dev->reset_complete, msecs_to_jiffies(50));
+ }
+ return rc;
+}
+
+static void msm_vfe40_axi_reload_wm(struct vfe_device *vfe_dev,
+ void __iomem *vfe_base, uint32_t reload_mask)
+{
+ msm_camera_io_w_mb(reload_mask, vfe_base + 0x4C);
+}
+
+static void msm_vfe40_axi_update_cgc_override(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val;
+
+ /* Change CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x974);
+ if (enable)
+ val |= (1 << wm_idx);
+ else
+ val &= ~(1 << wm_idx);
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x974);
+}
+
+static void msm_vfe40_axi_enable_wm(void __iomem *vfe_base,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val;
+ val = msm_camera_io_r(vfe_base + VFE40_WM_BASE(wm_idx));
+ if (enable)
+ val |= 0x1;
+ else
+ val &= ~0x1;
+ msm_camera_io_w_mb(val,
+ vfe_base + VFE40_WM_BASE(wm_idx));
+}
+
+static void msm_vfe40_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t comp_mask, comp_mask_index =
+ stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ comp_mask |= (axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << (comp_mask_index * 8));
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask |= 1 << (comp_mask_index + 25);
+
+ /*
+ * For dual VFE, composite 2/3 interrupt is used to trigger
+ * microcontroller to update certain VFE registers
+ */
+ if (stream_info->plane_cfg[0].plane_addr_offset &&
+ stream_info->stream_src == PIX_VIEWFINDER) {
+ comp_mask |= (axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << 16);
+ irq_mask |= BIT(27);
+ }
+
+ if (stream_info->plane_cfg[0].plane_addr_offset &&
+ stream_info->stream_src == PIX_ENCODER) {
+ comp_mask |= (axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << 24);
+ irq_mask |= BIT(28);
+ }
+
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask &= ~(1 << (comp_mask_index + 25));
+
+ if (stream_info->plane_cfg[0].plane_addr_offset &&
+ stream_info->stream_src == PIX_VIEWFINDER) {
+ comp_mask &= ~(axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << 16);
+ irq_mask &= ~BIT(27);
+ }
+
+ if (stream_info->plane_cfg[0].plane_addr_offset &&
+ stream_info->stream_src == PIX_ENCODER) {
+ comp_mask &= ~(axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << 24);
+ irq_mask &= ~BIT(28);
+ }
+
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe40_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask |= 1 << (stream_info->wm[0] + 8);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe40_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask &= ~(1 << (stream_info->wm[0] + 8));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe40_cfg_framedrop(void __iomem *vfe_base,
+ struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
+ uint32_t framedrop_period)
+{
+ uint32_t i, temp;
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ msm_camera_io_w(framedrop_pattern, vfe_base +
+ VFE40_WM_BASE(stream_info->wm[i]) + 0x1C);
+ temp = msm_camera_io_r(vfe_base +
+ VFE40_WM_BASE(stream_info->wm[i]) + 0xC);
+ temp &= 0xFFFFFF83;
+ msm_camera_io_w(temp | (framedrop_period - 1) << 2,
+ vfe_base + VFE40_WM_BASE(stream_info->wm[i]) + 0xC);
+ }
+
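+ /* Commit the new framedrop configuration with a reg update */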
+ msm_camera_io_w_mb(0x1, vfe_base + 0x378);
+}
+
+static void msm_vfe40_clear_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t i;
+ for (i = 0; i < stream_info->num_planes; i++)
+ msm_camera_io_w(0, vfe_dev->vfe_base +
+ VFE40_WM_BASE(stream_info->wm[i]) + 0x1C);
+}
+
+static int32_t msm_vfe40_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
+{
+ int rc = 0;
+
+ switch (bpp) {
+ case 8:
+ *bpp_reg = 0;
+ break;
+ case 10:
+ *bpp_reg = 1 << 0;
+ break;
+ case 12:
+ *bpp_reg = 1 << 1;
+ break;
+ default:
+ pr_err("%s:%d invalid bpp %d", __func__, __LINE__, bpp);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static int32_t msm_vfe40_convert_io_fmt_to_reg(
+ enum msm_isp_pack_fmt pack_format, uint32_t *pack_reg)
+{
+ int rc = 0;
+
+ switch (pack_format) {
+ case QCOM:
+ *pack_reg = 0x0;
+ break;
+ case MIPI:
+ *pack_reg = 0x1;
+ break;
+ case DPCM6:
+ *pack_reg = 0x2;
+ break;
+ case DPCM8:
+ *pack_reg = 0x3;
+ break;
+ case PLAIN8:
+ *pack_reg = 0x4;
+ break;
+ case PLAIN16:
+ *pack_reg = 0x5;
+ break;
+ default:
+ pr_err("%s: invalid pack fmt %d!\n", __func__, pack_format);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static int32_t msm_vfe40_cfg_io_format(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
+{
+ int rc = 0;
+ int bpp = 0, read_bpp = 0;
+ enum msm_isp_pack_fmt pack_fmt = 0, read_pack_fmt = 0;
+ uint32_t bpp_reg = 0, pack_reg = 0;
+ uint32_t read_bpp_reg = 0, read_pack_reg = 0;
+ uint32_t io_format_reg = 0; /*io format register bit*/
+
+ io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x54);
+ if ((stream_src < RDI_INTF_0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux ==
+ EXTERNAL_READ)) {
+ read_bpp = msm_isp_get_bit_per_pixel(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe40_convert_bpp_to_reg(read_bpp, &read_bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! in_bpp %d rc %d\n",
+ __func__, read_bpp, rc);
+ return rc;
+ }
+ read_pack_fmt = msm_isp_get_pack_format(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe40_convert_io_fmt_to_reg(
+ read_pack_fmt, &read_pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ /*use input format(v4l2_pix_fmt) to get pack format*/
+ io_format_reg &= 0xFFC8FFFF;
+ io_format_reg |= (read_bpp_reg << 20 | read_pack_reg << 16);
+ }
+
+ bpp = msm_isp_get_bit_per_pixel(io_format);
+ rc = msm_vfe40_convert_bpp_to_reg(bpp, &bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! bpp %d rc = %d\n",
+ __func__, bpp, rc);
+ return rc;
+ }
+
+ switch (stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
+ case CAMIF_RAW:
+ io_format_reg &= 0xFFFFCFFF;
+ io_format_reg |= bpp_reg << 12;
+ break;
+ case IDEAL_RAW:
+ /*use output format(v4l2_pix_fmt) to get pack format*/
+ pack_fmt = msm_isp_get_pack_format(io_format);
+ rc = msm_vfe40_convert_io_fmt_to_reg(pack_fmt, &pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ io_format_reg &= 0xFFFFFFC8;
+ io_format_reg |= bpp_reg << 4 | pack_reg;
+ break;
+ case RDI_INTF_0:
+ case RDI_INTF_1:
+ case RDI_INTF_2:
+ default:
+ pr_err("%s: Invalid stream source\n", __func__);
+ return -EINVAL;
+ }
+ msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x54);
+ return 0;
+}
+
+static int msm_vfe40_start_fetch_engine(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ int rc = 0;
+ uint32_t bufq_handle = 0;
+ struct msm_isp_buffer *buf = NULL;
+ struct msm_vfe_fetch_eng_start *fe_cfg = arg;
+ struct msm_isp_buffer_mapped_info mapped_info;
+
+ if (vfe_dev->fetch_engine_info.is_busy == 1) {
+ pr_err("%s: fetch engine busy\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
+ /*
+ * The buffer address may also be passed from userspace; in that
+ * case the driver needs to map the buffer before using it.
+ */
+ vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
+ vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
+ vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
+ vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
+
+ if (!fe_cfg->offline_mode) {
+ bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, fe_cfg->session_id,
+ fe_cfg->stream_id);
+ vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+
+ rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
+ vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
+ if (rc < 0 || !buf) {
+ pr_err("%s: No fetch buffer rc= %d buf= %p\n",
+ __func__, rc, buf);
+ return -EINVAL;
+ }
+ mapped_info = buf->mapped_info[0];
+ buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ } else {
+ rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
+ &mapped_info, fe_cfg->fd);
+ if (rc < 0) {
+ pr_err("%s: can not map buffer\n", __func__);
+ return -EINVAL;
+ }
+ }
+ vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
+ vfe_dev->fetch_engine_info.is_busy = 1;
+
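+ /* Program the fetch buffer address and start the fetch engine */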
+ msm_camera_io_w(mapped_info.paddr, vfe_dev->vfe_base + 0x228);
+
+ msm_camera_io_w_mb(0x10000, vfe_dev->vfe_base + 0x4C);
+ msm_camera_io_w_mb(0x20000, vfe_dev->vfe_base + 0x4C);
+
+ ISP_DBG("%s:VFE%d Fetch Engine ready\n", __func__, vfe_dev->pdev->id);
+ return 0;
+}
+
+static void msm_vfe40_cfg_fetch_engine(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint32_t x_size_word;
+ uint32_t temp = 0;
+ struct msm_vfe_fetch_engine_cfg *fe_cfg = NULL;
+
+ if (pix_cfg->input_mux != EXTERNAL_READ) {
+ pr_err("%s: Invalid mux configuration - mux: %d",
+ __func__, pix_cfg->input_mux);
+ return;
+ }
+
+ fe_cfg = &pix_cfg->fetch_engine_cfg;
+ pr_debug("%s: fetch_dbg wd x ht buf = %d x %d, fe = %d x %d\n",
+ __func__, fe_cfg->buf_width, fe_cfg->buf_height,
+ fe_cfg->fetch_width, fe_cfg->fetch_height);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_cgc_override(vfe_dev,
+ VFE40_BUS_RD_CGC_OVERRIDE_BIT, 1);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ temp &= 0xFFFFFFFD;
+ temp |= (1 << 1);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ temp &= 0xFEFFFFFF;
+ temp |= (1 << 24);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x28);
+
+ msm_camera_io_w((fe_cfg->fetch_height - 1),
+ vfe_dev->vfe_base + 0x238);
+
+ /* need to update to use a formula to calculate X_SIZE_WORD */
+ x_size_word = msm_isp_cal_word_per_line(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
+ fe_cfg->fetch_width);
+
+ msm_camera_io_w((x_size_word - 1) << 16, vfe_dev->vfe_base + 0x23C);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ temp |= 2 << 16;
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x1C);
+
+ msm_camera_io_w(x_size_word << 16 |
+ (fe_cfg->buf_height-1) << 4 |
+ VFE40_FETCH_BURST_LEN, vfe_dev->vfe_base + 0x240);
+
+ msm_camera_io_w(0 << 28 | 2 << 25 |
+ (fe_cfg->buf_width - 1) << 12 |
+ (fe_cfg->buf_height - 1)
+ , vfe_dev->vfe_base + 0x244);
+
+ /* need to use a formula to calculate MAIN_UNPACK_PATTERN */
+ msm_camera_io_w(0xF6543210, vfe_dev->vfe_base + 0x248);
+ msm_camera_io_w(0xF, vfe_dev->vfe_base + 0x264);
+
+ return;
+}
+
+static void msm_vfe40_cfg_testgen(struct vfe_device *vfe_dev,
+ struct msm_vfe_testgen_cfg *testgen_cfg)
+{
+ uint32_t bit_per_pixel = 0;
+ uint32_t bpp_reg = 0;
+ uint32_t bayer_pix_pattern_reg = 0;
+ uint32_t unicolorbar_reg = 0;
+ uint32_t unicolor_enb = 0;
+
+ bit_per_pixel = msm_isp_get_bit_per_pixel(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+
+ switch (bit_per_pixel) {
+ case 8:
+ bpp_reg = 0x0;
+ break;
+ case 10:
+ bpp_reg = 0x1;
+ break;
+ case 12:
+ bpp_reg = 0x10;
+ break;
+ case 14:
+ bpp_reg = 0x11;
+ break;
+ default:
+ pr_err("%s: invalid bpp %d\n", __func__, bit_per_pixel);
+ break;
+ }
+
+ msm_camera_io_w(bpp_reg << 16 | testgen_cfg->burst_num_frame,
+ vfe_dev->vfe_base + 0x940);
+
+ msm_camera_io_w(((testgen_cfg->lines_per_frame - 1) << 16) |
+ (testgen_cfg->pixels_per_line - 1), vfe_dev->vfe_base + 0x944);
+
+ msm_camera_io_w(testgen_cfg->h_blank, vfe_dev->vfe_base + 0x958);
+
+ msm_camera_io_w((1 << 16) | testgen_cfg->v_blank,
+ vfe_dev->vfe_base + 0x95C);
+
+ switch (testgen_cfg->pixel_bayer_pattern) {
+ case ISP_BAYER_RGRGRG:
+ bayer_pix_pattern_reg = 0x0;
+ break;
+ case ISP_BAYER_GRGRGR:
+ bayer_pix_pattern_reg = 0x1;
+ break;
+ case ISP_BAYER_BGBGBG:
+ bayer_pix_pattern_reg = 0x10;
+ break;
+ case ISP_BAYER_GBGBGB:
+ bayer_pix_pattern_reg = 0x11;
+ break;
+ default:
+ pr_err("%s: invalid pix pattern %d\n",
+ __func__, testgen_cfg->pixel_bayer_pattern);
+ break;
+ }
+
+ if (testgen_cfg->color_bar_pattern == COLOR_BAR_8_COLOR) {
+ unicolor_enb = 0x0;
+ } else {
+ unicolor_enb = 0x1;
+ switch (testgen_cfg->color_bar_pattern) {
+ case UNICOLOR_WHITE:
+ unicolorbar_reg = 0x0;
+ break;
+ case UNICOLOR_YELLOW:
+ unicolorbar_reg = 0x1;
+ break;
+ case UNICOLOR_CYAN:
+ unicolorbar_reg = 0x10;
+ break;
+ case UNICOLOR_GREEN:
+ unicolorbar_reg = 0x11;
+ break;
+ case UNICOLOR_MAGENTA:
+ unicolorbar_reg = 0x100;
+ break;
+ case UNICOLOR_RED:
+ unicolorbar_reg = 0x101;
+ break;
+ case UNICOLOR_BLUE:
+ unicolorbar_reg = 0x110;
+ break;
+ case UNICOLOR_BLACK:
+ unicolorbar_reg = 0x111;
+ break;
+ default:
+ pr_err("%s: invalid colorbar %d\n",
+ __func__, testgen_cfg->color_bar_pattern);
+ break;
+ }
+ }
+ msm_camera_io_w((testgen_cfg->rotate_period << 8) |
+ (bayer_pix_pattern_reg << 6) | (unicolor_enb << 4) |
+ (unicolorbar_reg), vfe_dev->vfe_base + 0x968);
+}
+
+static void msm_vfe40_cfg_camif(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint16_t first_pixel, last_pixel, first_line, last_line;
+ struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg;
+ uint32_t val, subsample_period, subsample_pattern;
+ struct msm_vfe_camif_subsample_cfg *subsample_cfg =
+ &pix_cfg->camif_cfg.subsample_cfg;
+ uint16_t bus_sub_en = 0;
+
+ vfe_dev->dual_vfe_enable = camif_cfg->is_split;
+
+ msm_camera_io_w(pix_cfg->input_mux << 16 | pix_cfg->pixel_pattern,
+ vfe_dev->vfe_base + 0x1C);
+
+ first_pixel = camif_cfg->first_pixel;
+ last_pixel = camif_cfg->last_pixel;
+ first_line = camif_cfg->first_line;
+ last_line = camif_cfg->last_line;
+ subsample_period = camif_cfg->subsample_cfg.irq_subsample_period;
+ subsample_pattern = camif_cfg->subsample_cfg.irq_subsample_pattern;
+
+ msm_camera_io_w(camif_cfg->lines_per_frame << 16 |
+ camif_cfg->pixels_per_line, vfe_dev->vfe_base + 0x300);
+
+ msm_camera_io_w(first_pixel << 16 | last_pixel,
+ vfe_dev->vfe_base + 0x304);
+
+ msm_camera_io_w(first_line << 16 | last_line,
+ vfe_dev->vfe_base + 0x308);
+ if (subsample_period && subsample_pattern) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
+ val &= 0xFFE0FFFF;
+ val |= (subsample_period - 1) << 16;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
+ ISP_DBG("%s:camif PERIOD %x PATTERN %x\n",
+ __func__, subsample_period, subsample_pattern);
+
+ val = subsample_pattern;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x314);
+ } else {
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x314);
+ }
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2E8);
+ val |= camif_cfg->camif_input;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x2E8);
+
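+ /* Enable bus sub-sampling when pixel or line skipping is requested */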
+ if (subsample_cfg->pixel_skip || subsample_cfg->line_skip) {
+ bus_sub_en = 1;
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
+ val &= 0xFFFFFFDF;
+ val = val | bus_sub_en << 5;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
+ subsample_cfg->pixel_skip &= 0x0000FFFF;
+ subsample_cfg->line_skip &= 0x0000FFFF;
+ msm_camera_io_w((subsample_cfg->line_skip << 16) |
+ subsample_cfg->pixel_skip,
+ vfe_dev->vfe_base + 0x30C);
+ if (vfe_dev->is_camif_raw_crop_supported) {
+ /* PDAF output will be sent in PLAIN16 format */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x54);
+ val |= 5 << 9;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x54);
+ if (subsample_cfg->first_pixel ||
+ subsample_cfg->last_pixel ||
+ subsample_cfg->first_line ||
+ subsample_cfg->last_line) {
+ msm_camera_io_w(
+ subsample_cfg->first_pixel << 16 |
+ subsample_cfg->last_pixel,
+ vfe_dev->vfe_base + 0x8A4);
+ msm_camera_io_w(
+ subsample_cfg->first_line << 16 |
+ subsample_cfg->last_line,
+ vfe_dev->vfe_base + 0x8A8);
+ val = msm_camera_io_r(
+ vfe_dev->vfe_base + 0x2F8);
+ val |= 1 << 22;
+ msm_camera_io_w(val,
+ vfe_dev->vfe_base + 0x2F8);
+ }
+ }
+
+ }
+}
+
+static void msm_vfe40_cfg_input_mux(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint32_t core_cfg = 0;
+ uint32_t val = 0;
+
+ core_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ core_cfg &= 0xFFFCFFFF;
+
+ switch (pix_cfg->input_mux) {
+ case CAMIF:
+ core_cfg |= 0x0 << 16;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x1C);
+ msm_vfe40_cfg_camif(vfe_dev, pix_cfg);
+ break;
+ case TESTGEN:
+ /* Change CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x974);
+ val |= (1 << 31);
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x974);
+
+ /* CAMIF and TESTGEN will both go through CAMIF */
+ core_cfg |= 0x1 << 16;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x1C);
+ msm_vfe40_cfg_camif(vfe_dev, pix_cfg);
+ msm_vfe40_cfg_testgen(vfe_dev, &pix_cfg->testgen_cfg);
+ break;
+ case EXTERNAL_READ:
+ core_cfg |= 0x2 << 16;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x1C);
+ msm_vfe40_cfg_fetch_engine(vfe_dev, pix_cfg);
+ break;
+ default:
+ pr_err("%s: Unsupported input mux %d\n",
+ __func__, pix_cfg->input_mux);
+ break;
+ }
+ return;
+}
+
+static void msm_vfe40_update_camif_state(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state)
+{
+ uint32_t val;
+ bool bus_en, vfe_en;
+ if (update_state == NO_UPDATE)
+ return;
+
+ if (update_state == ENABLE_CAMIF) {
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ val |= 0xF7;
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318);
+
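+ /*
+ * Route CAMIF output to the bus (raw streams) and/or the VFE
+ * pipeline (pixel streams) depending on which streams are active.
+ */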
+ bus_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
+ vfe_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
+ val &= 0xFFFFFF3F;
+ val = val | bus_en << 7 | vfe_en << 6;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
+ msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x2F4);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2F4);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
+ /* testgen GO*/
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1, vfe_dev->vfe_base + 0x93C);
+ } else if (update_state == DISABLE_CAMIF) {
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x2F4);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ /* testgen OFF*/
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0x93C);
+ } else if (update_state == DISABLE_CAMIF_IMMEDIATELY) {
+ msm_camera_io_w_mb(0x6, vfe_dev->vfe_base + 0x2F4);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0x93C);
+ }
+}
+
+static void msm_vfe40_cfg_rdi_reg(
+ struct vfe_device *vfe_dev, struct msm_vfe_rdi_cfg *rdi_cfg,
+ enum msm_vfe_input_src input_src)
+{
+ uint8_t rdi = input_src - VFE_RAW_0;
+ uint32_t rdi_reg_cfg;
+ rdi_reg_cfg = msm_camera_io_r(
+ vfe_dev->vfe_base + VFE40_RDI_BASE(0));
+ rdi_reg_cfg &= ~(BIT(16 + rdi));
+ rdi_reg_cfg |= rdi_cfg->frame_based << (16 + rdi);
+ msm_camera_io_w(rdi_reg_cfg,
+ vfe_dev->vfe_base + VFE40_RDI_BASE(0));
+
+ rdi_reg_cfg = msm_camera_io_r(
+ vfe_dev->vfe_base + VFE40_RDI_BASE(rdi));
+ rdi_reg_cfg &= 0x70003;
+ rdi_reg_cfg |= (rdi * 3) << 28 | rdi_cfg->cid << 4 | 0x4;
+ msm_camera_io_w(
+ rdi_reg_cfg, vfe_dev->vfe_base + VFE40_RDI_BASE(rdi));
+}
+
+static void msm_vfe40_axi_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ uint32_t val;
+ uint32_t burst_len, wm_bit_shift = VFE40_WM_BIT_SHIFT_8976_VERSION;
+ uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
+
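+ /* Burst length and WM bit shift depend on the VFE40 hardware revision */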
+ if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8939_VERSION) {
+ burst_len = VFE40_BURST_LEN_8916_VERSION;
+ wm_bit_shift = VFE40_WM_BIT_SHIFT;
+ } else if (vfe_dev->vfe_hw_version == VFE40_8952_VERSION) {
+ burst_len = VFE40_BURST_LEN_8952_VERSION;
+ wm_bit_shift = VFE40_WM_BIT_SHIFT;
+ } else if (vfe_dev->vfe_hw_version == VFE40_8976_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8937_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8953_VERSION) {
+ burst_len = VFE40_BURST_LEN_8952_VERSION;
+ wm_bit_shift = VFE40_WM_BIT_SHIFT_8976_VERSION;
+ } else {
+ burst_len = VFE40_BURST_LEN;
+ }
+
+ if (!stream_info->frame_based) {
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + wm_base);
+ /*WR_IMAGE_SIZE*/
+ val =
+ ((msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[plane_idx].
+ output_width)+1)/2 - 1) << 16 |
+ (stream_info->plane_cfg[plane_idx].
+ output_height - 1);
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+
+ /*WR_BUFFER_CFG*/
+ val =
+ msm_isp_cal_word_per_line(stream_info->output_format,
+ stream_info->plane_cfg[
+ plane_idx].output_stride) << 16 |
+ (stream_info->plane_cfg[
+ plane_idx].output_height - 1) << wm_bit_shift |
+ burst_len;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
+ } else {
+ msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
+ val =
+ msm_isp_cal_word_per_line(stream_info->output_format,
+ stream_info->plane_cfg[
+ plane_idx].output_width) << 16 |
+ (stream_info->plane_cfg[
+ plane_idx].output_height - 1) << 4 |
+ burst_len;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
+ }
+
+ /*WR_IRQ_SUBSAMPLE_PATTERN*/
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + wm_base + 0x20);
+ /* TODO: Add IRQ subsample pattern */
+ return;
+}
+
+static void msm_vfe40_axi_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint32_t val = 0;
+ uint32_t wm_base = VFE40_WM_BASE(stream_info->wm[plane_idx]);
+ /*WR_ADDR_CFG*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
+ /*WR_IMAGE_SIZE*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ /*WR_BUFFER_CFG*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
+ /*WR_IRQ_SUBSAMPLE_PATTERN*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
+ return;
+}
+
+static void msm_vfe40_axi_cfg_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ struct msm_vfe_axi_plane_cfg *plane_cfg =
+ &stream_info->plane_cfg[plane_idx];
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_cfg = 0;
+ uint32_t xbar_reg_cfg = 0;
+
+ switch (stream_info->stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER: {
+ if (plane_cfg->output_plane_format != CRCB_PLANE &&
+ plane_cfg->output_plane_format != CBCR_PLANE) {
+ /*SINGLE_STREAM_SEL*/
+ xbar_cfg |= plane_cfg->output_plane_format << 8;
+ } else {
+ switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ xbar_cfg |= 0x3 << 4; /*PAIR_STREAM_SWAP_CTRL*/
+ break;
+ }
+ xbar_cfg |= 0x1 << 1; /*PAIR_STREAM_EN*/
+ }
+ if (stream_info->stream_src == PIX_VIEWFINDER)
+ xbar_cfg |= 0x1; /*VIEW_STREAM_EN*/
+ break;
+ }
+ case CAMIF_RAW:
+ xbar_cfg = 0x300;
+ break;
+ case IDEAL_RAW:
+ xbar_cfg = 0x400;
+ break;
+ case RDI_INTF_0:
+ xbar_cfg = 0x500;
+ break;
+ case RDI_INTF_1:
+ xbar_cfg = 0x600;
+ break;
+ case RDI_INTF_2:
+ xbar_cfg = 0x700;
+ break;
+ default:
+ pr_err("%s: Invalid stream src\n", __func__);
+ break;
+ }
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE40_XBAR_SHIFT(wm));
+ xbar_reg_cfg |= (xbar_cfg << VFE40_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
+ return;
+}
+
+static void msm_vfe40_axi_clear_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_reg_cfg = 0;
+
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE40_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE40_XBAR_BASE(wm));
+}
+
+static void msm_vfe40_cfg_axi_ub_equal_default(
+ struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data =
+ &vfe_dev->axi_data;
+ uint32_t total_image_size = 0;
+ uint8_t num_used_wms = 0;
+ uint32_t prop_size = 0;
+ uint32_t wm_ub_size;
+ uint32_t total_wm_ub;
+
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i] > 0) {
+ num_used_wms++;
+ total_image_size += axi_data->wm_image_size[i];
+ }
+ }
+
+ if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION) {
+ vfe_dev->ub_info->wm_ub = VFE40_TOTAL_WM_UB_8916;
+ total_wm_ub = VFE40_TOTAL_WM_UB_8916;
+ } else {
+ vfe_dev->ub_info->wm_ub = VFE40_TOTAL_WM_UB;
+ total_wm_ub = VFE40_TOTAL_WM_UB;
+ }
+ vfe_dev->ub_info->num_wm = axi_data->hw_info->num_wm;
+ prop_size = total_wm_ub -
+ axi_data->hw_info->min_wm_ub * num_used_wms;
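+ /*
+ * Each active write master gets the minimum UB plus a share of the
+ * remaining UB proportional to its image size.
+ */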
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i]) {
+ uint64_t delta = 0;
+ uint64_t temp = (uint64_t)axi_data->wm_image_size[i] *
+ (uint64_t)prop_size;
+ do_div(temp, total_image_size);
+ delta = temp;
+ wm_ub_size = axi_data->hw_info->min_wm_ub + delta;
+ msm_camera_io_w(ub_offset << 16 | (wm_ub_size - 1),
+ vfe_dev->vfe_base + VFE40_WM_BASE(i) + 0x10);
+
+ vfe_dev->ub_info->data[i] =
+ ub_offset << 16 | (wm_ub_size - 1);
+ vfe_dev->ub_info->addr[i] = VFE40_WM_BASE(i) + 0x10;
+ ub_offset += wm_ub_size;
+ } else {
+ msm_camera_io_w(0,
+ vfe_dev->vfe_base + VFE40_WM_BASE(i) + 0x10);
+ vfe_dev->ub_info->data[i] = 0;
+ vfe_dev->ub_info->addr[i] = VFE40_WM_BASE(i) + 0x10;
+ }
+ }
+}
+
+static void msm_vfe40_cfg_axi_ub_equal_slicing(
+ struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ uint32_t equal_slice_ub;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8952_VERSION) {
+ vfe_dev->ub_info->wm_ub = VFE40_EQUAL_SLICE_UB_8916;
+ equal_slice_ub = VFE40_EQUAL_SLICE_UB_8916;
+ } else {
+ vfe_dev->ub_info->wm_ub = VFE40_EQUAL_SLICE_UB;
+ equal_slice_ub = VFE40_EQUAL_SLICE_UB;
+ }
+
+ vfe_dev->ub_info->num_wm = axi_data->hw_info->num_wm;
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ msm_camera_io_w(ub_offset << 16 | (equal_slice_ub - 1),
+ vfe_dev->vfe_base + VFE40_WM_BASE(i) + 0x10);
+ vfe_dev->ub_info->data[i] =
+ ub_offset << 16 | (equal_slice_ub - 1);
+ vfe_dev->ub_info->addr[i] = VFE40_WM_BASE(i) + 0x10;
+ ub_offset += equal_slice_ub;
+ }
+}
+
+static void msm_vfe40_cfg_axi_ub(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ axi_data->wm_ub_cfg_policy =
+ (enum msm_wm_ub_cfg_type)vfe_dev->vfe_ub_policy;
+ ISP_DBG("%s: ub_policy %d\n", __func__, axi_data->wm_ub_cfg_policy);
+
+ if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING) {
+ vfe_dev->ub_info->policy = MSM_WM_UB_EQUAL_SLICING;
+ msm_vfe40_cfg_axi_ub_equal_slicing(vfe_dev);
+ } else {
+ vfe_dev->ub_info->policy = MSM_WM_UB_CFG_DEFAULT;
+ msm_vfe40_cfg_axi_ub_equal_default(vfe_dev);
+ }
+}
+
+static void msm_vfe40_read_wm_ping_pong_addr(
+ struct vfe_device *vfe_dev)
+{
+ msm_camera_io_dump(vfe_dev->vfe_base +
+ (VFE40_WM_BASE(0) & 0xFFFFFFF0), 0x200, 1);
+}
+
+static void msm_vfe40_update_ping_pong_addr(
+ void __iomem *vfe_base,
+ uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
+ int32_t buf_size)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE40_PING_PONG_BASE(wm_idx, pingpong_bit));
+}
+
+static int msm_vfe40_axi_halt(struct vfe_device *vfe_dev,
+ uint32_t blocking)
+{
+ int rc = 0;
+ enum msm_vfe_input_src i;
+
+ /* Keep only halt and restart mask */
+ msm_camera_io_w(BIT(31), vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w(BIT(8), vfe_dev->vfe_base + 0x2C);
+ /*Clear IRQ Status */
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
+ /* if any stream is waiting for update, signal complete */
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ if (vfe_dev->axi_data.stream_update[i]) {
+ ISP_DBG("%s: complete stream update\n", __func__);
+ msm_isp_axi_stream_update(vfe_dev, i);
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ }
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ ISP_DBG("%s: complete on axi config update\n",
+ __func__);
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ }
+ }
+
+ if (atomic_read(&vfe_dev->stats_data.stats_update)) {
+ ISP_DBG("%s: complete on stats update\n", __func__);
+ msm_isp_stats_stream_update(vfe_dev);
+ if (atomic_read(&vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ }
+
+ if (blocking) {
+ init_completion(&vfe_dev->halt_complete);
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
+ rc = wait_for_completion_interruptible_timeout(
+ &vfe_dev->halt_complete, msecs_to_jiffies(500));
+ if (rc <= 0)
+ pr_err("%s:VFE%d halt timeout rc=%d\n", __func__,
+ vfe_dev->pdev->id, rc);
+ } else {
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
+ }
+
+ return rc;
+}
+
+static int msm_vfe40_axi_restart(struct vfe_device *vfe_dev,
+ uint32_t blocking, uint32_t enable_camif)
+{
+ vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev);
+ /* Clear IRQ Status */
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
+ msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318);
+
+ /* Start AXI */
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x2C0);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_SRC_MAX);
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+ if (enable_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, ENABLE_CAMIF);
+ }
+
+ return 0;
+}
+
+static uint32_t msm_vfe40_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 8) & 0x7F;
+}
+
+static uint32_t msm_vfe40_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 25) & 0xF;
+}
+
+static uint32_t msm_vfe40_get_pingpong_status(
+ struct vfe_device *vfe_dev)
+{
+ return msm_camera_io_r(vfe_dev->vfe_base + 0x268);
+}
+
+static int msm_vfe40_get_stats_idx(enum msm_isp_stats_type stats_type)
+{
+ switch (stats_type) {
+ case MSM_ISP_STATS_BE:
+ return 0;
+ case MSM_ISP_STATS_BG:
+ return 1;
+ case MSM_ISP_STATS_BF:
+ return 2;
+ case MSM_ISP_STATS_AWB:
+ return 3;
+ case MSM_ISP_STATS_RS:
+ return 4;
+ case MSM_ISP_STATS_CS:
+ return 5;
+ case MSM_ISP_STATS_IHIST:
+ return 6;
+ case MSM_ISP_STATS_BHIST:
+ return 7;
+ default:
+ pr_err("%s: Invalid stats type\n", __func__);
+ return -EINVAL;
+ }
+}
+
+static int msm_vfe40_stats_check_streams(
+ struct msm_vfe_stats_stream *stream_info)
+{
+ return 0;
+}
+
+static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t request_comp_index, uint8_t enable)
+{
+ uint32_t comp_mask_reg, mask_bf_scale;
+ atomic_t *stats_comp_mask;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask < 1)
+ return;
+
+ if (request_comp_index >= MAX_NUM_STATS_COMP_MASK) {
+ pr_err("%s: num of comp masks %d exceed max %d\n",
+ __func__, request_comp_index,
+ MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask >
+ MAX_NUM_STATS_COMP_MASK) {
+ pr_err("%s: num of comp masks %d exceed max %d\n",
+ __func__,
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask,
+ MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+ stats_mask = stats_mask & 0xFF;
+ mask_bf_scale = stats_mask;
+
+ stats_comp_mask = &stats_data->stats_comp_mask[request_comp_index];
+ comp_mask_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x44);
+
+ if (enable) {
+ comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
+ atomic_set(stats_comp_mask, stats_mask |
+ atomic_read(stats_comp_mask));
+ } else {
+ if (!(atomic_read(stats_comp_mask) & stats_mask))
+ return;
+ atomic_set(stats_comp_mask,
+ ~stats_mask & atomic_read(stats_comp_mask));
+ comp_mask_reg &= ~(mask_bf_scale <<
+ (16 + request_comp_index * 8));
+ }
+ msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x44);
+
+ ISP_DBG("%s: comp_mask_reg: %x comp mask0 %x mask1: %x\n",
+ __func__, comp_mask_reg,
+ atomic_read(&stats_data->stats_comp_mask[0]),
+ atomic_read(&stats_data->stats_comp_mask[1]));
+
+ return;
+}
+
+static void msm_vfe40_stats_cfg_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask |= 1 << (STATS_IDX(stream_info->stream_handle) + 16);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe40_stats_clear_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask &= ~(1 << (STATS_IDX(stream_info->stream_handle) + 16));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe40_stats_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE40_STATS_BASE(stats_idx);
+
+ /*WR_ADDR_CFG*/
+ msm_camera_io_w(stream_info->framedrop_period << 2,
+ vfe_dev->vfe_base + stats_base + 0x8);
+ /*WR_IRQ_FRAMEDROP_PATTERN*/
+ msm_camera_io_w(stream_info->framedrop_pattern,
+ vfe_dev->vfe_base + stats_base + 0x10);
+ /*WR_IRQ_SUBSAMPLE_PATTERN*/
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + stats_base + 0x14);
+}
+
+static void msm_vfe40_stats_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t val = 0;
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE40_STATS_BASE(stats_idx);
+
+ /*WR_ADDR_CFG*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x8);
+ /*WR_IRQ_FRAMEDROP_PATTERN*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x10);
+ /*WR_IRQ_SUBSAMPLE_PATTERN*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x14);
+}
+
+static void msm_vfe40_stats_cfg_ub(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset;
+ uint32_t stats_burst_len;
+ uint32_t ub_size[VFE40_NUM_STATS_TYPE] = {
+ 64, /*MSM_ISP_STATS_BE*/
+ 128, /*MSM_ISP_STATS_BG*/
+ 128, /*MSM_ISP_STATS_BF*/
+ 16, /*MSM_ISP_STATS_AWB*/
+ 8, /*MSM_ISP_STATS_RS*/
+ 16, /*MSM_ISP_STATS_CS*/
+ 16, /*MSM_ISP_STATS_IHIST*/
+ 16, /*MSM_ISP_STATS_BHIST*/
+ };
+
+ if (vfe_dev->vfe_hw_version == VFE40_8916_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8939_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8937_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8953_VERSION) {
+ stats_burst_len = VFE40_STATS_BURST_LEN_8916_VERSION;
+ ub_offset = VFE40_UB_SIZE_8916;
+ } else if (vfe_dev->vfe_hw_version == VFE40_8952_VERSION ||
+ vfe_dev->vfe_hw_version == VFE40_8976_VERSION) {
+ stats_burst_len = VFE40_STATS_BURST_LEN_8916_VERSION;
+ ub_offset = VFE40_UB_SIZE_8952;
+ } else {
+ stats_burst_len = VFE40_STATS_BURST_LEN;
+ ub_offset = VFE40_UB_SIZE;
+ }
+
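+ /* Carve a UB slice for each stats engine, working down from ub_offset */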
+ for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
+ ub_offset -= ub_size[i];
+ msm_camera_io_w(stats_burst_len << 30 |
+ ub_offset << 16 | (ub_size[i] - 1),
+ vfe_dev->vfe_base + VFE40_STATS_BASE(i) + 0xC);
+ }
+}
+
+static void msm_vfe40_stats_update_cgc_override(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, cgc_mask = 0;
+
+ for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case STATS_IDX_BE:
+ cgc_mask |= (1 << 8);
+ break;
+ case STATS_IDX_BF:
+ cgc_mask |= (1 << 10);
+ break;
+ case STATS_IDX_BG:
+ cgc_mask |= (1 << 9);
+ break;
+ case STATS_IDX_BHIST:
+ cgc_mask |= (1 << 15);
+ break;
+ case STATS_IDX_AWB:
+ cgc_mask |= (1 << 11);
+ break;
+ case STATS_IDX_RS:
+ cgc_mask |= (1 << 12);
+ break;
+ case STATS_IDX_CS:
+ cgc_mask |= (1 << 13);
+ break;
+ case STATS_IDX_IHIST:
+ cgc_mask |= (1 << 14);
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ /* CGC override */
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x974);
+ if (enable)
+ module_cfg |= cgc_mask;
+ else
+ module_cfg &= ~cgc_mask;
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x974);
+}
+
+static bool msm_vfe40_is_module_cfg_lock_needed(
+ uint32_t reg_offset)
+{
+ return (reg_offset == 0x18);
+}
+
+static void msm_vfe40_stats_enable_module(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, module_cfg_mask = 0;
+ unsigned long flags;
+
+ for (i = 0; i < VFE40_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ module_cfg_mask |= 1 << (5 + i);
+ break;
+ case 6:
+ module_cfg_mask |= 1 << 15;
+ break;
+ case 7:
+ module_cfg_mask |= 1 << 18;
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ /*
+ * On VFE40, stats and other modules share the MODULE_CFG register,
+ * so the shared-data lock must be held while updating it.
+ */
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x18);
+ if (enable)
+ module_cfg |= module_cfg_mask;
+ else
+ module_cfg &= ~module_cfg_mask;
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x18);
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+}
+
+static void msm_vfe40_stats_update_ping_pong_addr(
+ void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status, dma_addr_t paddr)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE40_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
+}
+
+static uint32_t msm_vfe40_stats_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 16) & 0xFF;
+}
+
+static uint32_t msm_vfe40_stats_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 29) & 0x3;
+}
+
+static uint32_t msm_vfe40_stats_get_frame_id(
+ struct vfe_device *vfe_dev)
+{
+ return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+}
+
+static int msm_vfe40_get_platform_data(struct vfe_device *vfe_dev)
+{
+ int rc = 0;
+ vfe_dev->vfe_mem = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe");
+ if (!vfe_dev->vfe_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_vbif_mem = platform_get_resource_byname(
+ vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe_vbif");
+ if (!vfe_dev->vfe_vbif_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_irq = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_IRQ, "vfe");
+ if (!vfe_dev->vfe_irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->fs_vfe = regulator_get(&vfe_dev->pdev->dev, "vdd");
+ if (IS_ERR(vfe_dev->fs_vfe)) {
+ pr_err("%s: Regulator get failed %ld\n", __func__,
+ PTR_ERR(vfe_dev->fs_vfe));
+ vfe_dev->fs_vfe = NULL;
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+vfe_no_resource:
+ return rc;
+}
+
+static void msm_vfe40_get_error_mask(
+ uint32_t *error_mask0, uint32_t *error_mask1)
+{
+ *error_mask0 = 0x00000000;
+ *error_mask1 = 0x00FFFEFF;
+}
+
+static void msm_vfe40_get_overflow_mask(uint32_t *overflow_mask)
+{
+ *overflow_mask = 0x00FFFE7E;
+}
+
+static void msm_vfe40_get_rdi_wm_mask(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask)
+{
+ *rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
+}
+
+static void msm_vfe40_get_irq_mask(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+ *irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ *irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
+}
+
+static void msm_vfe40_restore_irq_mask(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
+ vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
+ vfe_dev->vfe_base + 0x2C);
+}
+
+static void msm_vfe40_get_halt_restart_mask(uint32_t *irq0_mask,
+ uint32_t *irq1_mask)
+{
+ *irq0_mask = BIT(31);
+ *irq1_mask = BIT(8);
+}
+
+static struct msm_vfe_axi_hardware_info msm_vfe40_axi_hw_info = {
+ .num_wm = 7,
+ .num_comp_mask = 3,
+ .num_rdi = 3,
+ .num_rdi_master = 3,
+ .min_wm_ub = 64,
+ .scratch_buf_range = SZ_32M,
+};
+
+static struct msm_vfe_stats_hardware_info msm_vfe40_stats_hw_info = {
+ .stats_capability_mask =
+ 1 << MSM_ISP_STATS_BE | 1 << MSM_ISP_STATS_BF |
+ 1 << MSM_ISP_STATS_BG | 1 << MSM_ISP_STATS_BHIST |
+ 1 << MSM_ISP_STATS_AWB | 1 << MSM_ISP_STATS_IHIST |
+ 1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS,
+ .stats_ping_pong_offset = stats_pingpong_offset_map,
+ .num_stats_type = VFE40_NUM_STATS_TYPE,
+ .num_stats_comp_mask = 2,
+};
+
+struct msm_vfe_hardware_info vfe40_hw_info = {
+ .num_iommu_ctx = 1,
+ .num_iommu_secure_ctx = 1,
+ .vfe_clk_idx = VFE40_CLK_IDX,
+ .runtime_axi_update = 0,
+ .vfe_ops = {
+ .irq_ops = {
+ .read_irq_status = msm_vfe40_read_irq_status,
+ .process_camif_irq = msm_vfe40_process_input_irq,
+ .process_reset_irq = msm_vfe40_process_reset_irq,
+ .process_halt_irq = msm_vfe40_process_halt_irq,
+ .process_reg_update = msm_vfe40_process_reg_update,
+ .process_axi_irq = msm_isp_process_axi_irq,
+ .process_stats_irq = msm_isp_process_stats_irq,
+ .process_epoch_irq = msm_vfe40_process_epoch_irq,
+ .enable_camif_err = msm_vfe40_enable_camif_error,
+ },
+ .axi_ops = {
+ .reload_wm = msm_vfe40_axi_reload_wm,
+ .enable_wm = msm_vfe40_axi_enable_wm,
+ .cfg_io_format = msm_vfe40_cfg_io_format,
+ .cfg_comp_mask = msm_vfe40_axi_cfg_comp_mask,
+ .clear_comp_mask = msm_vfe40_axi_clear_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe40_axi_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe40_axi_clear_wm_irq_mask,
+ .cfg_framedrop = msm_vfe40_cfg_framedrop,
+ .clear_framedrop = msm_vfe40_clear_framedrop,
+ .cfg_wm_reg = msm_vfe40_axi_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe40_axi_clear_wm_reg,
+ .cfg_wm_xbar_reg = msm_vfe40_axi_cfg_wm_xbar_reg,
+ .clear_wm_xbar_reg = msm_vfe40_axi_clear_wm_xbar_reg,
+ .cfg_ub = msm_vfe40_cfg_axi_ub,
+ .read_wm_ping_pong_addr =
+ msm_vfe40_read_wm_ping_pong_addr,
+ .update_ping_pong_addr =
+ msm_vfe40_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe40_get_comp_mask,
+ .get_wm_mask = msm_vfe40_get_wm_mask,
+ .get_pingpong_status = msm_vfe40_get_pingpong_status,
+ .halt = msm_vfe40_axi_halt,
+ .restart = msm_vfe40_axi_restart,
+ .update_cgc_override =
+ msm_vfe40_axi_update_cgc_override,
+ },
+ .core_ops = {
+ .reg_update = msm_vfe40_reg_update,
+ .cfg_input_mux = msm_vfe40_cfg_input_mux,
+ .update_camif_state = msm_vfe40_update_camif_state,
+ .start_fetch_eng = msm_vfe40_start_fetch_engine,
+ .cfg_rdi_reg = msm_vfe40_cfg_rdi_reg,
+ .reset_hw = msm_vfe40_reset_hardware,
+ .init_hw = msm_vfe40_init_hardware,
+ .init_hw_reg = msm_vfe40_init_hardware_reg,
+ .clear_status_reg = msm_vfe40_clear_status_reg,
+ .release_hw = msm_vfe40_release_hardware,
+ .get_platform_data = msm_vfe40_get_platform_data,
+ .get_error_mask = msm_vfe40_get_error_mask,
+ .get_overflow_mask = msm_vfe40_get_overflow_mask,
+ .get_rdi_wm_mask = msm_vfe40_get_rdi_wm_mask,
+ .get_irq_mask = msm_vfe40_get_irq_mask,
+ .restore_irq_mask = msm_vfe40_restore_irq_mask,
+ .get_halt_restart_mask =
+ msm_vfe40_get_halt_restart_mask,
+ .process_error_status = msm_vfe40_process_error_status,
+ .is_module_cfg_lock_needed =
+ msm_vfe40_is_module_cfg_lock_needed,
+ },
+ .stats_ops = {
+ .get_stats_idx = msm_vfe40_get_stats_idx,
+ .check_streams = msm_vfe40_stats_check_streams,
+ .cfg_comp_mask = msm_vfe40_stats_cfg_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe40_stats_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe40_stats_clear_wm_irq_mask,
+ .cfg_wm_reg = msm_vfe40_stats_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe40_stats_clear_wm_reg,
+ .cfg_ub = msm_vfe40_stats_cfg_ub,
+ .enable_module = msm_vfe40_stats_enable_module,
+ .update_ping_pong_addr =
+ msm_vfe40_stats_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe40_stats_get_comp_mask,
+ .get_wm_mask = msm_vfe40_stats_get_wm_mask,
+ .get_frame_id = msm_vfe40_stats_get_frame_id,
+ .get_pingpong_status = msm_vfe40_get_pingpong_status,
+ .update_cgc_override =
+ msm_vfe40_stats_update_cgc_override,
+ },
+ },
+ .dmi_reg_offset = 0x918,
+ .axi_hw_info = &msm_vfe40_axi_hw_info,
+ .stats_hw_info = &msm_vfe40_stats_hw_info,
+};
+EXPORT_SYMBOL(vfe40_hw_info);
+
+static const struct of_device_id msm_vfe40_dt_match[] = {
+ {
+ .compatible = "qcom,vfe40",
+ .data = &vfe40_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe40_dt_match);
+
+static struct platform_driver vfe40_driver = {
+ .probe = vfe_hw_probe,
+ .driver = {
+ .name = "msm_vfe40",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe40_dt_match,
+ },
+};
+
+static int __init msm_vfe40_init_module(void)
+{
+ return platform_driver_register(&vfe40_driver);
+}
+
+static void __exit msm_vfe40_exit_module(void)
+{
+ platform_driver_unregister(&vfe40_driver);
+}
+
+module_init(msm_vfe40_init_module);
+module_exit(msm_vfe40_exit_module);
+MODULE_DESCRIPTION("MSM VFE40 driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.h
new file mode 100644
index 000000000000..b7daa0282658
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISP40_H__
+#define __MSM_ISP40_H__
+
+extern struct msm_vfe_hardware_info vfe40_hw_info;
+#endif /* __MSM_ISP40_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
new file mode 100644
index 000000000000..38416a8b4e85
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -0,0 +1,2089 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+
+#include "msm_isp44.h"
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_isp.h"
+#include "msm.h"
+#include "msm_camera_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define STATS_IDX_BF_SCALE 0
+#define STATS_IDX_BE 1
+#define STATS_IDX_BG 2
+#define STATS_IDX_BF 3
+#define STATS_IDX_AWB 4
+#define STATS_IDX_RS 5
+#define STATS_IDX_CS 6
+#define STATS_IDX_IHIST 7
+#define STATS_IDX_BHIST 8
+
+#define VFE44_8084V1_VERSION 0x4000000A
+
+#define VFE44_BURST_LEN 3
+#define VFE44_FETCH_BURST_LEN 3
+#define VFE44_STATS_BURST_LEN 2
+#define VFE44_UB_SIZE 2048
+#define MSM_ISP44_TOTAL_IMAGE_UB 1528
+#define VFE44_WM_BASE(idx) (0x6C + 0x24 * idx)
+#define VFE44_RDI_BASE(idx) (0x2E8 + 0x4 * idx)
+#define VFE44_XBAR_BASE(idx) (0x58 + 0x4 * (idx / 2))
+#define VFE44_XBAR_SHIFT(idx) ((idx%2) ? 16 : 0)
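+/*
+ * Each write master has a ping and a pong address register at
+ * WM_BASE + 0x4 and WM_BASE + 0x8; the inverted ping_pong bit
+ * selects which of the two is programmed next.
+ */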
+#define VFE44_PING_PONG_BASE(wm, ping_pong) \
+ (VFE44_WM_BASE(wm) + 0x4 * (1 + ((~ping_pong) & 0x1)))
+
+#define VFE44_BUS_RD_CGC_OVERRIDE_BIT 16
+
+static uint8_t stats_pingpong_offset_map[] = {
+ 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+#define SHIFT_BF_SCALE_BIT 1
+#define VFE44_NUM_STATS_COMP 2
+#define VFE44_NUM_STATS_TYPE 9
+#define VFE44_STATS_BASE(idx) \
+ ((idx) == STATS_IDX_BF_SCALE ? 0xA0C : (0x168 + 0x18 * (idx-1)))
+#define VFE44_STATS_PING_PONG_BASE(idx, ping_pong) \
+ (VFE44_STATS_BASE(idx) + 0x4 * \
+ (~(ping_pong >> (stats_pingpong_offset_map[idx])) & 0x1))
+
+#define VFE44_CLK_IDX 2
+static struct msm_cam_clk_info msm_vfe44_clk_info[VFE_CLK_INFO_MAX];
+
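+/*
+ * Program a block of registers from device-tree data: dt_parms names the
+ * properties holding the entry count, the register offsets and (optionally)
+ * the values to write. Used below for the qos-, vbif- and ds- parameters.
+ */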
+static int32_t msm_vfe44_init_dt_parms(struct vfe_device *vfe_dev,
+ struct msm_vfe_hw_init_parms *dt_parms)
+{
+ void __iomem *vfebase = vfe_dev->vfe_base;
+ struct device_node *of_node;
+	int32_t i = 0, rc = 0;
+ uint32_t *dt_settings = NULL, *dt_regs = NULL, dt_entries = 0;
+
+ of_node = vfe_dev->pdev->dev.of_node;
+
+ rc = of_property_read_u32(of_node, dt_parms->entries,
+ &dt_entries);
+ if (rc < 0 || !dt_entries) {
+		pr_err("%s: no %s property found\n",
+			__func__, dt_parms->entries);
+ return -EINVAL;
+ } else {
+ dt_settings = kzalloc(sizeof(uint32_t) * dt_entries,
+ GFP_KERNEL);
+ if (!dt_settings) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ dt_regs = kzalloc(sizeof(uint32_t) * dt_entries,
+ GFP_KERNEL);
+ if (!dt_regs) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ kfree(dt_settings);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, dt_parms->regs,
+ dt_regs, dt_entries);
+ if (rc < 0) {
+			pr_err("%s: failed to read %s\n",
+				__func__, dt_parms->regs);
+ kfree(dt_settings);
+ kfree(dt_regs);
+ return -EINVAL;
+ } else {
+ if (dt_parms->settings) {
+ rc = of_property_read_u32_array(of_node,
+ dt_parms->settings,
+ dt_settings, dt_entries);
+ if (rc < 0) {
+					pr_err("%s: failed to read %s\n",
+						__func__, dt_parms->settings);
+ kfree(dt_settings);
+ kfree(dt_regs);
+ } else {
+ for (i = 0; i < dt_entries; i++) {
+ msm_camera_io_w(dt_settings[i],
+ vfebase + dt_regs[i]);
+ }
+ kfree(dt_settings);
+ kfree(dt_regs);
+ }
+ } else {
+ kfree(dt_settings);
+ kfree(dt_regs);
+ }
+ }
+ }
+ return 0;
+}
+
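+/*
+ * Power-up sequence: bandwidth vote, VFE regulator, clocks, VFE and VBIF
+ * register mappings, then the IRQ line. Failures unwind in reverse order
+ * through the goto labels.
+ */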
+static int msm_vfe44_init_hardware(struct vfe_device *vfe_dev)
+{
+ int rc = -1;
+ rc = msm_isp_init_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
+ goto bus_scale_register_failed;
+ }
+
+ if (vfe_dev->fs_vfe) {
+ rc = regulator_enable(vfe_dev->fs_vfe);
+ if (rc) {
+ pr_err("%s: Regulator enable failed\n", __func__);
+ goto fs_failed;
+ }
+ }
+
+ rc = msm_isp_get_clk_info(vfe_dev, vfe_dev->pdev, msm_vfe44_clk_info);
+ if (rc < 0) {
+ pr_err("msm_isp_get_clk_info() failed\n");
+ goto fs_failed;
+ }
+ if (vfe_dev->num_clk <= 0) {
+ pr_err("%s: Invalid num of clock\n", __func__);
+ goto fs_failed;
+ } else {
+ vfe_dev->vfe_clk =
+ kzalloc(sizeof(struct clk *) * vfe_dev->num_clk,
+ GFP_KERNEL);
+ if (!vfe_dev->vfe_clk) {
+			pr_err("%s:%d No memory\n", __func__, __LINE__);
+			rc = -ENOMEM;
+			goto clk_enable_failed;
+ }
+ }
+ rc = msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe44_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 1);
+ if (rc < 0)
+ goto clk_enable_failed;
+
+ vfe_dev->vfe_base = ioremap(vfe_dev->vfe_mem->start,
+ resource_size(vfe_dev->vfe_mem));
+ if (!vfe_dev->vfe_base) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto vfe_remap_failed;
+ }
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
+ vfe_dev->vfe_base;
+
+ vfe_dev->vfe_vbif_base = ioremap(vfe_dev->vfe_vbif_mem->start,
+ resource_size(vfe_dev->vfe_vbif_mem));
+ if (!vfe_dev->vfe_vbif_base) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto vbif_remap_failed;
+ }
+
+ rc = request_irq(vfe_dev->vfe_irq->start, msm_isp_process_irq,
+ IRQF_TRIGGER_RISING, "vfe", vfe_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request failed\n", __func__);
+ goto irq_req_failed;
+ }
+ return rc;
+irq_req_failed:
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+vbif_remap_failed:
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+vfe_remap_failed:
+ msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe44_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 0);
+clk_enable_failed:
+ if (vfe_dev->fs_vfe)
+ regulator_disable(vfe_dev->fs_vfe);
+ kfree(vfe_dev->vfe_clk);
+fs_failed:
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+bus_scale_register_failed:
+ return rc;
+}
+
+static void msm_vfe44_release_hardware(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x2C);
+ disable_irq(vfe_dev->vfe_irq->start);
+ free_irq(vfe_dev->vfe_irq->start, vfe_dev);
+ tasklet_kill(&vfe_dev->vfe_tasklet);
+ msm_isp_flush_tasklet(vfe_dev);
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+ msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe44_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 0);
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+ kfree(vfe_dev->vfe_clk);
+ regulator_disable(vfe_dev->fs_vfe);
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+}
+
+static void msm_vfe44_init_hardware_reg(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_hw_init_parms qos_parms;
+ struct msm_vfe_hw_init_parms vbif_parms;
+ struct msm_vfe_hw_init_parms ds_parms;
+
+ qos_parms.entries = "qos-entries";
+ qos_parms.regs = "qos-regs";
+ qos_parms.settings = "qos-settings";
+ vbif_parms.entries = "vbif-entries";
+ vbif_parms.regs = "vbif-regs";
+ vbif_parms.settings = "vbif-settings";
+ ds_parms.entries = "ds-entries";
+ ds_parms.regs = "ds-regs";
+ ds_parms.settings = "ds-settings";
+
+ msm_vfe44_init_dt_parms(vfe_dev, &qos_parms);
+ msm_vfe44_init_dt_parms(vfe_dev, &ds_parms);
+ msm_vfe44_init_dt_parms(vfe_dev, &vbif_parms);
+
+ /* BUS_CFG */
+ msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50);
+ msm_camera_io_w(0xE00000F1, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x2C);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
+
+}
+
+static void msm_vfe44_clear_status_reg(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w(0x80000000, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x2C);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
+}
+
+static void msm_vfe44_process_reset_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ if (irq_status0 & (1 << 31)) {
+ complete(&vfe_dev->reset_complete);
+ vfe_dev->reset_pending = 0;
+ }
+}
+
+static void msm_vfe44_process_halt_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ if (irq_status1 & (1 << 8)) {
+ complete(&vfe_dev->halt_complete);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x2C0);
+ }
+}
+
+static void msm_vfe44_process_input_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0x1000003))
+ return;
+
+ if (irq_status0 & (1 << 0)) {
+ ISP_DBG("%s: SOF IRQ\n", __func__);
+ msm_isp_increment_frame_id(vfe_dev, VFE_PIX_0, ts);
+ }
+
+ if (irq_status0 & (1 << 24)) {
+ ISP_DBG("%s: Fetch Engine Read IRQ\n", __func__);
+ msm_isp_fetch_engine_done_notify(vfe_dev,
+ &vfe_dev->fetch_engine_info);
+ }
+
+ if (irq_status0 & (1 << 1))
+ ISP_DBG("%s: EOF IRQ\n", __func__);
+}
+
+static void msm_vfe44_process_violation_status(
+ struct vfe_device *vfe_dev)
+{
+ uint32_t violation_status = vfe_dev->error_info.violation_status;
+ if (!violation_status)
+ return;
+
+ if (violation_status & (1 << 0))
+ pr_err("%s: camif violation\n", __func__);
+ if (violation_status & (1 << 1))
+ pr_err("%s: black violation\n", __func__);
+ if (violation_status & (1 << 2))
+ pr_err("%s: rolloff violation\n", __func__);
+ if (violation_status & (1 << 3))
+ pr_err("%s: demux violation\n", __func__);
+ if (violation_status & (1 << 4))
+ pr_err("%s: demosaic violation\n", __func__);
+ if (violation_status & (1 << 5))
+ pr_err("%s: wb violation\n", __func__);
+ if (violation_status & (1 << 6))
+ pr_err("%s: clf violation\n", __func__);
+ if (violation_status & (1 << 7))
+ pr_err("%s: color correct violation\n", __func__);
+ if (violation_status & (1 << 8))
+ pr_err("%s: rgb lut violation\n", __func__);
+ if (violation_status & (1 << 9))
+ pr_err("%s: la violation\n", __func__);
+ if (violation_status & (1 << 10))
+ pr_err("%s: chroma enhance violation\n", __func__);
+ if (violation_status & (1 << 11))
+		pr_err("%s: chroma suppress mce violation\n", __func__);
+ if (violation_status & (1 << 12))
+ pr_err("%s: skin enhance violation\n", __func__);
+ if (violation_status & (1 << 13))
+		pr_err("%s: color transform enc violation\n", __func__);
+ if (violation_status & (1 << 14))
+		pr_err("%s: color transform view violation\n", __func__);
+ if (violation_status & (1 << 15))
+ pr_err("%s: scale enc y violation\n", __func__);
+ if (violation_status & (1 << 16))
+ pr_err("%s: scale enc cbcr violation\n", __func__);
+ if (violation_status & (1 << 17))
+ pr_err("%s: scale view y violation\n", __func__);
+ if (violation_status & (1 << 18))
+ pr_err("%s: scale view cbcr violation\n", __func__);
+ if (violation_status & (1 << 21))
+ pr_err("%s: crop enc y violation\n", __func__);
+ if (violation_status & (1 << 22))
+ pr_err("%s: crop enc cbcr violation\n", __func__);
+ if (violation_status & (1 << 23))
+ pr_err("%s: crop view y violation\n", __func__);
+ if (violation_status & (1 << 24))
+ pr_err("%s: crop view cbcr violation\n", __func__);
+ if (violation_status & (1 << 25))
+ pr_err("%s: realign buf y violation\n", __func__);
+ if (violation_status & (1 << 26))
+ pr_err("%s: realign buf cb violation\n", __func__);
+ if (violation_status & (1 << 27))
+ pr_err("%s: realign buf cr violation\n", __func__);
+ if (violation_status & (1 << 28))
+ pr_err("%s: ltm violation\n", __func__);
+ if (violation_status & (1 << 29))
+ pr_err("%s: ltm cov violation\n", __func__);
+ if (violation_status & (1 << 30))
+ pr_err("%s: abf violation\n", __func__);
+ if (violation_status & (1 << 31))
+ pr_err("%s: bpc violation\n", __func__);
+}
+
+static void msm_vfe44_process_error_status(struct vfe_device *vfe_dev)
+{
+ uint32_t error_status1 = vfe_dev->error_info.error_mask1;
+ if (error_status1 & (1 << 0)) {
+ pr_err("%s: camif error status: 0x%x\n",
+ __func__, vfe_dev->error_info.camif_status);
+ msm_camera_io_dump(vfe_dev->vfe_base + 0x2f4, 0x30, 1);
+ }
+ if (error_status1 & (1 << 1))
+ pr_err("%s: stats bhist overwrite\n", __func__);
+ if (error_status1 & (1 << 2))
+ pr_err("%s: stats cs overwrite\n", __func__);
+ if (error_status1 & (1 << 3))
+ pr_err("%s: stats ihist overwrite\n", __func__);
+ if (error_status1 & (1 << 4))
+ pr_err("%s: realign buf y overflow\n", __func__);
+ if (error_status1 & (1 << 5))
+ pr_err("%s: realign buf cb overflow\n", __func__);
+ if (error_status1 & (1 << 6))
+ pr_err("%s: realign buf cr overflow\n", __func__);
+	if (error_status1 & (1 << 7))
+		msm_vfe44_process_violation_status(vfe_dev);
+ if (error_status1 & (1 << 9)) {
+ vfe_dev->stats->imagemaster0_overflow++;
+ pr_err("%s: image master 0 bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 10)) {
+ vfe_dev->stats->imagemaster1_overflow++;
+ pr_err("%s: image master 1 bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 11)) {
+ vfe_dev->stats->imagemaster2_overflow++;
+ pr_err("%s: image master 2 bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 12)) {
+ vfe_dev->stats->imagemaster3_overflow++;
+ pr_err("%s: image master 3 bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 13)) {
+ vfe_dev->stats->imagemaster4_overflow++;
+ pr_err("%s: image master 4 bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 14)) {
+ vfe_dev->stats->imagemaster5_overflow++;
+ pr_err("%s: image master 5 bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 15)) {
+ vfe_dev->stats->imagemaster6_overflow++;
+ pr_err("%s: image master 6 bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 16)) {
+ vfe_dev->stats->be_overflow++;
+ pr_err("%s: status be bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 17)) {
+ vfe_dev->stats->bg_overflow++;
+ pr_err("%s: status bg bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 18)) {
+ vfe_dev->stats->bf_overflow++;
+ pr_err("%s: status bf bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 19)) {
+ vfe_dev->stats->awb_overflow++;
+ pr_err("%s: status awb bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 20)) {
+ vfe_dev->stats->rs_overflow++;
+ pr_err("%s: status rs bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 21)) {
+ vfe_dev->stats->cs_overflow++;
+ pr_err("%s: status cs bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 22)) {
+ vfe_dev->stats->ihist_overflow++;
+ pr_err("%s: status ihist bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 23)) {
+ vfe_dev->stats->skinbhist_overflow++;
+ pr_err("%s: status skin bhist bus overflow\n", __func__);
+ }
+ if (error_status1 & (1 << 24)) {
+ vfe_dev->stats->bfscale_overflow++;
+ pr_err("%s: status bf scale bus overflow\n", __func__);
+ }
+}
+
+static void msm_vfe44_enable_camif_error(struct vfe_device *vfe_dev,
+ int enable)
+{
+ uint32_t val;
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
+ if (enable)
+ msm_camera_io_w_mb(val | BIT(0), vfe_dev->vfe_base + 0x2C);
+ else
+ msm_camera_io_w_mb(val & ~(BIT(0)), vfe_dev->vfe_base + 0x2C);
+}
+
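+/*
+ * Read and acknowledge both IRQ status registers, latch the CAMIF and
+ * violation status for later error processing, and mask the result with
+ * the currently enabled IRQ masks.
+ */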
+static void msm_vfe44_read_irq_status(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1)
+{
+ uint32_t irq_mask0 = 0, irq_mask1 = 0;
+ irq_mask0 = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask1 = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
+
+ *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x38);
+ *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
+
+ msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w(*irq_status1, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x24);
+ *irq_status0 &= irq_mask0;
+ *irq_status1 &= irq_mask1;
+ if (*irq_status0 & 0x10000000) {
+ pr_err_ratelimited("%s: Protection triggered\n", __func__);
+ *irq_status0 &= ~(0x10000000);
+ }
+
+ if (*irq_status1 & (1 << 0)) {
+ vfe_dev->error_info.camif_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x31C);
+ msm_vfe44_enable_camif_error(vfe_dev, 0);
+ }
+
+ if (*irq_status1 & (1 << 7))
+ vfe_dev->error_info.violation_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x48);
+
+}
+
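+/*
+ * REG_UPDATE bits 4-7 of irq_status0 map to VFE_PIX_0 and VFE_RAW_0..2;
+ * for each asserted interface apply the pending stream, AXI config and
+ * stats updates.
+ */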
+static void msm_vfe44_process_reg_update(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ enum msm_vfe_input_src i;
+ uint32_t shift_irq;
+ uint8_t reg_updated = 0;
+ unsigned long flags;
+
+ if (!(irq_status0 & 0xF0))
+ return;
+ /* Shift status bits so that PIX SOF is 1st bit */
+ shift_irq = ((irq_status0 & 0xF0) >> 4);
+
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ if (shift_irq & BIT(i)) {
+ reg_updated |= BIT(i);
+ ISP_DBG("%s REG_UPDATE IRQ %x\n", __func__,
+ (uint32_t)BIT(i));
+ switch (i) {
+ case VFE_PIX_0:
+ msm_isp_save_framedrop_values(vfe_dev,
+ VFE_PIX_0);
+ msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
+ VFE_PIX_0, ts);
+ if (atomic_read(
+ &vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ if (vfe_dev->axi_data.camif_state ==
+ CAMIF_STOPPING)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, i);
+ break;
+ case VFE_RAW_0:
+ case VFE_RAW_1:
+ case VFE_RAW_2:
+ msm_isp_increment_frame_id(vfe_dev, i, ts);
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
+ msm_isp_update_framedrop_reg(vfe_dev, i);
+ /*
+ * Reg Update is pseudo SOF for RDI,
+ * so request every frame
+ */
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev, i);
+ break;
+ default:
+ pr_err("%s: Error case\n", __func__);
+ return;
+ }
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(
+ &vfe_dev->axi_data.axi_cfg_update[i]) ==
+ 0)
+ msm_isp_notify(vfe_dev,
+ ISP_EVENT_STREAM_UPDATE_DONE,
+ i, ts);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ if (reg_updated & BIT(VFE_PIX_0))
+ vfe_dev->reg_updated = 1;
+
+ vfe_dev->reg_update_requested &= ~reg_updated;
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+static void msm_vfe44_process_epoch_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0xc))
+ return;
+
+ if (irq_status0 & BIT(2)) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
+ ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
+ msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
+ msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_update_error_frame_count(vfe_dev);
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
+ && vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count == 0) {
+ ISP_DBG("%s: SOF IRQ\n", __func__);
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
+ if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
+ msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev, VFE_PIX_0);
+ }
+ }
+}
+
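+/*
+ * Request a reg update for the given input (or all inputs for
+ * VFE_SRC_MAX). In split (dual-VFE) mode the PIX update from VFE1 is
+ * written to both VFE bases so the two ISPs commit together.
+ */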
+static void msm_vfe44_reg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ uint32_t update_mask = 0;
+ unsigned long flags;
+
+	/* This HW supports up to VFE_RAW_2 */
+ if (frame_src > VFE_RAW_2 && frame_src != VFE_SRC_MAX) {
+ pr_err("%s Error case\n", __func__);
+ return;
+ }
+
+ /*
+ * If frame_src == VFE_SRC_MAX request reg_update on
+ * all supported INTF
+ */
+ if (frame_src == VFE_SRC_MAX)
+ update_mask = 0xF;
+ else
+ update_mask = BIT((uint32_t)frame_src);
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].reg_update_frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ vfe_dev->reg_update_requested |= update_mask;
+ vfe_dev->common_data->dual_vfe_res->reg_update_mask[vfe_dev->pdev->id] =
+ vfe_dev->reg_update_requested;
+ if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) &&
+ ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
+ + 0x378);
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x378);
+ } else if (!vfe_dev->is_split ||
+ ((frame_src == VFE_PIX_0) &&
+ (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x378);
+ }
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+static long msm_vfe44_reset_hardware(struct vfe_device *vfe_dev,
+ uint32_t first_start, uint32_t blocking_call)
+{
+ long rc = 0;
+ init_completion(&vfe_dev->reset_complete);
+
+ if (blocking_call)
+ vfe_dev->reset_pending = 1;
+
+ if (first_start) {
+ msm_camera_io_w_mb(0x1FF, vfe_dev->vfe_base + 0xC);
+ } else {
+ msm_camera_io_w_mb(0x1EF, vfe_dev->vfe_base + 0xC);
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ reload_wm(vfe_dev, vfe_dev->vfe_base, 0x0001FFFF);
+ }
+
+ if (blocking_call) {
+ rc = wait_for_completion_timeout(
+ &vfe_dev->reset_complete, msecs_to_jiffies(50));
+ if (rc <= 0) {
+ pr_err("%s:%d failed: reset timeout\n", __func__,
+ __LINE__);
+ vfe_dev->reset_pending = 0;
+ }
+ }
+
+ return rc;
+}
+
+static void msm_vfe44_axi_reload_wm(struct vfe_device *vfe_dev,
+ void __iomem *vfe_base, uint32_t reload_mask)
+{
+ msm_camera_io_w_mb(reload_mask, vfe_base + 0x4C);
+}
+
+static void msm_vfe44_axi_enable_wm(void __iomem *vfe_base,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val;
+ val = msm_camera_io_r(vfe_base + VFE44_WM_BASE(wm_idx));
+ if (enable)
+ val |= 0x1;
+ else
+ val &= ~0x1;
+ msm_camera_io_w_mb(val,
+ vfe_base + VFE44_WM_BASE(wm_idx));
+}
+
+static void msm_vfe44_axi_update_cgc_override(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t cgc_override)
+{
+ uint32_t val = 0;
+
+ /* Change CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x974);
+ if (cgc_override)
+ val |= (1 << wm_idx);
+ else
+ val &= ~(1 << wm_idx);
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x974);
+}
+
+static void msm_vfe44_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t comp_mask, comp_mask_index =
+ stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ comp_mask |= (axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask |= 1 << (comp_mask_index + 25);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe44_axi_clear_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask &= ~(1 << (comp_mask_index + 25));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe44_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask |= 1 << (stream_info->wm[0] + 8);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe44_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask &= ~(1 << (stream_info->wm[0] + 8));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe44_cfg_framedrop(void __iomem *vfe_base,
+ struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
+ uint32_t framedrop_period)
+{
+ uint32_t i, temp;
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ msm_camera_io_w(framedrop_pattern, vfe_base +
+ VFE44_WM_BASE(stream_info->wm[i]) + 0x1C);
+ temp = msm_camera_io_r(vfe_base +
+ VFE44_WM_BASE(stream_info->wm[i]) + 0xC);
+ temp &= 0xFFFFFF83;
+ msm_camera_io_w(temp | (framedrop_period - 1) << 2,
+ vfe_base + VFE44_WM_BASE(stream_info->wm[i]) + 0xC);
+ }
+}
+
+static void msm_vfe44_clear_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t i;
+ for (i = 0; i < stream_info->num_planes; i++)
+ msm_camera_io_w(0, vfe_dev->vfe_base +
+ VFE44_WM_BASE(stream_info->wm[i]) + 0x1C);
+}
+
+static int32_t msm_vfe44_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
+{
+ int rc = 0;
+ switch (bpp) {
+ case 8:
+ *bpp_reg = 0;
+ break;
+ case 10:
+ *bpp_reg = 1 << 0;
+ break;
+ case 12:
+ *bpp_reg = 1 << 1;
+ break;
+ default:
+		pr_err("%s:%d invalid bpp %d\n", __func__, __LINE__, bpp);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_vfe44_convert_io_fmt_to_reg(
+ enum msm_isp_pack_fmt pack_format, uint32_t *pack_reg)
+{
+ int rc = 0;
+
+ switch (pack_format) {
+ case QCOM:
+ *pack_reg = 0x0;
+ break;
+ case MIPI:
+ *pack_reg = 0x1;
+ break;
+ case DPCM6:
+ *pack_reg = 0x2;
+ break;
+ case DPCM8:
+ *pack_reg = 0x3;
+ break;
+ case PLAIN8:
+ *pack_reg = 0x4;
+ break;
+ case PLAIN16:
+ *pack_reg = 0x5;
+ break;
+ default:
+ pr_err("%s: invalid pack fmt %d!\n", __func__, pack_format);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_vfe44_cfg_io_format(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
+{
+ int rc = 0;
+ int bpp = 0, read_bpp = 0;
+ enum msm_isp_pack_fmt pack_fmt = 0, read_pack_fmt = 0;
+ uint32_t bpp_reg = 0, pack_reg = 0;
+ uint32_t read_bpp_reg = 0, read_pack_reg = 0;
+ uint32_t io_format_reg = 0; /*io format register bit*/
+
+ io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x54);
+
+ /*input config*/
+ if ((stream_src < RDI_INTF_0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux ==
+ EXTERNAL_READ)) {
+ read_bpp = msm_isp_get_bit_per_pixel(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe44_convert_bpp_to_reg(read_bpp, &read_bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! in_bpp %d rc %d\n",
+ __func__, read_bpp, rc);
+ return rc;
+ }
+
+ read_pack_fmt = msm_isp_get_pack_format(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe44_convert_io_fmt_to_reg(
+ read_pack_fmt, &read_pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ /*use input format(v4l2_pix_fmt) to get pack format*/
+ io_format_reg &= 0xFFC8FFFF;
+ io_format_reg |= (read_bpp_reg << 20 | read_pack_reg << 16);
+ }
+
+ bpp = msm_isp_get_bit_per_pixel(io_format);
+ rc = msm_vfe44_convert_bpp_to_reg(bpp, &bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! bpp %d rc = %d\n",
+ __func__, bpp, rc);
+ return rc;
+ }
+
+ switch (stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
+ case CAMIF_RAW:
+ io_format_reg &= 0xFFFFCFFF;
+ io_format_reg |= bpp_reg << 12;
+ break;
+ case IDEAL_RAW:
+ /*use output format(v4l2_pix_fmt) to get pack format*/
+ pack_fmt = msm_isp_get_pack_format(io_format);
+ rc = msm_vfe44_convert_io_fmt_to_reg(pack_fmt, &pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ io_format_reg &= 0xFFFFFFC8;
+ io_format_reg |= bpp_reg << 4 | pack_reg;
+ break;
+ case RDI_INTF_0:
+ case RDI_INTF_1:
+ case RDI_INTF_2:
+ default:
+ pr_err("%s: Invalid stream source\n", __func__);
+ return -EINVAL;
+ }
+
+ msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x54);
+ return 0;
+}
+
+static int msm_vfe44_fetch_engine_start(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ int rc = 0;
+ uint32_t bufq_handle;
+ struct msm_isp_buffer *buf = NULL;
+ struct msm_vfe_fetch_eng_start *fe_cfg = arg;
+ struct msm_isp_buffer_mapped_info mapped_info;
+
+ if (vfe_dev->fetch_engine_info.is_busy == 1) {
+ pr_err("%s: fetch engine busy\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
+	/* There is another option of passing the buffer address from
+	 * user space; in that case the driver needs to map the buffer
+	 * and use it.
+	 */
+ vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
+ vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
+ vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
+ vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
+
+ if (!fe_cfg->offline_mode) {
+ bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, fe_cfg->session_id,
+ fe_cfg->stream_id);
+ vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+ rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
+ vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
+ if (rc < 0) {
+ pr_err("%s: No fetch buffer\n", __func__);
+ return -EINVAL;
+ }
+ mapped_info = buf->mapped_info[0];
+ buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ } else {
+ rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
+ &mapped_info, fe_cfg->fd);
+ if (rc < 0) {
+ pr_err("%s: can not map buffer\n", __func__);
+ return -EINVAL;
+ }
+ }
+ vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
+ vfe_dev->fetch_engine_info.is_busy = 1;
+
+ msm_camera_io_w(mapped_info.paddr, vfe_dev->vfe_base + 0x228);
+
+ msm_camera_io_w_mb(0x10000, vfe_dev->vfe_base + 0x4C);
+ msm_camera_io_w_mb(0x20000, vfe_dev->vfe_base + 0x4C);
+
+ ISP_DBG("%s: Fetch Engine ready\n", __func__);
+ return 0;
+}
+
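+/*
+ * Configure the bus read (fetch) engine for EXTERNAL_READ input: enable
+ * its CGC override and completion IRQ, then program the image size,
+ * buffer geometry and unpack pattern.
+ */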
+static void msm_vfe44_cfg_fetch_engine(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint32_t x_size_word;
+ struct msm_vfe_fetch_engine_cfg *fe_cfg = NULL;
+ uint32_t temp = 0;
+
+ if (pix_cfg->input_mux == EXTERNAL_READ) {
+ fe_cfg = &pix_cfg->fetch_engine_cfg;
+ pr_debug("%s: fetch_dbg wd x ht buf = %d x %d, fe = %d x %d\n",
+ __func__, fe_cfg->buf_width, fe_cfg->buf_height,
+ fe_cfg->fetch_width, fe_cfg->fetch_height);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_cgc_override(vfe_dev,
+ VFE44_BUS_RD_CGC_OVERRIDE_BIT, 1);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ temp &= 0xFFFFFFFD;
+ temp |= (1 << 1);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ temp &= 0xFEFFFFFF;
+ temp |= (1 << 24);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w((fe_cfg->fetch_height - 1) & 0xFFF,
+ vfe_dev->vfe_base + 0x238);
+
+ x_size_word = msm_isp_cal_word_per_line(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
+ fe_cfg->fetch_width);
+ msm_camera_io_w((x_size_word - 1) << 16,
+ vfe_dev->vfe_base + 0x23C);
+
+ msm_camera_io_w(x_size_word << 16 |
+ (fe_cfg->buf_height - 1) << 4 | VFE44_FETCH_BURST_LEN,
+ vfe_dev->vfe_base + 0x240);
+
+ msm_camera_io_w(0 << 28 | 2 << 25 |
+ ((fe_cfg->buf_width - 1) & 0x1FFF) << 12 |
+ ((fe_cfg->buf_height - 1) & 0xFFF), vfe_dev->vfe_base + 0x244);
+
+		/* need to use a formula to calculate MAIN_UNPACK_PATTERN */
+ msm_camera_io_w(0xF6543210, vfe_dev->vfe_base + 0x248);
+ msm_camera_io_w(0xF, vfe_dev->vfe_base + 0x264);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x1C);
+ temp |= 2 << 16 | pix_cfg->pixel_pattern;
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x1C);
+
+ } else {
+		pr_err("%s: Invalid mux configuration - mux: %d\n", __func__,
+			pix_cfg->input_mux);
+ return;
+ }
+
+ return;
+}
+
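+/*
+ * Program the CAMIF crop window (first/last pixel and line), the optional
+ * pixel/line skip subsampling, and the IRQ subsample period and pattern.
+ */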
+static void msm_vfe44_cfg_camif(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint16_t first_pixel, last_pixel, first_line, last_line;
+ struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg;
+ uint32_t val, subsample_period, subsample_pattern;
+ struct msm_vfe_camif_subsample_cfg *subsample_cfg =
+ &pix_cfg->camif_cfg.subsample_cfg;
+ uint16_t bus_sub_en = 0;
+
+ vfe_dev->dual_vfe_enable = camif_cfg->is_split;
+
+ msm_camera_io_w(pix_cfg->input_mux << 16 | pix_cfg->pixel_pattern,
+ vfe_dev->vfe_base + 0x1C);
+
+ if (subsample_cfg->pixel_skip || subsample_cfg->line_skip) {
+ bus_sub_en = 1;
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
+ val &= 0xFFFFFFDF;
+ val = val | bus_sub_en << 5;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
+ subsample_cfg->pixel_skip &= 0x0000FFFF;
+ subsample_cfg->line_skip &= 0x0000FFFF;
+ msm_camera_io_w((subsample_cfg->line_skip << 16) |
+ subsample_cfg->pixel_skip,
+ vfe_dev->vfe_base + 0x30C);
+ }
+
+ first_pixel = camif_cfg->first_pixel;
+ last_pixel = camif_cfg->last_pixel;
+ first_line = camif_cfg->first_line;
+ last_line = camif_cfg->last_line;
+ subsample_period = camif_cfg->subsample_cfg.irq_subsample_period;
+ subsample_pattern = camif_cfg->subsample_cfg.irq_subsample_pattern;
+
+ msm_camera_io_w(camif_cfg->lines_per_frame << 16 |
+ camif_cfg->pixels_per_line, vfe_dev->vfe_base + 0x300);
+
+ msm_camera_io_w(first_pixel << 16 | last_pixel,
+ vfe_dev->vfe_base + 0x304);
+
+ msm_camera_io_w(first_line << 16 | last_line,
+ vfe_dev->vfe_base + 0x308);
+ if (subsample_period && subsample_pattern) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
+ val &= 0xFFE0FFFF;
+		val |= (subsample_period - 1) << 16;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
+ ISP_DBG("%s:camif PERIOD %x PATTERN %x\n",
+ __func__, subsample_period, subsample_pattern);
+
+ val = subsample_pattern;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x314);
+ } else {
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x314);
+ }
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2E8);
+ val |= camif_cfg->camif_input;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x2E8);
+
+}
+
+static void msm_vfe44_cfg_input_mux(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ switch (pix_cfg->input_mux) {
+ case CAMIF:
+ msm_vfe44_cfg_camif(vfe_dev, pix_cfg);
+ break;
+ case EXTERNAL_READ:
+ msm_vfe44_cfg_fetch_engine(vfe_dev, pix_cfg);
+ break;
+ default:
+ pr_err("%s: Unsupported input mux %d\n",
+ __func__, pix_cfg->input_mux);
+ }
+ return;
+}
+
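+/*
+ * ENABLE_CAMIF clears stale status, unmasks the CAMIF related IRQs and
+ * issues the CAMIF enable command; DISABLE_CAMIF requests a normal stop
+ * while DISABLE_CAMIF_IMMEDIATELY forces an immediate one.
+ */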
+static void msm_vfe44_update_camif_state(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state)
+{
+ uint32_t val;
+ bool bus_en, vfe_en;
+
+ if (update_state == NO_UPDATE)
+ return;
+
+ if (update_state == ENABLE_CAMIF) {
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ val |= 0xF7;
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318);
+
+ bus_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
+ vfe_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x2F8);
+ val &= 0xFFFFFF3F;
+ val = val | bus_en << 7 | vfe_en << 6;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x2F8);
+ msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x2F4);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2F4);
+
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
+ } else if (update_state == DISABLE_CAMIF) {
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x2F4);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ } else if (update_state == DISABLE_CAMIF_IMMEDIATELY) {
+ msm_camera_io_w_mb(0x6, vfe_dev->vfe_base + 0x2F4);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ }
+}
+
+static void msm_vfe44_cfg_rdi_reg(
+ struct vfe_device *vfe_dev, struct msm_vfe_rdi_cfg *rdi_cfg,
+ enum msm_vfe_input_src input_src)
+{
+ uint8_t rdi = input_src - VFE_RAW_0;
+ uint32_t rdi_reg_cfg;
+ rdi_reg_cfg = msm_camera_io_r(
+ vfe_dev->vfe_base + VFE44_RDI_BASE(0));
+ rdi_reg_cfg &= ~(BIT(16 + rdi));
+ rdi_reg_cfg |= rdi_cfg->frame_based << (16 + rdi);
+ msm_camera_io_w(rdi_reg_cfg,
+ vfe_dev->vfe_base + VFE44_RDI_BASE(0));
+
+ rdi_reg_cfg = msm_camera_io_r(
+ vfe_dev->vfe_base + VFE44_RDI_BASE(rdi));
+ rdi_reg_cfg &= 0x70003;
+ rdi_reg_cfg |= (rdi * 3) << 28 | rdi_cfg->cid << 4 | 0x4;
+ msm_camera_io_w(
+ rdi_reg_cfg, vfe_dev->vfe_base + VFE44_RDI_BASE(rdi));
+}
+
+static void msm_vfe44_axi_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ uint32_t val;
+ uint32_t wm_base = VFE44_WM_BASE(stream_info->wm[plane_idx]);
+
+ if (!stream_info->frame_based) {
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + wm_base);
+ /*WR_IMAGE_SIZE*/
+ val =
+ ((msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[plane_idx].
+ output_width)+1)/2 - 1) << 16 |
+ (stream_info->plane_cfg[plane_idx].
+ output_height - 1);
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+
+ /*WR_BUFFER_CFG*/
+ val = (stream_info->plane_cfg[plane_idx].output_height - 1);
+ val = (((val & 0xfff) << 2) | ((val >> 12) & 0x3));
+ val = val << 2 |
+ msm_isp_cal_word_per_line(stream_info->output_format,
+ stream_info->plane_cfg[
+ plane_idx].output_stride) << 16 |
+ VFE44_BURST_LEN;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
+ } else {
+ msm_camera_io_w(0x2, vfe_dev->vfe_base + wm_base);
+ val = (stream_info->plane_cfg[plane_idx].output_height - 1);
+ val = (((val & 0xfff) << 2) | ((val >> 12) & 0x3));
+ val = val << 2 |
+ msm_isp_cal_word_per_line(stream_info->output_format,
+ stream_info->plane_cfg[
+ plane_idx].output_width) << 16 |
+ VFE44_BURST_LEN;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
+ }
+
+ /*WR_IRQ_SUBSAMPLE_PATTERN*/
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + wm_base + 0x20);
+	/* TODO: Add IRQ subsample pattern */
+}
+
+static void msm_vfe44_axi_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint32_t val = 0;
+ uint32_t wm_base = VFE44_WM_BASE(stream_info->wm[plane_idx]);
+
+ /*WR_ADDR_CFG*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
+ /*WR_IMAGE_SIZE*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ /*WR_BUFFER_CFG*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
+ /*WR_IRQ_SUBSAMPLE_PATTERN*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
+}
+
+static void msm_vfe44_axi_cfg_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ struct msm_vfe_axi_plane_cfg *plane_cfg =
+ &stream_info->plane_cfg[plane_idx];
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_cfg = 0;
+ uint32_t xbar_reg_cfg = 0;
+
+ switch (stream_info->stream_src) {
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER: {
+ if (plane_cfg->output_plane_format != CRCB_PLANE &&
+ plane_cfg->output_plane_format != CBCR_PLANE) {
+ /*SINGLE_STREAM_SEL*/
+ xbar_cfg |= plane_cfg->output_plane_format << 8;
+ } else {
+ switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ xbar_cfg |= 0x3 << 4; /*PAIR_STREAM_SWAP_CTRL*/
+ break;
+ }
+ xbar_cfg |= 0x1 << 1; /*PAIR_STREAM_EN*/
+ }
+ if (stream_info->stream_src == PIX_VIEWFINDER)
+ xbar_cfg |= 0x1; /*VIEW_STREAM_EN*/
+ break;
+ }
+ case CAMIF_RAW:
+ xbar_cfg = 0x300;
+ break;
+ case IDEAL_RAW:
+ xbar_cfg = 0x400;
+ break;
+ case RDI_INTF_0:
+ xbar_cfg = 0x500;
+ break;
+ case RDI_INTF_1:
+ xbar_cfg = 0x600;
+ break;
+ case RDI_INTF_2:
+ xbar_cfg = 0x700;
+ break;
+ default:
+ pr_err("%s: Invalid stream src\n", __func__);
+ break;
+ }
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE44_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE44_XBAR_SHIFT(wm));
+ xbar_reg_cfg |= (xbar_cfg << VFE44_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE44_XBAR_BASE(wm));
+}
+
+static void msm_vfe44_axi_clear_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_reg_cfg = 0;
+
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE44_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE44_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE44_XBAR_BASE(wm));
+}
+
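+/*
+ * Proportional UB allocation: every active write master gets min_wm_ub
+ * words plus a share of the remaining image UB proportional to its
+ * image size.
+ */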
+static void msm_vfe44_cfg_axi_ub_equal_default(
+ struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data =
+ &vfe_dev->axi_data;
+ uint32_t total_image_size = 0;
+ uint8_t num_used_wms = 0;
+ uint32_t prop_size = 0;
+ uint32_t wm_ub_size;
+ uint64_t delta;
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i] > 0) {
+ num_used_wms++;
+ total_image_size += axi_data->wm_image_size[i];
+ }
+ }
+ prop_size = MSM_ISP44_TOTAL_IMAGE_UB -
+ axi_data->hw_info->min_wm_ub * num_used_wms;
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i]) {
+ delta = (uint64_t)axi_data->wm_image_size[i] *
+ (uint64_t)prop_size;
+ do_div(delta, total_image_size);
+ wm_ub_size = axi_data->hw_info->min_wm_ub +
+ (uint32_t)delta;
+ msm_camera_io_w(ub_offset << 16 | (wm_ub_size - 1),
+ vfe_dev->vfe_base + VFE44_WM_BASE(i) + 0x10);
+ ub_offset += wm_ub_size;
+ } else
+ msm_camera_io_w(0,
+ vfe_dev->vfe_base + VFE44_WM_BASE(i) + 0x10);
+ }
+}
+
+static void msm_vfe44_cfg_axi_ub_equal_slicing(
+ struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t ub_equal_slice = MSM_ISP44_TOTAL_IMAGE_UB /
+ axi_data->hw_info->num_wm;
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ msm_camera_io_w(ub_offset << 16 | (ub_equal_slice - 1),
+ vfe_dev->vfe_base + VFE44_WM_BASE(i) + 0x10);
+ ub_offset += ub_equal_slice;
+ }
+}
+
+static void msm_vfe44_cfg_axi_ub(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
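+	/* Policy is forced to default here, so the equal-slicing branch
+	 * below is effectively unused on this target.
+	 */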
+ axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
+ if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
+ msm_vfe44_cfg_axi_ub_equal_slicing(vfe_dev);
+ else
+ msm_vfe44_cfg_axi_ub_equal_default(vfe_dev);
+}
+
+static void msm_vfe44_read_wm_ping_pong_addr(
+ struct vfe_device *vfe_dev)
+{
+ msm_camera_io_dump(vfe_dev->vfe_base +
+ (VFE44_WM_BASE(0) & 0xFFFFFFF0), 0x200, 1);
+}
+
+static void msm_vfe44_update_ping_pong_addr(
+ void __iomem *vfe_base,
+ uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
+ int32_t buf_size)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE44_PING_PONG_BASE(wm_idx, pingpong_bit));
+}
+
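+/*
+ * Halt the AXI bus bridge: mask everything except the reset and halt
+ * IRQs, clear pending status, then force-complete any in-flight stream,
+ * AXI config or stats updates so callers do not block on them.
+ */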
+static int msm_vfe44_axi_halt(struct vfe_device *vfe_dev,
+ uint32_t blocking)
+{
+ int rc = 0;
+ enum msm_vfe_input_src i;
+
+ /* Keep only halt and restart mask */
+ msm_camera_io_w(BIT(31), vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w(BIT(8), vfe_dev->vfe_base + 0x2C);
+
+ /*Clear IRQ Status0, only leave reset irq mask*/
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
+
+ /*Clear IRQ Status1, only leave halt irq mask*/
+ msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
+
+ /*push clear cmd*/
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
+
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ == OVERFLOW_DETECTED)
+ pr_err_ratelimited("%s: VFE%d halt for recovery, blocking %d\n",
+ __func__, vfe_dev->pdev->id, blocking);
+
+ if (blocking) {
+ init_completion(&vfe_dev->halt_complete);
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
+ rc = wait_for_completion_timeout(
+ &vfe_dev->halt_complete, msecs_to_jiffies(500));
+ if (rc <= 0)
+ pr_err("%s:VFE%d halt timeout rc=%d\n", __func__,
+ vfe_dev->pdev->id, rc);
+ } else {
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x2C0);
+ }
+
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ /* if any stream is waiting for update, signal complete */
+ if (vfe_dev->axi_data.stream_update[i]) {
+ ISP_DBG("%s: complete stream update\n", __func__);
+ msm_isp_axi_stream_update(vfe_dev, i);
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ }
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ ISP_DBG("%s: complete on axi config update\n",
+ __func__);
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ }
+ }
+
+ if (atomic_read(&vfe_dev->stats_data.stats_update)) {
+ ISP_DBG("%s: complete on stats update\n", __func__);
+ msm_isp_stats_stream_update(vfe_dev);
+ if (atomic_read(&vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ }
+
+ return rc;
+}
+
+static int msm_vfe44_axi_restart(struct vfe_device *vfe_dev,
+ uint32_t blocking, uint32_t enable_camif)
+{
+ vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev);
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w(0xFEFFFEFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x24);
+ msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318);
+
+ /* Start AXI */
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x2C0);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_SRC_MAX);
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+ if (enable_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, ENABLE_CAMIF);
+ }
+
+ return 0;
+}
+
+static uint32_t msm_vfe44_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 8) & 0x7F;
+}
+
+static uint32_t msm_vfe44_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 25) & 0xF;
+}
+
+static uint32_t msm_vfe44_get_pingpong_status(
+ struct vfe_device *vfe_dev)
+{
+ return msm_camera_io_r(vfe_dev->vfe_base + 0x268);
+}
+
+static int msm_vfe44_get_stats_idx(enum msm_isp_stats_type stats_type)
+{
+ switch (stats_type) {
+ case MSM_ISP_STATS_BE:
+ return STATS_IDX_BE;
+ case MSM_ISP_STATS_BG:
+ return STATS_IDX_BG;
+ case MSM_ISP_STATS_BF:
+ return STATS_IDX_BF;
+ case MSM_ISP_STATS_AWB:
+ return STATS_IDX_AWB;
+ case MSM_ISP_STATS_RS:
+ return STATS_IDX_RS;
+ case MSM_ISP_STATS_CS:
+ return STATS_IDX_CS;
+ case MSM_ISP_STATS_IHIST:
+ return STATS_IDX_IHIST;
+ case MSM_ISP_STATS_BHIST:
+ return STATS_IDX_BHIST;
+ case MSM_ISP_STATS_BF_SCALE:
+ return STATS_IDX_BF_SCALE;
+ default:
+ pr_err("%s: Invalid stats type\n", __func__);
+ return -EINVAL;
+ }
+}
+
+static int msm_vfe44_stats_check_streams(
+ struct msm_vfe_stats_stream *stream_info)
+{
+ if (stream_info[STATS_IDX_BF].state ==
+ STATS_AVALIABLE &&
+ stream_info[STATS_IDX_BF_SCALE].state !=
+ STATS_AVALIABLE) {
+ pr_err("%s: does not support BF_SCALE while BF is disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (stream_info[STATS_IDX_BF].state != STATS_AVALIABLE &&
+ stream_info[STATS_IDX_BF_SCALE].state != STATS_AVALIABLE &&
+ stream_info[STATS_IDX_BF].composite_flag !=
+ stream_info[STATS_IDX_BF_SCALE].composite_flag) {
+ pr_err("%s: Different composite flag for BF and BF_SCALE\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void msm_vfe44_stats_cfg_comp_mask(
+ struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t request_comp_index, uint8_t enable)
+{
+ uint32_t comp_mask_reg, mask_bf_scale;
+ atomic_t *stats_comp_mask;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask < 1)
+ return;
+
+ if (request_comp_index >= MAX_NUM_STATS_COMP_MASK) {
+ pr_err("%s: num of comp masks %d exceed max %d\n",
+ __func__, request_comp_index,
+ MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask >
+ MAX_NUM_STATS_COMP_MASK) {
+ pr_err("%s: num of comp masks %d exceed max %d\n",
+ __func__,
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask,
+ MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+	/* BF_SCALE is controlled together with BF, so drop bit 0 of the
+	 * mask before writing the comp mask register. */
+ stats_mask = stats_mask & 0x1FF;
+ mask_bf_scale = stats_mask >> SHIFT_BF_SCALE_BIT;
+
+ stats_comp_mask = &stats_data->stats_comp_mask[request_comp_index];
+ comp_mask_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x44);
+
+ if (enable) {
+ comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
+ atomic_set(stats_comp_mask, stats_mask |
+ atomic_read(stats_comp_mask));
+ } else {
+ if (!(atomic_read(stats_comp_mask) & stats_mask))
+ return;
+ if (stats_mask & (1 << STATS_IDX_BF_SCALE) &&
+ atomic_read(stats_comp_mask) &
+ (1 << STATS_IDX_BF_SCALE))
+ atomic_set(stats_comp_mask,
+ ~(1 << STATS_IDX_BF_SCALE) &
+ atomic_read(stats_comp_mask));
+
+ atomic_set(stats_comp_mask,
+ ~stats_mask & atomic_read(stats_comp_mask));
+ comp_mask_reg &= ~(mask_bf_scale <<
+ (16 + request_comp_index * 8));
+ }
+ msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x44);
+
+ ISP_DBG("%s: comp_mask_reg: %x comp mask0 %x mask1: %x\n",
+ __func__, comp_mask_reg,
+ atomic_read(&stats_data->stats_comp_mask[0]),
+ atomic_read(&stats_data->stats_comp_mask[1]));
+
+ return;
+}
+
+static void msm_vfe44_stats_cfg_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask |= 1 << (STATS_IDX(stream_info->stream_handle) + 15);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe44_stats_clear_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask;
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ irq_mask &= ~(1 << (STATS_IDX(stream_info->stream_handle) + 15));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x28);
+}
+
+static void msm_vfe44_stats_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE44_STATS_BASE(stats_idx);
+
+	/* BF_SCALE does not have its own WR_ADDR_CFG,
+	 * IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
+	 * it uses the same registers as BF.
+	 */
+ if (stats_idx == STATS_IDX_BF_SCALE)
+ return;
+ /*WR_ADDR_CFG*/
+ msm_camera_io_w(stream_info->framedrop_period << 2,
+ vfe_dev->vfe_base + stats_base + 0x8);
+ /*WR_IRQ_FRAMEDROP_PATTERN*/
+ msm_camera_io_w(stream_info->framedrop_pattern,
+ vfe_dev->vfe_base + stats_base + 0x10);
+ /*WR_IRQ_SUBSAMPLE_PATTERN*/
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + stats_base + 0x14);
+}
+
+static void msm_vfe44_stats_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t val = 0;
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE44_STATS_BASE(stats_idx);
+	/* BF_SCALE does not have its own WR_ADDR_CFG,
+	 * IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
+	 * it uses the same registers as BF.
+	 */
+ if (stats_idx == STATS_IDX_BF_SCALE)
+ return;
+
+ /*WR_ADDR_CFG*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x8);
+ /*WR_IRQ_FRAMEDROP_PATTERN*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x10);
+ /*WR_IRQ_SUBSAMPLE_PATTERN*/
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x14);
+}
+
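+/*
+ * Carve the stats UB from the top of the 2k UB downwards; the image
+ * write masters use the bottom MSM_ISP44_TOTAL_IMAGE_UB words.
+ */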
+static void msm_vfe44_stats_cfg_ub(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = VFE44_UB_SIZE;
+ uint32_t ub_size[VFE44_NUM_STATS_TYPE] = {
+ 128, /*MSM_ISP_STATS_BF_SCALE*/
+ 64, /*MSM_ISP_STATS_BE*/
+ 128, /*MSM_ISP_STATS_BG*/
+ 128, /*MSM_ISP_STATS_BF*/
+ 16, /*MSM_ISP_STATS_AWB*/
+ 8, /*MSM_ISP_STATS_RS*/
+ 16, /*MSM_ISP_STATS_CS*/
+ 16, /*MSM_ISP_STATS_IHIST*/
+ 16, /*MSM_ISP_STATS_BHIST*/
+ };
+
+ for (i = 0; i < VFE44_NUM_STATS_TYPE; i++) {
+ ub_offset -= ub_size[i];
+ msm_camera_io_w(VFE44_STATS_BURST_LEN << 30 |
+ ub_offset << 16 | (ub_size[i] - 1),
+ vfe_dev->vfe_base + VFE44_STATS_BASE(i) +
+ ((i == STATS_IDX_BF_SCALE) ? 0x8 : 0xC));
+ }
+}
+
+static bool msm_vfe44_is_module_cfg_lock_needed(
+ uint32_t reg_offset)
+{
+ if (reg_offset == 0x18)
+ return true;
+ else
+ return false;
+}
+
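+/*
+ * BE/BG/BF/AWB/RS/CS/IHIST/BHIST are enabled through MODULE_CFG (0x18),
+ * while BF_SCALE is enabled through STATS_CFG (0x888).
+ */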
+static void msm_vfe44_stats_enable_module(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, module_cfg_mask = 0;
+ uint32_t stats_cfg, stats_cfg_mask = 0;
+ unsigned long flags;
+
+ for (i = 0; i < VFE44_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case STATS_IDX_BE:
+ case STATS_IDX_BG:
+ case STATS_IDX_BF:
+ case STATS_IDX_AWB:
+ case STATS_IDX_RS:
+ case STATS_IDX_CS:
+ module_cfg_mask |= 1 << (4 + i);
+ break;
+ case STATS_IDX_IHIST:
+ module_cfg_mask |= 1 << 15;
+ break;
+ case STATS_IDX_BHIST:
+ module_cfg_mask |= 1 << 18;
+ break;
+ case STATS_IDX_BF_SCALE:
+ stats_cfg_mask |= 1 << 2;
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+	/*
+	 * On VFE44 the stats blocks share the MODULE_CFG register with
+	 * other modules, so take the lock around the read-modify-write.
+	 */
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x18);
+ if (enable)
+ module_cfg |= module_cfg_mask;
+ else
+ module_cfg &= ~module_cfg_mask;
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x18);
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+
+ stats_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x888);
+ if (enable)
+ stats_cfg |= stats_cfg_mask;
+ else
+ stats_cfg &= ~stats_cfg_mask;
+ msm_camera_io_w(stats_cfg, vfe_dev->vfe_base + 0x888);
+}
+
+static void msm_vfe44_stats_update_cgc_override(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t cgc_override)
+{
+ int i;
+ uint32_t val = 0, cgc_mask = 0;
+
+ for (i = 0; i < VFE44_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case STATS_IDX_BE:
+ cgc_mask |= (1 << 8);
+ break;
+ case STATS_IDX_BG:
+ cgc_mask |= (1 << 9);
+ break;
+ case STATS_IDX_BF:
+ cgc_mask |= (1 << 10);
+ break;
+ case STATS_IDX_AWB:
+ cgc_mask |= (1 << 11);
+ break;
+ case STATS_IDX_RS:
+ cgc_mask |= (1 << 12);
+ break;
+ case STATS_IDX_CS:
+ cgc_mask |= (1 << 13);
+ break;
+ case STATS_IDX_IHIST:
+ cgc_mask |= (1 << 14);
+ break;
+ case STATS_IDX_BHIST:
+ cgc_mask |= (1 << 15);
+ break;
+ case STATS_IDX_BF_SCALE:
+ cgc_mask |= (1 << 10);
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ /* CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x974);
+ if (cgc_override)
+ val |= cgc_mask;
+ else
+ val &= ~cgc_mask;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x974);
+}
+
+static void msm_vfe44_stats_update_ping_pong_addr(
+ void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status, dma_addr_t paddr)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE44_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
+}
+
+static uint32_t msm_vfe44_stats_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 15) & 0x1FF;
+}
+
+static uint32_t msm_vfe44_stats_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 29) & 0x3;
+}
+
+static uint32_t msm_vfe44_stats_get_frame_id(
+ struct vfe_device *vfe_dev)
+{
+ return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+}
+
+static int msm_vfe44_get_platform_data(struct vfe_device *vfe_dev)
+{
+ int rc = 0;
+ vfe_dev->vfe_mem = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe");
+ if (!vfe_dev->vfe_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_vbif_mem = platform_get_resource_byname(
+ vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe_vbif");
+ if (!vfe_dev->vfe_vbif_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_irq = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_IRQ, "vfe");
+ if (!vfe_dev->vfe_irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->fs_vfe = regulator_get(&vfe_dev->pdev->dev, "vdd");
+ if (IS_ERR(vfe_dev->fs_vfe)) {
+ pr_err("%s: Regulator get failed %ld\n", __func__,
+ PTR_ERR(vfe_dev->fs_vfe));
+ vfe_dev->fs_vfe = NULL;
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+vfe_no_resource:
+ return rc;
+}
+
+static void msm_vfe44_get_error_mask(
+ uint32_t *error_mask0, uint32_t *error_mask1)
+{
+ *error_mask0 = 0x00000000;
+ *error_mask1 = 0x01FFFEFF;
+}
+
+static void msm_vfe44_get_overflow_mask(uint32_t *overflow_mask)
+{
+ *overflow_mask = 0x00FFFE7E;
+}
+
+static void msm_vfe44_get_rdi_wm_mask(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask)
+{
+ *rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
+}
+
+static void msm_vfe44_get_irq_mask(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+ *irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
+ *irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
+}
+
+
+static void msm_vfe44_restore_irq_mask(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
+ vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
+ vfe_dev->vfe_base + 0x2C);
+}
+
+
+static void msm_vfe44_get_halt_restart_mask(uint32_t *irq0_mask,
+ uint32_t *irq1_mask)
+{
+ *irq0_mask = BIT(31);
+ *irq1_mask = BIT(8);
+}
+
+static struct msm_vfe_axi_hardware_info msm_vfe44_axi_hw_info = {
+ .num_wm = 6,
+ .num_comp_mask = 3,
+ .num_rdi = 3,
+ .num_rdi_master = 3,
+ .min_wm_ub = 96,
+ .scratch_buf_range = SZ_32M,
+};
+
+static struct msm_vfe_stats_hardware_info msm_vfe44_stats_hw_info = {
+ .stats_capability_mask =
+ 1 << MSM_ISP_STATS_BE | 1 << MSM_ISP_STATS_BF |
+ 1 << MSM_ISP_STATS_BG | 1 << MSM_ISP_STATS_BHIST |
+ 1 << MSM_ISP_STATS_AWB | 1 << MSM_ISP_STATS_IHIST |
+ 1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS |
+ 1 << MSM_ISP_STATS_BF_SCALE,
+ .stats_ping_pong_offset = stats_pingpong_offset_map,
+ .num_stats_type = VFE44_NUM_STATS_TYPE,
+ .num_stats_comp_mask = VFE44_NUM_STATS_COMP,
+};
+
+struct msm_vfe_hardware_info vfe44_hw_info = {
+ .num_iommu_ctx = 1,
+ .num_iommu_secure_ctx = 1,
+ .vfe_clk_idx = VFE44_CLK_IDX,
+ .runtime_axi_update = 0,
+ .vfe_ops = {
+ .irq_ops = {
+ .read_irq_status = msm_vfe44_read_irq_status,
+ .process_camif_irq = msm_vfe44_process_input_irq,
+ .process_reset_irq = msm_vfe44_process_reset_irq,
+ .process_halt_irq = msm_vfe44_process_halt_irq,
+ .process_reg_update = msm_vfe44_process_reg_update,
+ .process_axi_irq = msm_isp_process_axi_irq,
+ .process_stats_irq = msm_isp_process_stats_irq,
+ .process_epoch_irq = msm_vfe44_process_epoch_irq,
+ .enable_camif_err = msm_vfe44_enable_camif_error,
+ },
+ .axi_ops = {
+ .reload_wm = msm_vfe44_axi_reload_wm,
+ .enable_wm = msm_vfe44_axi_enable_wm,
+ .cfg_io_format = msm_vfe44_cfg_io_format,
+ .cfg_comp_mask = msm_vfe44_axi_cfg_comp_mask,
+ .clear_comp_mask = msm_vfe44_axi_clear_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe44_axi_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe44_axi_clear_wm_irq_mask,
+ .cfg_framedrop = msm_vfe44_cfg_framedrop,
+ .clear_framedrop = msm_vfe44_clear_framedrop,
+ .cfg_wm_reg = msm_vfe44_axi_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe44_axi_clear_wm_reg,
+ .cfg_wm_xbar_reg = msm_vfe44_axi_cfg_wm_xbar_reg,
+ .clear_wm_xbar_reg = msm_vfe44_axi_clear_wm_xbar_reg,
+ .cfg_ub = msm_vfe44_cfg_axi_ub,
+ .read_wm_ping_pong_addr =
+ msm_vfe44_read_wm_ping_pong_addr,
+ .update_ping_pong_addr =
+ msm_vfe44_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe44_get_comp_mask,
+ .get_wm_mask = msm_vfe44_get_wm_mask,
+ .get_pingpong_status = msm_vfe44_get_pingpong_status,
+ .halt = msm_vfe44_axi_halt,
+ .restart = msm_vfe44_axi_restart,
+ .update_cgc_override =
+ msm_vfe44_axi_update_cgc_override,
+ },
+ .core_ops = {
+ .reg_update = msm_vfe44_reg_update,
+ .cfg_input_mux = msm_vfe44_cfg_input_mux,
+ .update_camif_state = msm_vfe44_update_camif_state,
+ .start_fetch_eng = msm_vfe44_fetch_engine_start,
+ .cfg_rdi_reg = msm_vfe44_cfg_rdi_reg,
+ .reset_hw = msm_vfe44_reset_hardware,
+ .init_hw = msm_vfe44_init_hardware,
+ .init_hw_reg = msm_vfe44_init_hardware_reg,
+ .clear_status_reg = msm_vfe44_clear_status_reg,
+ .release_hw = msm_vfe44_release_hardware,
+ .get_platform_data = msm_vfe44_get_platform_data,
+ .get_error_mask = msm_vfe44_get_error_mask,
+ .get_overflow_mask = msm_vfe44_get_overflow_mask,
+ .get_rdi_wm_mask = msm_vfe44_get_rdi_wm_mask,
+ .get_irq_mask = msm_vfe44_get_irq_mask,
+ .restore_irq_mask = msm_vfe44_restore_irq_mask,
+ .get_halt_restart_mask =
+ msm_vfe44_get_halt_restart_mask,
+ .process_error_status = msm_vfe44_process_error_status,
+ .is_module_cfg_lock_needed =
+ msm_vfe44_is_module_cfg_lock_needed,
+ },
+ .stats_ops = {
+ .get_stats_idx = msm_vfe44_get_stats_idx,
+ .check_streams = msm_vfe44_stats_check_streams,
+ .cfg_comp_mask = msm_vfe44_stats_cfg_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe44_stats_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe44_stats_clear_wm_irq_mask,
+ .cfg_wm_reg = msm_vfe44_stats_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe44_stats_clear_wm_reg,
+ .cfg_ub = msm_vfe44_stats_cfg_ub,
+ .enable_module = msm_vfe44_stats_enable_module,
+ .update_ping_pong_addr =
+ msm_vfe44_stats_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe44_stats_get_comp_mask,
+ .get_wm_mask = msm_vfe44_stats_get_wm_mask,
+ .get_frame_id = msm_vfe44_stats_get_frame_id,
+ .get_pingpong_status = msm_vfe44_get_pingpong_status,
+ .update_cgc_override =
+ msm_vfe44_stats_update_cgc_override,
+ },
+ },
+ .dmi_reg_offset = 0x918,
+ .axi_hw_info = &msm_vfe44_axi_hw_info,
+ .stats_hw_info = &msm_vfe44_stats_hw_info,
+};
+EXPORT_SYMBOL(vfe44_hw_info);
+
+static const struct of_device_id msm_vfe44_dt_match[] = {
+ {
+ .compatible = "qcom,vfe44",
+ .data = &vfe44_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe44_dt_match);
+
+static struct platform_driver vfe44_driver = {
+ .probe = vfe_hw_probe,
+ .driver = {
+ .name = "msm_vfe44",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe44_dt_match,
+ },
+};
+
+static int __init msm_vfe44_init_module(void)
+{
+ return platform_driver_register(&vfe44_driver);
+}
+
+static void __exit msm_vfe44_exit_module(void)
+{
+ platform_driver_unregister(&vfe44_driver);
+}
+
+module_init(msm_vfe44_init_module);
+module_exit(msm_vfe44_exit_module);
+MODULE_DESCRIPTION("MSM VFE44 driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.h
new file mode 100644
index 000000000000..187933b7372d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISP44_H__
+#define __MSM_ISP44_H__
+
+extern struct msm_vfe_hardware_info vfe44_hw_info;
+#endif /* __MSM_ISP44_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
new file mode 100644
index 000000000000..4b5ae76ca21e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -0,0 +1,2193 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+
+#include "msm_isp46.h"
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_isp.h"
+#include "msm.h"
+#include "msm_camera_io_util.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define STATS_IDX_BF_SCALE 0
+#define STATS_IDX_HDR_BE 1
+#define STATS_IDX_BG 2
+#define STATS_IDX_BF 3
+#define STATS_IDX_HDR_BHIST 4
+#define STATS_IDX_RS 5
+#define STATS_IDX_CS 6
+#define STATS_IDX_IHIST 7
+#define STATS_IDX_BHIST 8
+
+#define VFE46_8994V1_VERSION 0x60000000
+
+#define VFE46_BURST_LEN 3
+#define VFE46_FETCH_BURST_LEN 3
+#define VFE46_STATS_BURST_LEN 3
+#define VFE46_UB_SIZE_VFE0 2048
+#define VFE46_UB_SIZE_VFE1 1536
+#define VFE46_UB_STATS_SIZE 144
+#define MSM_ISP46_TOTAL_IMAGE_UB_VFE0 (VFE46_UB_SIZE_VFE0 - VFE46_UB_STATS_SIZE)
+#define MSM_ISP46_TOTAL_IMAGE_UB_VFE1 (VFE46_UB_SIZE_VFE1 - VFE46_UB_STATS_SIZE)
+#define VFE46_WM_BASE(idx) (0xA0 + 0x24 * (idx))
+#define VFE46_RDI_BASE(idx) (0x39C + 0x4 * (idx))
+#define VFE46_XBAR_BASE(idx) (0x90 + 0x4 * ((idx) / 2))
+#define VFE46_XBAR_SHIFT(idx) (((idx) % 2) ? 16 : 0)
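+/*
+ * The WM ping/pong address registers sit at +0x4 and +0x8 from the WM
+ * base; the inverted ping-pong status bit selects which one to program.
+ */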
+#define VFE46_PING_PONG_BASE(wm, ping_pong) \
+ (VFE46_WM_BASE(wm) + 0x4 * (1 + ((~ping_pong) & 0x1)))
+#define SHIFT_BF_SCALE_BIT 1
+#define VFE46_NUM_STATS_COMP 2
+#define VFE46_BUS_RD_CGC_OVERRIDE_BIT 16
+
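+/*
+ * Per-stats write master register base offsets, indexed by the
+ * STATS_IDX_* values above.
+ */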
+static uint32_t stats_base_addr[] = {
+ 0x1E4, /* BF_SCALE */
+ 0x19C, /* HDR_BE */
+ 0x1F0, /* BG */
+ 0x1CC, /* BF */
+ 0x1B4, /* HDR_BHIST */
+ 0x220, /* RS */
+ 0x238, /* CS */
+ 0x250, /* IHIST */
+ 0x208, /* BHIST (SKIN_BHIST) */
+};
+
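+/*
+ * Bit position of each stats write master in the ping-pong status
+ * register, used by VFE46_STATS_PING_PONG_BASE below.
+ */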
+static uint8_t stats_pingpong_offset_map[] = {
+ 11, /* BF_SCALE */
+ 8, /* HDR_BE */
+ 12, /* BG */
+ 10, /* BF */
+ 9, /* HDR_BHIST */
+ 14, /* RS */
+ 15, /* CS */
+ 16, /* IHIST */
+ 13, /* BHIST (SKIN_BHIST) */
+};
+
+#define VFE46_NUM_STATS_TYPE 9
+#define VFE46_STATS_BASE(idx) (stats_base_addr[idx])
+#define VFE46_STATS_PING_PONG_BASE(idx, ping_pong) \
+ (VFE46_STATS_BASE(idx) + 0x4 * \
+ (~(ping_pong >> (stats_pingpong_offset_map[idx])) & 0x1))
+
+#define VFE46_CLK_IDX 2
+static struct msm_cam_clk_info msm_vfe46_clk_info[VFE_CLK_INFO_MAX];
+static int32_t msm_vfe46_init_dt_parms(struct vfe_device *vfe_dev,
+ struct msm_vfe_hw_init_parms *dt_parms, void __iomem *dev_mem_base)
+{
+ struct device_node *of_node;
+	int32_t i = 0, rc = 0;
+ uint32_t *dt_settings = NULL, *dt_regs = NULL, num_dt_entries = 0;
+
+ of_node = vfe_dev->pdev->dev.of_node;
+
+	rc = of_property_read_u32(of_node, dt_parms->entries,
+		&num_dt_entries);
+	if (rc < 0 || !num_dt_entries) {
+		pr_err("%s: NO QOS entries found\n", __func__);
+		return -EINVAL;
+	}
+
+	dt_settings = kzalloc(sizeof(uint32_t) * num_dt_entries,
+		GFP_KERNEL);
+	if (!dt_settings) {
+		pr_err("%s:%d No memory\n", __func__, __LINE__);
+		return -ENOMEM;
+	}
+	dt_regs = kzalloc(sizeof(uint32_t) * num_dt_entries,
+		GFP_KERNEL);
+	if (!dt_regs) {
+		pr_err("%s:%d No memory\n", __func__, __LINE__);
+		kfree(dt_settings);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32_array(of_node, dt_parms->regs,
+		dt_regs, num_dt_entries);
+	if (rc < 0) {
+		pr_err("%s: NO QOS BUS BDG info\n", __func__);
+		rc = -EINVAL;
+		goto free_and_exit;
+	}
+
+	/* Settings are optional; without them there is nothing to program. */
+	if (dt_parms->settings) {
+		rc = of_property_read_u32_array(of_node, dt_parms->settings,
+			dt_settings, num_dt_entries);
+		if (rc < 0) {
+			pr_err("%s: NO QOS settings\n", __func__);
+			rc = -EINVAL;
+			goto free_and_exit;
+		}
+		for (i = 0; i < num_dt_entries; i++)
+			msm_camera_io_w(dt_settings[i],
+				dev_mem_base + dt_regs[i]);
+	}
+	rc = 0;
+
+free_and_exit:
+	kfree(dt_settings);
+	kfree(dt_regs);
+	return rc;
+}
+
+static int msm_vfe46_init_hardware(struct vfe_device *vfe_dev)
+{
+ int rc = -1;
+
+ rc = msm_isp_init_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
+ goto bus_scale_register_failed;
+ }
+
+ if (vfe_dev->fs_vfe) {
+ rc = regulator_enable(vfe_dev->fs_vfe);
+ if (rc) {
+ pr_err("%s: Regulator enable failed\n", __func__);
+ goto fs_failed;
+ }
+ }
+
+ rc = msm_isp_get_clk_info(vfe_dev, vfe_dev->pdev, msm_vfe46_clk_info);
+ if (rc < 0) {
+ pr_err("msm_isp_get_clk_info() failed\n");
+ goto fs_failed;
+ }
+	if (vfe_dev->num_clk <= 0) {
+		pr_err("%s: Invalid num of clock\n", __func__);
+		rc = -EINVAL;
+		goto fs_failed;
+	}
+	vfe_dev->vfe_clk =
+		kzalloc(sizeof(struct clk *) * vfe_dev->num_clk,
+		GFP_KERNEL);
+	if (!vfe_dev->vfe_clk) {
+		pr_err("%s:%d No memory\n", __func__, __LINE__);
+		rc = -ENOMEM;
+		goto fs_failed;
+	}
+ rc = msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe46_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 1);
+ if (rc < 0)
+ goto clk_enable_failed;
+
+ vfe_dev->vfe_base = ioremap(vfe_dev->vfe_mem->start,
+ resource_size(vfe_dev->vfe_mem));
+ if (!vfe_dev->vfe_base) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto vfe_remap_failed;
+ }
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
+ vfe_dev->vfe_base;
+
+ vfe_dev->vfe_vbif_base = ioremap(vfe_dev->vfe_vbif_mem->start,
+ resource_size(vfe_dev->vfe_vbif_mem));
+ if (!vfe_dev->vfe_vbif_base) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto vbif_remap_failed;
+ }
+
+ rc = request_irq(vfe_dev->vfe_irq->start, msm_isp_process_irq,
+ IRQF_TRIGGER_RISING, "vfe", vfe_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request failed\n", __func__);
+ goto irq_req_failed;
+ }
+ return rc;
+irq_req_failed:
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+vbif_remap_failed:
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+vfe_remap_failed:
+ msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe46_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 0);
+clk_enable_failed:
+ if (vfe_dev->fs_vfe)
+ regulator_disable(vfe_dev->fs_vfe);
+ kfree(vfe_dev->vfe_clk);
+fs_failed:
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+bus_scale_register_failed:
+ return rc;
+}
+
+static void msm_vfe46_release_hardware(struct vfe_device *vfe_dev)
+{
+ /* when closing node, disable all irq */
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x60);
+
+ disable_irq(vfe_dev->vfe_irq->start);
+ free_irq(vfe_dev->vfe_irq->start, vfe_dev);
+ tasklet_kill(&vfe_dev->vfe_tasklet);
+ msm_isp_flush_tasklet(vfe_dev);
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+ msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe46_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 0);
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+ kfree(vfe_dev->vfe_clk);
+ regulator_disable(vfe_dev->fs_vfe);
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+}
+
+static void msm_vfe46_init_hardware_reg(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_hw_init_parms qos_parms;
+ struct msm_vfe_hw_init_parms vbif_parms;
+ struct msm_vfe_hw_init_parms ds_parms;
+
+ memset(&qos_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
+ memset(&vbif_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
+ memset(&ds_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
+
+ qos_parms.entries = "qos-entries";
+ qos_parms.regs = "qos-regs";
+ qos_parms.settings = "qos-settings";
+ vbif_parms.entries = "vbif-entries";
+ vbif_parms.regs = "vbif-regs";
+ vbif_parms.settings = "vbif-settings";
+ ds_parms.entries = "ds-entries";
+ ds_parms.regs = "ds-regs";
+ ds_parms.settings = "ds-settings";
+
+ msm_vfe46_init_dt_parms(vfe_dev, &qos_parms, vfe_dev->vfe_base);
+ msm_vfe46_init_dt_parms(vfe_dev, &ds_parms, vfe_dev->vfe_base);
+ msm_vfe46_init_dt_parms(vfe_dev, &vbif_parms, vfe_dev->vfe_vbif_base);
+
+ /* BUS_CFG */
+ msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x84);
+ /* IRQ_MASK/CLEAR */
+ msm_camera_io_w(0xE00000F1, vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(0xE1FFFFFF, vfe_dev->vfe_base + 0x60);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+}
+
+static void msm_vfe46_clear_status_reg(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w(0x80000000, vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x60);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
+}
+
+static void msm_vfe46_process_reset_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ if (irq_status0 & (1 << 31)) {
+ complete(&vfe_dev->reset_complete);
+ vfe_dev->reset_pending = 0;
+ }
+}
+
+static void msm_vfe46_process_halt_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ if (irq_status1 & (1 << 8)) {
+ complete(&vfe_dev->halt_complete);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x374);
+ }
+}
+
+static void msm_vfe46_process_input_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0x1000003))
+ return;
+
+ if (irq_status0 & (1 << 24)) {
+ ISP_DBG("%s: Fetch Engine Read IRQ\n", __func__);
+ msm_isp_fetch_engine_done_notify(vfe_dev,
+ &vfe_dev->fetch_engine_info);
+ }
+
+ if (irq_status0 & (1 << 0)) {
+ ISP_DBG("%s: SOF IRQ\n", __func__);
+ msm_isp_increment_frame_id(vfe_dev, VFE_PIX_0, ts);
+ }
+
+ if (irq_status0 & (1 << 1))
+ ISP_DBG("%s: EOF IRQ\n", __func__);
+}
+
+static void msm_vfe46_process_violation_status(
+ struct vfe_device *vfe_dev)
+{
+ uint32_t violation_status = vfe_dev->error_info.violation_status;
+
+ if (violation_status > 39) {
+ pr_err("%s: invalid violation status %d\n",
+ __func__, violation_status);
+ return;
+ }
+
+ pr_err("%s: VFE pipeline violation status %d\n", __func__,
+ violation_status);
+}
+
+static void msm_vfe46_process_error_status(struct vfe_device *vfe_dev)
+{
+ uint32_t error_status1 = vfe_dev->error_info.error_mask1;
+
+ if (error_status1 & (1 << 0)) {
+ pr_err("%s: camif error status: 0x%x\n",
+ __func__, vfe_dev->error_info.camif_status);
+ msm_camera_io_dump(vfe_dev->vfe_base + 0x3A8, 0x30, 1);
+ }
+ if (error_status1 & (1 << 1))
+ pr_err("%s: stats bhist overwrite\n", __func__);
+ if (error_status1 & (1 << 2))
+ pr_err("%s: stats cs overwrite\n", __func__);
+ if (error_status1 & (1 << 3))
+ pr_err("%s: stats ihist overwrite\n", __func__);
+ if (error_status1 & (1 << 4))
+ pr_err("%s: realign buf y overflow\n", __func__);
+ if (error_status1 & (1 << 5))
+ pr_err("%s: realign buf cb overflow\n", __func__);
+ if (error_status1 & (1 << 6))
+ pr_err("%s: realign buf cr overflow\n", __func__);
+	if (error_status1 & (1 << 7))
+		msm_vfe46_process_violation_status(vfe_dev);
+ if (error_status1 & (1 << 9))
+ pr_err("%s: image master 0 bus overflow\n", __func__);
+ if (error_status1 & (1 << 10))
+ pr_err("%s: image master 1 bus overflow\n", __func__);
+ if (error_status1 & (1 << 11))
+ pr_err("%s: image master 2 bus overflow\n", __func__);
+ if (error_status1 & (1 << 12))
+ pr_err("%s: image master 3 bus overflow\n", __func__);
+ if (error_status1 & (1 << 13))
+ pr_err("%s: image master 4 bus overflow\n", __func__);
+ if (error_status1 & (1 << 14))
+ pr_err("%s: image master 5 bus overflow\n", __func__);
+ if (error_status1 & (1 << 15))
+ pr_err("%s: image master 6 bus overflow\n", __func__);
+	if (error_status1 & (1 << 16))
+		pr_err("%s: stats hdr be bus overflow\n", __func__);
+	if (error_status1 & (1 << 17))
+		pr_err("%s: stats bg bus overflow\n", __func__);
+	if (error_status1 & (1 << 18))
+		pr_err("%s: stats bf bus overflow\n", __func__);
+	if (error_status1 & (1 << 19))
+		pr_err("%s: stats hdr bhist bus overflow\n", __func__);
+	if (error_status1 & (1 << 20))
+		pr_err("%s: stats rs bus overflow\n", __func__);
+	if (error_status1 & (1 << 21))
+		pr_err("%s: stats cs bus overflow\n", __func__);
+	if (error_status1 & (1 << 22))
+		pr_err("%s: stats ihist bus overflow\n", __func__);
+	if (error_status1 & (1 << 23))
+		pr_err("%s: stats skin bhist bus overflow\n", __func__);
+	if (error_status1 & (1 << 24))
+		pr_err("%s: stats bf scale bus overflow\n", __func__);
+}
+
+static void msm_vfe46_enable_camif_error(struct vfe_device *vfe_dev,
+ int enable)
+{
+ uint32_t val;
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
+ if (enable)
+ msm_camera_io_w_mb(val | BIT(0), vfe_dev->vfe_base + 0x60);
+ else
+ msm_camera_io_w_mb(val & ~(BIT(0)), vfe_dev->vfe_base + 0x60);
+}
+
+static void msm_vfe46_read_irq_status(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1)
+{
+ uint32_t irq_mask0, irq_mask1;
+
+ *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x6C);
+ *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x70);
+ msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(*irq_status1, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
+
+ irq_mask0 = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask1 = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
+ *irq_status0 &= irq_mask0;
+ *irq_status1 &= irq_mask1;
+
+ if (*irq_status1 & (1 << 0)) {
+ vfe_dev->error_info.camif_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x3D0);
+ msm_vfe46_enable_camif_error(vfe_dev, 0);
+ }
+
+ if (*irq_status1 & (1 << 7))
+ vfe_dev->error_info.violation_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x7C);
+
+}
+
+static void msm_vfe46_process_reg_update(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ enum msm_vfe_input_src i;
+ uint32_t shift_irq;
+ uint8_t reg_updated = 0;
+ unsigned long flags;
+
+ if (!(irq_status0 & 0xF0))
+ return;
+ /* Shift status bits so that PIX REG UPDATE is 1st bit */
+ shift_irq = ((irq_status0 & 0xF0) >> 4);
+
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ if (shift_irq & BIT(i)) {
+ reg_updated |= BIT(i);
+ ISP_DBG("%s Reg Update IRQ %x\n", __func__,
+ (uint32_t)BIT(i));
+
+ switch (i) {
+ case VFE_PIX_0:
+ msm_isp_save_framedrop_values(vfe_dev,
+ VFE_PIX_0);
+ msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
+ VFE_PIX_0, ts);
+ if (atomic_read(
+ &vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ if (vfe_dev->axi_data.camif_state ==
+ CAMIF_STOPPING)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, i);
+ break;
+ case VFE_RAW_0:
+ case VFE_RAW_1:
+ case VFE_RAW_2:
+ msm_isp_increment_frame_id(vfe_dev, i, ts);
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
+ msm_isp_update_framedrop_reg(vfe_dev, i);
+ /*
+ * Reg Update is pseudo SOF for RDI,
+ * so request every frame
+ */
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev, i);
+ break;
+ default:
+ pr_err("%s: Error case\n", __func__);
+ return;
+ }
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(
+ &vfe_dev->axi_data.axi_cfg_update[i]) ==
+ 0)
+ msm_isp_notify(vfe_dev,
+ ISP_EVENT_STREAM_UPDATE_DONE,
+ i, ts);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ if (reg_updated & BIT(VFE_PIX_0))
+ vfe_dev->reg_updated = 1;
+
+ vfe_dev->reg_update_requested &= ~reg_updated;
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+static void msm_vfe46_process_epoch_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0xc))
+ return;
+
+ if (irq_status0 & BIT(2)) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
+ ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
+ msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
+ msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_update_error_frame_count(vfe_dev);
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
+ && vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count == 0) {
+ if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
+ msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev, VFE_PIX_0);
+ }
+ }
+}
+
+static void msm_vfe46_reg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ uint32_t update_mask = 0;
+ unsigned long flags;
+
+	/* This HW supports up to VFE_RAW_2 */
+ if (frame_src > VFE_RAW_2 && frame_src != VFE_SRC_MAX) {
+ pr_err("%s Error case\n", __func__);
+ return;
+ }
+
+ /*
+ * If frame_src == VFE_SRC_MAX request reg_update on all
+ * supported INTF
+ */
+ if (frame_src == VFE_SRC_MAX)
+ update_mask = 0xF;
+ else
+ update_mask = BIT((uint32_t)frame_src);
+ ISP_DBG("%s update_mask %x\n", __func__, update_mask);
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].reg_update_frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ vfe_dev->reg_update_requested |= update_mask;
+ vfe_dev->common_data->dual_vfe_res->reg_update_mask[vfe_dev->pdev->id] =
+ vfe_dev->reg_update_requested;
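+	/*
+	 * In split (dual-VFE) mode, VFE1 kicks the register update on
+	 * both VFEs so the two halves stay in sync.
+	 */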
+ if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) &&
+ ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
+ + 0x3D8);
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x3D8);
+ } else if (!vfe_dev->is_split ||
+ ((frame_src == VFE_PIX_0) &&
+ (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x3D8);
+ }
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+static long msm_vfe46_reset_hardware(struct vfe_device *vfe_dev,
+ uint32_t first_start, uint32_t blocking_call)
+{
+ long rc = 0;
+ init_completion(&vfe_dev->reset_complete);
+
+ if (blocking_call)
+ vfe_dev->reset_pending = 1;
+
+ if (first_start) {
+ msm_camera_io_w_mb(0x1FF, vfe_dev->vfe_base + 0x18);
+ } else {
+ msm_camera_io_w_mb(0x1EF, vfe_dev->vfe_base + 0x18);
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ reload_wm(vfe_dev, vfe_dev->vfe_base, 0x0001FFFF);
+ }
+
+ if (blocking_call) {
+ rc = wait_for_completion_timeout(
+ &vfe_dev->reset_complete, msecs_to_jiffies(50));
+ if (rc <= 0) {
+ pr_err("%s:%d failed: reset timeout\n", __func__,
+ __LINE__);
+ vfe_dev->reset_pending = 0;
+ }
+ }
+
+ return rc;
+}
+
+static void msm_vfe46_axi_reload_wm(struct vfe_device *vfe_dev,
+ void __iomem *vfe_base, uint32_t reload_mask)
+{
+ msm_camera_io_w_mb(reload_mask, vfe_base + 0x80);
+}
+
+static void msm_vfe46_axi_update_cgc_override(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val;
+
+ /* Change CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
+ if (enable)
+ val |= (1 << wm_idx);
+ else
+ val &= ~(1 << wm_idx);
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x3C);
+}
+
+static void msm_vfe46_axi_enable_wm(void __iomem *vfe_base,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val;
+
+ val = msm_camera_io_r(vfe_base + VFE46_WM_BASE(wm_idx));
+ if (enable)
+ val |= 0x1;
+ else
+ val &= ~0x1;
+ msm_camera_io_w_mb(val,
+ vfe_base + VFE46_WM_BASE(wm_idx));
+}
+
+static void msm_vfe46_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t comp_mask, comp_mask_index =
+ stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ comp_mask |= (axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask |= 1 << (comp_mask_index + 25);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
+static void msm_vfe46_axi_clear_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask &= ~(1 << (comp_mask_index + 25));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
+static void msm_vfe46_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask |= 1 << (stream_info->wm[0] + 8);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
+static void msm_vfe46_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask &= ~(1 << (stream_info->wm[0] + 8));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
+static void msm_vfe46_cfg_framedrop(void __iomem *vfe_base,
+ struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
+ uint32_t framedrop_period)
+{
+ uint32_t i, temp;
+
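+	/*
+	 * The framedrop pattern register is at WM base + 0x1C; the
+	 * framedrop period occupies bits [6:2] of WM base + 0xC.
+	 */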
+ for (i = 0; i < stream_info->num_planes; i++) {
+ msm_camera_io_w(framedrop_pattern, vfe_base +
+ VFE46_WM_BASE(stream_info->wm[i]) + 0x1C);
+ temp = msm_camera_io_r(vfe_base +
+ VFE46_WM_BASE(stream_info->wm[i]) + 0xC);
+ temp &= 0xFFFFFF83;
+ msm_camera_io_w(temp | (framedrop_period - 1) << 2,
+ vfe_base + VFE46_WM_BASE(stream_info->wm[i]) + 0xC);
+ }
+}
+
+static void msm_vfe46_clear_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t i;
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ msm_camera_io_w(0, vfe_dev->vfe_base +
+ VFE46_WM_BASE(stream_info->wm[i]) + 0x1C);
+}
+
+static int32_t msm_vfe46_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
+{
+ int rc = 0;
+ switch (bpp) {
+ case 8:
+ *bpp_reg = 0;
+ break;
+ case 10:
+ *bpp_reg = 0x1;
+ break;
+ case 12:
+ *bpp_reg = 0x2;
+ break;
+ case 14:
+ *bpp_reg = 0x3;
+ break;
+ default:
+ pr_err("%s:%d invalid bpp %d\n", __func__, __LINE__, bpp);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_vfe46_convert_io_fmt_to_reg(
+ enum msm_isp_pack_fmt pack_format, uint32_t *pack_reg)
+{
+ int rc = 0;
+
+ switch (pack_format) {
+ case QCOM:
+ *pack_reg = 0x0;
+ break;
+ case MIPI:
+ *pack_reg = 0x1;
+ break;
+ case DPCM6:
+ *pack_reg = 0x2;
+ break;
+ case DPCM8:
+ *pack_reg = 0x3;
+ break;
+ case PLAIN8:
+ *pack_reg = 0x4;
+ break;
+ case PLAIN16:
+ *pack_reg = 0x5;
+ break;
+ default:
+ pr_err("%s: invalid pack fmt %d!\n", __func__, pack_format);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_vfe46_cfg_io_format(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
+{
+ int rc = 0;
+ int bpp = 0, read_bpp = 0;
+ enum msm_isp_pack_fmt pack_fmt = 0, read_pack_fmt = 0;
+ uint32_t bpp_reg = 0, pack_reg = 0;
+ uint32_t read_bpp_reg = 0, read_pack_reg = 0;
+ uint32_t io_format_reg = 0; /*io format register bit*/
+
+ io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x88);
+
+ /*input config*/
+ if ((stream_src < RDI_INTF_0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux ==
+ EXTERNAL_READ)) {
+ read_bpp = msm_isp_get_bit_per_pixel(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe46_convert_bpp_to_reg(read_bpp, &read_bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! in_bpp %d rc %d\n",
+ __func__, read_bpp, rc);
+ return rc;
+ }
+
+ read_pack_fmt = msm_isp_get_pack_format(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe46_convert_io_fmt_to_reg(
+ read_pack_fmt, &read_pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ /*use input format(v4l2_pix_fmt) to get pack format*/
+ io_format_reg &= 0xFFC8FFFF;
+ io_format_reg |= (read_bpp_reg << 20 | read_pack_reg << 16);
+ }
+
+ bpp = msm_isp_get_bit_per_pixel(io_format);
+ rc = msm_vfe46_convert_bpp_to_reg(bpp, &bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! bpp %d rc = %d\n",
+ __func__, bpp, rc);
+ return rc;
+ }
+
+ switch (stream_src) {
+ case PIX_VIDEO:
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
+ case CAMIF_RAW:
+ io_format_reg &= 0xFFFFCFFF;
+ io_format_reg |= bpp_reg << 12;
+ break;
+ case IDEAL_RAW:
+ /*use output format(v4l2_pix_fmt) to get pack format*/
+ pack_fmt = msm_isp_get_pack_format(io_format);
+ rc = msm_vfe46_convert_io_fmt_to_reg(pack_fmt, &pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ io_format_reg &= 0xFFFFFFC8;
+ io_format_reg |= bpp_reg << 4 | pack_reg;
+ break;
+ case RDI_INTF_0:
+ case RDI_INTF_1:
+ case RDI_INTF_2:
+ default:
+ pr_err("%s: Invalid stream source\n", __func__);
+ return -EINVAL;
+ }
+ msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x88);
+ return 0;
+}
+
+static int msm_vfe46_start_fetch_engine(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ int rc = 0;
+ uint32_t bufq_handle = 0;
+ struct msm_isp_buffer *buf = NULL;
+ struct msm_vfe_fetch_eng_start *fe_cfg = arg;
+ struct msm_isp_buffer_mapped_info mapped_info;
+
+ if (vfe_dev->fetch_engine_info.is_busy == 1) {
+ pr_err("%s: fetch engine busy\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
+	/*
+	 * There is also the option of passing the buffer address from
+	 * user space; in that case the driver needs to map the buffer
+	 * and use it.
+	 */
+ vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
+ vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
+ vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
+ vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
+
+ if (!fe_cfg->offline_mode) {
+ bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, fe_cfg->session_id,
+ fe_cfg->stream_id);
+ vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+
+ rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
+ vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
+ if (rc < 0 || !buf) {
+ pr_err("%s: No fetch buffer rc= %d buf= %p\n",
+ __func__, rc, buf);
+ return -EINVAL;
+ }
+ mapped_info = buf->mapped_info[0];
+ buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ } else {
+ rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
+ &mapped_info, fe_cfg->fd);
+ if (rc < 0) {
+ pr_err("%s: can not map buffer\n", __func__);
+ return -EINVAL;
+ }
+ }
+ vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
+ vfe_dev->fetch_engine_info.is_busy = 1;
+
+ msm_camera_io_w(mapped_info.paddr, vfe_dev->vfe_base + 0x268);
+
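+	/*
+	 * Trigger the fetch engine via the bus command register at 0x80
+	 * (the same register used for WM reload).
+	 */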
+ msm_camera_io_w_mb(0x100000, vfe_dev->vfe_base + 0x80);
+ msm_camera_io_w_mb(0x200000, vfe_dev->vfe_base + 0x80);
+
+ ISP_DBG("%s:VFE%d Fetch Engine ready\n", __func__, vfe_dev->pdev->id);
+ return 0;
+}
+
+static void msm_vfe46_cfg_fetch_engine(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint32_t x_size_word, temp;
+ struct msm_vfe_fetch_engine_cfg *fe_cfg = NULL;
+
+ if (pix_cfg->input_mux == EXTERNAL_READ) {
+ fe_cfg = &pix_cfg->fetch_engine_cfg;
+ pr_debug("%s:VFE%d wd x ht buf = %d x %d, fe = %d x %d\n",
+ __func__, vfe_dev->pdev->id, fe_cfg->buf_width,
+ fe_cfg->buf_height,
+ fe_cfg->fetch_width, fe_cfg->fetch_height);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x84);
+ temp &= 0xFFFFFFFD;
+ temp |= (1 << 1);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x84);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ temp &= 0xFEFFFFFF;
+ temp |= (1 << 24);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x5C);
+
+ temp = fe_cfg->fetch_height - 1;
+ msm_camera_io_w(temp & 0x3FFF, vfe_dev->vfe_base + 0x278);
+
+ x_size_word = msm_isp_cal_word_per_line(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
+ fe_cfg->fetch_width);
+ msm_camera_io_w((x_size_word - 1) << 16,
+ vfe_dev->vfe_base + 0x27C);
+
+ msm_camera_io_w(x_size_word << 16 |
+ (temp & 0x3FFF) << 2 | VFE46_FETCH_BURST_LEN,
+ vfe_dev->vfe_base + 0x280);
+
+ temp = ((fe_cfg->buf_width - 1) & 0x3FFF) << 16 |
+ ((fe_cfg->buf_height - 1) & 0x3FFF);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x284);
+
+		/* need a formula to calculate MAIN_UNPACK_PATTERN */
+ msm_camera_io_w(0xF6543210, vfe_dev->vfe_base + 0x288);
+ msm_camera_io_w(0xF, vfe_dev->vfe_base + 0x2A4);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_cgc_override(vfe_dev,
+ VFE46_BUS_RD_CGC_OVERRIDE_BIT, 1);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ temp |= 2 << 5;
+ temp |= 128 << 8;
+ temp |= pix_cfg->pixel_pattern;
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
+
+ } else {
+ pr_err("%s: Invalid mux configuration - mux: %d", __func__,
+ pix_cfg->input_mux);
+ }
+}
+
+static void msm_vfe46_cfg_testgen(struct vfe_device *vfe_dev,
+ struct msm_vfe_testgen_cfg *testgen_cfg)
+{
+ uint32_t temp;
+ uint32_t bit_per_pixel = 0;
+ uint32_t bpp_reg = 0;
+ uint32_t bayer_pix_pattern_reg = 0;
+ uint32_t unicolorbar_reg = 0;
+ uint32_t unicolor_enb = 0;
+
+ bit_per_pixel = msm_isp_get_bit_per_pixel(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+
+ switch (bit_per_pixel) {
+ case 8:
+ bpp_reg = 0x0;
+ break;
+ case 10:
+ bpp_reg = 0x1;
+ break;
+ case 12:
+ bpp_reg = 0x10;
+ break;
+ case 14:
+ bpp_reg = 0x11;
+ break;
+ default:
+ pr_err("%s: invalid bpp %d\n", __func__, bit_per_pixel);
+ break;
+ }
+
+ msm_camera_io_w(bpp_reg << 16 | testgen_cfg->burst_num_frame,
+ vfe_dev->vfe_base + 0xAF8);
+
+ msm_camera_io_w(((testgen_cfg->lines_per_frame - 1) << 16) |
+ (testgen_cfg->pixels_per_line - 1), vfe_dev->vfe_base + 0xAFC);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ temp |= (((testgen_cfg->h_blank) & 0x3FFF) << 8);
+ temp |= (1 << 24);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
+
+ msm_camera_io_w((1 << 16) | testgen_cfg->v_blank,
+ vfe_dev->vfe_base + 0xB0C);
+
+ switch (testgen_cfg->pixel_bayer_pattern) {
+ case ISP_BAYER_RGRGRG:
+ bayer_pix_pattern_reg = 0x0;
+ break;
+ case ISP_BAYER_GRGRGR:
+ bayer_pix_pattern_reg = 0x1;
+ break;
+ case ISP_BAYER_BGBGBG:
+ bayer_pix_pattern_reg = 0x10;
+ break;
+ case ISP_BAYER_GBGBGB:
+ bayer_pix_pattern_reg = 0x11;
+ break;
+ default:
+		pr_err("%s: invalid pix pattern %d\n",
+			__func__, testgen_cfg->pixel_bayer_pattern);
+ break;
+ }
+
+ if (testgen_cfg->color_bar_pattern == COLOR_BAR_8_COLOR) {
+ unicolor_enb = 0x0;
+ } else {
+ unicolor_enb = 0x1;
+ switch (testgen_cfg->color_bar_pattern) {
+ case UNICOLOR_WHITE:
+ unicolorbar_reg = 0x0;
+ break;
+ case UNICOLOR_YELLOW:
+ unicolorbar_reg = 0x1;
+ break;
+ case UNICOLOR_CYAN:
+ unicolorbar_reg = 0x10;
+ break;
+ case UNICOLOR_GREEN:
+ unicolorbar_reg = 0x11;
+ break;
+ case UNICOLOR_MAGENTA:
+ unicolorbar_reg = 0x100;
+ break;
+ case UNICOLOR_RED:
+ unicolorbar_reg = 0x101;
+ break;
+ case UNICOLOR_BLUE:
+ unicolorbar_reg = 0x110;
+ break;
+ case UNICOLOR_BLACK:
+ unicolorbar_reg = 0x111;
+ break;
+ default:
+ pr_err("%s: invalid colorbar %d\n",
+ __func__, testgen_cfg->color_bar_pattern);
+ break;
+ }
+ }
+
+ msm_camera_io_w((testgen_cfg->rotate_period << 8) |
+ (bayer_pix_pattern_reg << 6) | (unicolor_enb << 4) |
+ (unicolorbar_reg), vfe_dev->vfe_base + 0xB14);
+}
+
+static void msm_vfe46_cfg_camif(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint16_t first_pixel, last_pixel, first_line, last_line;
+ struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg;
+ uint32_t val, subsample_period, subsample_pattern;
+ struct msm_vfe_camif_subsample_cfg *subsample_cfg =
+ &pix_cfg->camif_cfg.subsample_cfg;
+ uint16_t bus_sub_en = 0;
+
+ if (subsample_cfg->pixel_skip || subsample_cfg->line_skip)
+ bus_sub_en = 1;
+ else
+ bus_sub_en = 0;
+
+ vfe_dev->dual_vfe_enable = camif_cfg->is_split;
+
+ msm_camera_io_w(pix_cfg->input_mux << 5 | pix_cfg->pixel_pattern,
+ vfe_dev->vfe_base + 0x50);
+
+ first_pixel = camif_cfg->first_pixel;
+ last_pixel = camif_cfg->last_pixel;
+ first_line = camif_cfg->first_line;
+ last_line = camif_cfg->last_line;
+ subsample_period = camif_cfg->subsample_cfg.irq_subsample_period;
+ subsample_pattern = camif_cfg->subsample_cfg.irq_subsample_pattern;
+
+ if (bus_sub_en) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x3AC);
+ val &= 0xFFFFFFDF;
+ val = val | bus_sub_en << 5;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x3AC);
+ subsample_cfg->pixel_skip &= 0x0000FFFF;
+ subsample_cfg->line_skip &= 0x0000FFFF;
+ msm_camera_io_w((subsample_cfg->line_skip << 16) |
+ subsample_cfg->pixel_skip, vfe_dev->vfe_base + 0x3C0);
+ }
+
+ msm_camera_io_w(camif_cfg->lines_per_frame << 16 |
+ camif_cfg->pixels_per_line, vfe_dev->vfe_base + 0x3B4);
+
+ msm_camera_io_w(first_pixel << 16 | last_pixel,
+ vfe_dev->vfe_base + 0x3B8);
+
+ msm_camera_io_w(first_line << 16 | last_line,
+ vfe_dev->vfe_base + 0x3BC);
+
+ if (subsample_period && subsample_pattern) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x3AC);
+ val &= 0xFFE0FFFF;
+		val |= (subsample_period - 1) << 16;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x3AC);
+ ISP_DBG("%s:camif PERIOD %x PATTERN %x\n",
+ __func__, subsample_period, subsample_pattern);
+
+ val = subsample_pattern;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x3C8);
+ } else {
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x3C8);
+ }
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x39C);
+ val |= camif_cfg->camif_input;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x39C);
+}
+
+static void msm_vfe46_cfg_input_mux(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint32_t core_cfg = 0;
+ uint32_t val = 0;
+
+ core_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ core_cfg &= 0xFFFFFF9F;
+
+ switch (pix_cfg->input_mux) {
+ case CAMIF:
+ core_cfg |= 0x0 << 5;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
+ msm_vfe46_cfg_camif(vfe_dev, pix_cfg);
+ break;
+ case TESTGEN:
+ /* Change CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
+ val |= (1 << 31);
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x3C);
+
+		/* CAMIF and TESTGEN will both go through CAMIF */
+ core_cfg |= 0x1 << 5;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
+ msm_vfe46_cfg_camif(vfe_dev, pix_cfg);
+ msm_vfe46_cfg_testgen(vfe_dev, &pix_cfg->testgen_cfg);
+ break;
+ case EXTERNAL_READ:
+ core_cfg |= 0x2 << 5;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
+ msm_vfe46_cfg_fetch_engine(vfe_dev, pix_cfg);
+ break;
+ default:
+ pr_err("%s: Unsupported input mux %d\n",
+ __func__, pix_cfg->input_mux);
+ break;
+ }
+}
+
+static void msm_vfe46_update_camif_state(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state)
+{
+ uint32_t val;
+ bool bus_en, vfe_en;
+
+ if (update_state == NO_UPDATE)
+ return;
+
+ if (update_state == ENABLE_CAMIF) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ val |= 0xF5;
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
+
+ bus_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
+ vfe_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x3AC);
+ val &= 0xFFFFFF3F;
+ val = val | bus_en << 7 | vfe_en << 6;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x3AC);
+ msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x3A8);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x3A8);
+ /* configure EPOCH0 for 20 lines */
+ msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x3CC);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
+ /* testgen GO*/
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1, vfe_dev->vfe_base + 0xAF4);
+ } else if (update_state == DISABLE_CAMIF) {
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x3A8);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ /* testgen OFF*/
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0xAF4);
+ } else if (update_state == DISABLE_CAMIF_IMMEDIATELY) {
+ msm_camera_io_w_mb(0x6, vfe_dev->vfe_base + 0x3A8);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0xAF4);
+ }
+}
+
+static void msm_vfe46_cfg_rdi_reg(
+ struct vfe_device *vfe_dev, struct msm_vfe_rdi_cfg *rdi_cfg,
+ enum msm_vfe_input_src input_src)
+{
+ uint8_t rdi = input_src - VFE_RAW_0;
+ uint32_t rdi_reg_cfg;
+
+ rdi_reg_cfg = msm_camera_io_r(
+ vfe_dev->vfe_base + VFE46_RDI_BASE(rdi));
+ rdi_reg_cfg &= 0x03;
+ rdi_reg_cfg |= (rdi * 3) << 28 | rdi_cfg->cid << 4 | 0x4;
+ msm_camera_io_w(
+ rdi_reg_cfg, vfe_dev->vfe_base + VFE46_RDI_BASE(rdi));
+}
+
+static void msm_vfe46_axi_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ uint32_t val;
+ uint32_t wm_base = VFE46_WM_BASE(stream_info->wm[plane_idx]);
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + wm_base + 0xC);
+ val &= ~0x2;
+ if (stream_info->frame_based)
+ val |= 0x2;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
+ if (!stream_info->frame_based) {
+ /* WR_IMAGE_SIZE */
+ val =
+ ((msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[plane_idx].
+ output_width)+3)/4 - 1) << 16 |
+ (stream_info->plane_cfg[plane_idx].
+ output_height - 1);
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ /* WR_BUFFER_CFG */
+ val = VFE46_BURST_LEN |
+ (stream_info->plane_cfg[plane_idx].output_height - 1) <<
+ 2 |
+ ((msm_isp_cal_word_per_line(stream_info->output_format,
+ stream_info->plane_cfg[plane_idx].
+ output_stride)+1)/2) << 16;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
+ }
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + wm_base + 0x20);
+	/* TODO: Add IRQ subsample pattern */
+}
+
+static void msm_vfe46_axi_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint32_t val = 0;
+ uint32_t wm_base = VFE46_WM_BASE(stream_info->wm[plane_idx]);
+
+ /* WR_ADDR_CFG */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0xC);
+ /* WR_IMAGE_SIZE */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ /* WR_BUFFER_CFG */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x18);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
+}
+
+static void msm_vfe46_axi_cfg_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ struct msm_vfe_axi_plane_cfg *plane_cfg =
+ &stream_info->plane_cfg[plane_idx];
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_cfg = 0;
+ uint32_t xbar_reg_cfg = 0;
+
+ switch (stream_info->stream_src) {
+ case PIX_VIDEO:
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER: {
+ if (plane_cfg->output_plane_format != CRCB_PLANE &&
+ plane_cfg->output_plane_format != CBCR_PLANE) {
+ /* SINGLE_STREAM_SEL */
+ xbar_cfg |= plane_cfg->output_plane_format << 8;
+ } else {
+ switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ /* PAIR_STREAM_SWAP_CTRL */
+ xbar_cfg |= 0x3 << 4;
+ break;
+ }
+ xbar_cfg |= 0x1 << 2; /* PAIR_STREAM_EN */
+ }
+ if (stream_info->stream_src == PIX_VIEWFINDER)
+ xbar_cfg |= 0x1; /* VIEW_STREAM_EN */
+ else if (stream_info->stream_src == PIX_VIDEO)
+ xbar_cfg |= 0x2;
+ break;
+ }
+ case CAMIF_RAW:
+ xbar_cfg = 0x300;
+ break;
+ case IDEAL_RAW:
+ xbar_cfg = 0x400;
+ break;
+ case RDI_INTF_0:
+ xbar_cfg = 0xC00;
+ break;
+ case RDI_INTF_1:
+ xbar_cfg = 0xD00;
+ break;
+ case RDI_INTF_2:
+ xbar_cfg = 0xE00;
+ break;
+ default:
+ pr_err("%s: Invalid stream src\n", __func__);
+ break;
+ }
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE46_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE46_XBAR_SHIFT(wm));
+ xbar_reg_cfg |= (xbar_cfg << VFE46_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE46_XBAR_BASE(wm));
+}
+
+static void msm_vfe46_axi_clear_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_reg_cfg = 0;
+
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE46_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE46_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE46_XBAR_BASE(wm));
+}
+
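+/*
+ * Distribute the image UB among the active write masters in proportion
+ * to each WM's image size, on top of the per-WM minimum (min_wm_ub).
+ */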
+static void msm_vfe46_cfg_axi_ub_equal_default(
+ struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data =
+ &vfe_dev->axi_data;
+ uint32_t total_image_size = 0;
+ uint8_t num_used_wms = 0;
+ uint32_t prop_size = 0;
+ uint32_t wm_ub_size;
+ uint64_t delta;
+
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i] > 0) {
+ num_used_wms++;
+ total_image_size += axi_data->wm_image_size[i];
+ }
+ }
+ if (vfe_dev->pdev->id == ISP_VFE0) {
+ prop_size = MSM_ISP46_TOTAL_IMAGE_UB_VFE0 -
+ axi_data->hw_info->min_wm_ub * num_used_wms;
+ } else if (vfe_dev->pdev->id == ISP_VFE1) {
+ prop_size = MSM_ISP46_TOTAL_IMAGE_UB_VFE1 -
+ axi_data->hw_info->min_wm_ub * num_used_wms;
+ } else {
+ pr_err("%s: incorrect VFE device\n", __func__);
+ }
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i]) {
+ delta = (uint64_t)axi_data->wm_image_size[i] *
+ (uint64_t)prop_size;
+ do_div(delta, total_image_size);
+ wm_ub_size = axi_data->hw_info->min_wm_ub +
+ (uint32_t)delta;
+ msm_camera_io_w(ub_offset << 16 | (wm_ub_size - 1),
+ vfe_dev->vfe_base + VFE46_WM_BASE(i) + 0x10);
+ ub_offset += wm_ub_size;
+ } else
+ msm_camera_io_w(0,
+ vfe_dev->vfe_base + VFE46_WM_BASE(i) + 0x10);
+ }
+}
+
+static void msm_vfe46_cfg_axi_ub_equal_slicing(
+ struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t ub_equal_slice = 0;
+ if (vfe_dev->pdev->id == ISP_VFE0) {
+ ub_equal_slice = MSM_ISP46_TOTAL_IMAGE_UB_VFE0 /
+ axi_data->hw_info->num_wm;
+ } else if (vfe_dev->pdev->id == ISP_VFE1) {
+ ub_equal_slice = MSM_ISP46_TOTAL_IMAGE_UB_VFE1 /
+ axi_data->hw_info->num_wm;
+ } else {
+		pr_err("%s: incorrect VFE device\n", __func__);
+ }
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ msm_camera_io_w(ub_offset << 16 | (ub_equal_slice - 1),
+ vfe_dev->vfe_base + VFE46_WM_BASE(i) + 0x10);
+ ub_offset += ub_equal_slice;
+ }
+}
+
+static void msm_vfe46_cfg_axi_ub(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
+ if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
+ msm_vfe46_cfg_axi_ub_equal_slicing(vfe_dev);
+ else
+ msm_vfe46_cfg_axi_ub_equal_default(vfe_dev);
+}
+
+static void msm_vfe46_read_wm_ping_pong_addr(
+ struct vfe_device *vfe_dev)
+{
+ msm_camera_io_dump(vfe_dev->vfe_base +
+ (VFE46_WM_BASE(0) & 0xFFFFFFF0), 0x200, 1);
+}
+
+static void msm_vfe46_update_ping_pong_addr(
+ void __iomem *vfe_base,
+ uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
+ int32_t buf_size)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE46_PING_PONG_BASE(wm_idx, pingpong_bit));
+}
+
+static int msm_vfe46_axi_halt(struct vfe_device *vfe_dev,
+ uint32_t blocking)
+{
+ int rc = 0;
+ enum msm_vfe_input_src i;
+
+ /* Keep only halt and restart mask */
+ msm_camera_io_w(BIT(31), vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w(BIT(8), vfe_dev->vfe_base + 0x60);
+
+ /*Clear IRQ Status0, only leave reset irq mask*/
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
+
+ /*Clear IRQ Status1, only leave halt irq mask*/
+ msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
+
+ /*push clear cmd*/
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ == OVERFLOW_DETECTED)
+ pr_err_ratelimited("%s: VFE%d halt for recovery, blocking %d\n",
+ __func__, vfe_dev->pdev->id, blocking);
+
+ if (blocking) {
+ init_completion(&vfe_dev->halt_complete);
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x374);
+ rc = wait_for_completion_timeout(
+ &vfe_dev->halt_complete, msecs_to_jiffies(500));
+ if (rc <= 0)
+ pr_err("%s:VFE%d halt timeout rc=%d\n", __func__,
+ vfe_dev->pdev->id, rc);
+ } else {
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x374);
+ }
+
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ /* if any stream is waiting for update, signal complete */
+ if (vfe_dev->axi_data.stream_update[i]) {
+ ISP_DBG("%s: complete stream update\n", __func__);
+ msm_isp_axi_stream_update(vfe_dev, i);
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ }
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ ISP_DBG("%s: complete on axi config update\n",
+ __func__);
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ }
+ }
+
+ if (atomic_read(&vfe_dev->stats_data.stats_update)) {
+ ISP_DBG("%s: complete on stats update\n", __func__);
+ msm_isp_stats_stream_update(vfe_dev);
+ if (atomic_read(&vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ }
+
+ return rc;
+}
+
+static int msm_vfe46_axi_restart(struct vfe_device *vfe_dev,
+ uint32_t blocking, uint32_t enable_camif)
+{
+ vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev);
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+ msm_camera_io_w_mb(0x20000, vfe_dev->vfe_base + 0x3CC);
+
+ /* Start AXI */
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x374);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_SRC_MAX);
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+ if (enable_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, ENABLE_CAMIF);
+ }
+
+ return 0;
+}
+
+static uint32_t msm_vfe46_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 8) & 0x7F;
+}
+
+static uint32_t msm_vfe46_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 25) & 0xF;
+}
+
+static uint32_t msm_vfe46_get_pingpong_status(
+ struct vfe_device *vfe_dev)
+{
+ return msm_camera_io_r(vfe_dev->vfe_base + 0x2A8);
+}
+
+static int msm_vfe46_get_stats_idx(enum msm_isp_stats_type stats_type)
+{
+ switch (stats_type) {
+ case MSM_ISP_STATS_HDR_BE:
+ return STATS_IDX_HDR_BE;
+ case MSM_ISP_STATS_BG:
+ return STATS_IDX_BG;
+ case MSM_ISP_STATS_BF:
+ return STATS_IDX_BF;
+ case MSM_ISP_STATS_HDR_BHIST:
+ return STATS_IDX_HDR_BHIST;
+ case MSM_ISP_STATS_RS:
+ return STATS_IDX_RS;
+ case MSM_ISP_STATS_CS:
+ return STATS_IDX_CS;
+ case MSM_ISP_STATS_IHIST:
+ return STATS_IDX_IHIST;
+ case MSM_ISP_STATS_BHIST:
+ return STATS_IDX_BHIST;
+ case MSM_ISP_STATS_BF_SCALE:
+ return STATS_IDX_BF_SCALE;
+ default:
+ pr_err("%s: Invalid stats type\n", __func__);
+ return -EINVAL;
+ }
+}
+
+static int msm_vfe46_stats_check_streams(
+ struct msm_vfe_stats_stream *stream_info)
+{
+ if (stream_info[STATS_IDX_BF].state ==
+ STATS_AVALIABLE &&
+ stream_info[STATS_IDX_BF_SCALE].state !=
+ STATS_AVALIABLE) {
+ pr_err("%s: does not support BF_SCALE while BF is disabled\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (stream_info[STATS_IDX_BF].state != STATS_AVALIABLE &&
+ stream_info[STATS_IDX_BF_SCALE].state != STATS_AVALIABLE &&
+ stream_info[STATS_IDX_BF].composite_flag !=
+ stream_info[STATS_IDX_BF_SCALE].composite_flag) {
+ pr_err("%s: Different composite flag for BF and BF_SCALE\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
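+/*
+ * Stats composite groups are 8-bit fields starting at bit 16 of the
+ * register at 0x78. BF_SCALE shares BF's composite bit, so the
+ * BF_SCALE bit is shifted out before the register is programmed.
+ */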
+static void msm_vfe46_stats_cfg_comp_mask(
+ struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t request_comp_index, uint8_t enable)
+{
+ uint32_t comp_mask_reg, mask_bf_scale;
+ atomic_t *stats_comp_mask;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask < 1)
+ return;
+
+ if (request_comp_index >= MAX_NUM_STATS_COMP_MASK) {
+ pr_err("%s: num of comp masks %d exceed max %d\n",
+ __func__, request_comp_index,
+ MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask >
+ MAX_NUM_STATS_COMP_MASK) {
+ pr_err("%s: num of comp masks %d exceed max %d\n",
+ __func__,
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask,
+ MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+ /* BF scale is controlled by BF also so ignore bit 0 of BF scale */
+ stats_mask = stats_mask & 0x1FF;
+ mask_bf_scale = stats_mask >> SHIFT_BF_SCALE_BIT;
+
+ stats_comp_mask = &stats_data->stats_comp_mask[request_comp_index];
+ comp_mask_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x78);
+
+ if (enable) {
+ comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
+ atomic_set(stats_comp_mask, stats_mask |
+ atomic_read(stats_comp_mask));
+ } else {
+ if (!(atomic_read(stats_comp_mask) & stats_mask))
+ return;
+ if (stats_mask & (1 << STATS_IDX_BF_SCALE) &&
+ atomic_read(stats_comp_mask) &
+ (1 << STATS_IDX_BF_SCALE))
+ atomic_set(stats_comp_mask,
+ ~(1 << STATS_IDX_BF_SCALE) &
+ atomic_read(stats_comp_mask));
+
+ atomic_set(stats_comp_mask,
+ ~stats_mask & atomic_read(stats_comp_mask));
+ comp_mask_reg &= ~(mask_bf_scale <<
+ (16 + request_comp_index * 8));
+ }
+ msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x78);
+
+ ISP_DBG("%s: comp_mask_reg: %x comp mask0 %x mask1: %x\n",
+ __func__, comp_mask_reg,
+ atomic_read(&stats_data->stats_comp_mask[0]),
+ atomic_read(&stats_data->stats_comp_mask[1]));
+}
+
+static void msm_vfe46_stats_cfg_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask;
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask |= 1 << (STATS_IDX(stream_info->stream_handle) + 15);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
+static void msm_vfe46_stats_clear_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask;
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask &= ~(1 << (STATS_IDX(stream_info->stream_handle) + 15));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
+static void msm_vfe46_stats_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE46_STATS_BASE(stats_idx);
+
+ /*
+ * BF_SCALE does not have its own WR_ADDR_CFG,
+ * IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
+ * it's using the same from BF.
+ */
+ if (stats_idx == STATS_IDX_BF_SCALE)
+ return;
+
+ /* WR_ADDR_CFG */
+ msm_camera_io_w(stream_info->framedrop_period << 2,
+ vfe_dev->vfe_base + stats_base + 0x8);
+ /* WR_IRQ_FRAMEDROP_PATTERN */
+ msm_camera_io_w(stream_info->framedrop_pattern,
+ vfe_dev->vfe_base + stats_base + 0x10);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + stats_base + 0x14);
+}
+
+static void msm_vfe46_stats_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t val = 0;
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE46_STATS_BASE(stats_idx);
+
+ /*
+ * BF_SCALE does not have its own WR_ADDR_CFG,
+ * IRQ_FRAMEDROP_PATTERN and IRQ_SUBSAMPLE_PATTERN;
+ * it's using the same from BF.
+ */
+ if (stats_idx == STATS_IDX_BF_SCALE)
+ return;
+
+ /* WR_ADDR_CFG */
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x8);
+ /* WR_IRQ_FRAMEDROP_PATTERN */
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x10);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x14);
+}
+
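+/*
+ * Carve the stats UB out of the top of the VFE UB, walking downward:
+ * each stats stream gets an equal 16-unit slice, programmed as
+ * burst length / offset / (size - 1) in its UB_CFG register
+ * (BF_SCALE's UB_CFG sits at a different offset than the other stats).
+ */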
+static void msm_vfe46_stats_cfg_ub(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ uint32_t ub_size[VFE46_NUM_STATS_TYPE] = {
+ 16, /* MSM_ISP_STATS_BF_SCALE */
+ 16, /* MSM_ISP_STATS_HDR_BE */
+ 16, /* MSM_ISP_STATS_BG */
+ 16, /* MSM_ISP_STATS_BF */
+ 16, /* MSM_ISP_STATS_HDR_BHIST */
+ 16, /* MSM_ISP_STATS_RS */
+ 16, /* MSM_ISP_STATS_CS */
+ 16, /* MSM_ISP_STATS_IHIST */
+ 16, /* MSM_ISP_STATS_BHIST */
+ };
+ if (vfe_dev->pdev->id == ISP_VFE1)
+ ub_offset = VFE46_UB_SIZE_VFE1;
+ else if (vfe_dev->pdev->id == ISP_VFE0)
+ ub_offset = VFE46_UB_SIZE_VFE0;
+ else
+ pr_err("%s: incorrect VFE device\n", __func__);
+
+ for (i = 0; i < VFE46_NUM_STATS_TYPE; i++) {
+ ub_offset -= ub_size[i];
+ msm_camera_io_w(VFE46_STATS_BURST_LEN << 30 |
+ ub_offset << 16 | (ub_size[i] - 1),
+ vfe_dev->vfe_base + VFE46_STATS_BASE(i) +
+ ((i == STATS_IDX_BF_SCALE) ? 0x8 : 0xC));
+ }
+}
+
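+/*
+ * Map each requested stats block to its clock-gating (CGC) override bit
+ * and set or clear those bits in the register at 0x30; BF and BF_SCALE
+ * share one bit.
+ */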
+static void msm_vfe46_stats_update_cgc_override(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, cgc_mask = 0;
+
+ for (i = 0; i < VFE46_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case STATS_IDX_HDR_BE:
+ cgc_mask |= 1;
+ break;
+ case STATS_IDX_HDR_BHIST:
+ cgc_mask |= (1 << 1);
+ break;
+ case STATS_IDX_BF:
+ cgc_mask |= (1 << 2);
+ break;
+ case STATS_IDX_BG:
+ cgc_mask |= (1 << 3);
+ break;
+ case STATS_IDX_BHIST:
+ cgc_mask |= (1 << 4);
+ break;
+ case STATS_IDX_RS:
+ cgc_mask |= (1 << 5);
+ break;
+ case STATS_IDX_CS:
+ cgc_mask |= (1 << 6);
+ break;
+ case STATS_IDX_IHIST:
+ cgc_mask |= (1 << 7);
+ break;
+ case STATS_IDX_BF_SCALE:
+ cgc_mask |= (1 << 2);
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ /* CGC override */
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
+ if (enable)
+ module_cfg |= cgc_mask;
+ else
+ module_cfg &= ~cgc_mask;
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x30);
+}
+
+static bool msm_vfe46_is_module_cfg_lock_needed(
+ uint32_t reg_offset)
+{
+ return false;
+}
+
+static void msm_vfe46_stats_enable_module(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, module_cfg_mask = 0;
+ uint32_t stats_cfg, stats_cfg_mask = 0;
+
+ for (i = 0; i < VFE46_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case STATS_IDX_HDR_BE:
+ module_cfg_mask |= 1;
+ break;
+ case STATS_IDX_HDR_BHIST:
+ module_cfg_mask |= 1 << 1;
+ break;
+ case STATS_IDX_BF:
+ module_cfg_mask |= 1 << 2;
+ break;
+ case STATS_IDX_BG:
+ module_cfg_mask |= 1 << 3;
+ break;
+ case STATS_IDX_BHIST:
+ module_cfg_mask |= 1 << 4;
+ break;
+ case STATS_IDX_RS:
+ module_cfg_mask |= 1 << 5;
+ break;
+ case STATS_IDX_CS:
+ module_cfg_mask |= 1 << 6;
+ break;
+ case STATS_IDX_IHIST:
+ module_cfg_mask |= 1 << 7;
+ break;
+ case STATS_IDX_BF_SCALE:
+ stats_cfg_mask |= 1 << 5;
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x44);
+ if (enable)
+ module_cfg |= module_cfg_mask;
+ else
+ module_cfg &= ~module_cfg_mask;
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x44);
+
+ stats_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x9B8);
+ if (enable)
+ stats_cfg |= stats_cfg_mask;
+ else
+ stats_cfg &= ~stats_cfg_mask;
+ msm_camera_io_w(stats_cfg, vfe_dev->vfe_base + 0x9B8);
+}
+
+static void msm_vfe46_stats_update_ping_pong_addr(
+ void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status, dma_addr_t paddr)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE46_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
+}
+
+static uint32_t msm_vfe46_stats_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 15) & 0x1FF;
+}
+
+static uint32_t msm_vfe46_stats_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 29) & 0x3;
+}
+
+static uint32_t msm_vfe46_stats_get_frame_id(
+ struct vfe_device *vfe_dev)
+{
+ return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+}
+
+static int msm_vfe46_get_platform_data(struct vfe_device *vfe_dev)
+{
+ int rc = 0;
+
+ vfe_dev->vfe_mem = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe");
+ if (!vfe_dev->vfe_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_vbif_mem = platform_get_resource_byname(
+ vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe_vbif");
+ if (!vfe_dev->vfe_vbif_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_irq = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_IRQ, "vfe");
+ if (!vfe_dev->vfe_irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->fs_vfe = regulator_get(&vfe_dev->pdev->dev, "vdd");
+ if (IS_ERR(vfe_dev->fs_vfe)) {
+ pr_err("%s: Regulator get failed %ld\n", __func__,
+ PTR_ERR(vfe_dev->fs_vfe));
+ vfe_dev->fs_vfe = NULL;
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+vfe_no_resource:
+ return rc;
+}
+
+static void msm_vfe46_get_error_mask(
+ uint32_t *error_mask0, uint32_t *error_mask1)
+{
+ *error_mask0 = 0x00000000;
+ *error_mask1 = 0x01FFFEFF;
+}
+
+static void msm_vfe46_get_overflow_mask(uint32_t *overflow_mask)
+{
+ *overflow_mask = 0x01FFFE7E;
+}
+
+static void msm_vfe46_get_rdi_wm_mask(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask)
+{
+ *rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
+}
+
+static void msm_vfe46_get_irq_mask(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+ *irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ *irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
+}
+
+static void msm_vfe46_restore_irq_mask(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
+ vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
+ vfe_dev->vfe_base + 0x60);
+}
+
+static void msm_vfe46_get_halt_restart_mask(uint32_t *irq0_mask,
+ uint32_t *irq1_mask)
+{
+ *irq0_mask = BIT(31);
+ *irq1_mask = BIT(8);
+}
+
+static struct msm_vfe_axi_hardware_info msm_vfe46_axi_hw_info = {
+ .num_wm = 7,
+ .num_comp_mask = 3,
+ .num_rdi = 3,
+ .num_rdi_master = 3,
+ .min_wm_ub = 96,
+ .scratch_buf_range = SZ_32M,
+};
+
+static struct msm_vfe_stats_hardware_info msm_vfe46_stats_hw_info = {
+ .stats_capability_mask =
+ 1 << MSM_ISP_STATS_HDR_BE | 1 << MSM_ISP_STATS_BF |
+ 1 << MSM_ISP_STATS_BG | 1 << MSM_ISP_STATS_BHIST |
+ 1 << MSM_ISP_STATS_HDR_BHIST | 1 << MSM_ISP_STATS_IHIST |
+ 1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS |
+ 1 << MSM_ISP_STATS_BF_SCALE,
+ .stats_ping_pong_offset = stats_pingpong_offset_map,
+ .num_stats_type = VFE46_NUM_STATS_TYPE,
+ .num_stats_comp_mask = VFE46_NUM_STATS_COMP,
+};
+
+struct msm_vfe_hardware_info vfe46_hw_info = {
+ .num_iommu_ctx = 1,
+ .num_iommu_secure_ctx = 1,
+ .vfe_clk_idx = VFE46_CLK_IDX,
+ .runtime_axi_update = 0,
+ .vfe_ops = {
+ .irq_ops = {
+ .read_irq_status = msm_vfe46_read_irq_status,
+ .process_camif_irq = msm_vfe46_process_input_irq,
+ .process_reset_irq = msm_vfe46_process_reset_irq,
+ .process_halt_irq = msm_vfe46_process_halt_irq,
+ .process_reg_update = msm_vfe46_process_reg_update,
+ .process_axi_irq = msm_isp_process_axi_irq,
+ .process_stats_irq = msm_isp_process_stats_irq,
+ .process_epoch_irq = msm_vfe46_process_epoch_irq,
+ .enable_camif_err = msm_vfe46_enable_camif_error,
+ },
+ .axi_ops = {
+ .reload_wm = msm_vfe46_axi_reload_wm,
+ .enable_wm = msm_vfe46_axi_enable_wm,
+ .cfg_io_format = msm_vfe46_cfg_io_format,
+ .cfg_comp_mask = msm_vfe46_axi_cfg_comp_mask,
+ .clear_comp_mask = msm_vfe46_axi_clear_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe46_axi_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe46_axi_clear_wm_irq_mask,
+ .cfg_framedrop = msm_vfe46_cfg_framedrop,
+ .clear_framedrop = msm_vfe46_clear_framedrop,
+ .cfg_wm_reg = msm_vfe46_axi_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe46_axi_clear_wm_reg,
+ .cfg_wm_xbar_reg = msm_vfe46_axi_cfg_wm_xbar_reg,
+ .clear_wm_xbar_reg = msm_vfe46_axi_clear_wm_xbar_reg,
+ .cfg_ub = msm_vfe46_cfg_axi_ub,
+ .read_wm_ping_pong_addr =
+ msm_vfe46_read_wm_ping_pong_addr,
+ .update_ping_pong_addr =
+ msm_vfe46_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe46_get_comp_mask,
+ .get_wm_mask = msm_vfe46_get_wm_mask,
+ .get_pingpong_status = msm_vfe46_get_pingpong_status,
+ .halt = msm_vfe46_axi_halt,
+ .restart = msm_vfe46_axi_restart,
+ .update_cgc_override =
+ msm_vfe46_axi_update_cgc_override,
+ },
+ .core_ops = {
+ .reg_update = msm_vfe46_reg_update,
+ .cfg_input_mux = msm_vfe46_cfg_input_mux,
+ .update_camif_state = msm_vfe46_update_camif_state,
+ .start_fetch_eng = msm_vfe46_start_fetch_engine,
+ .cfg_rdi_reg = msm_vfe46_cfg_rdi_reg,
+ .reset_hw = msm_vfe46_reset_hardware,
+ .init_hw = msm_vfe46_init_hardware,
+ .init_hw_reg = msm_vfe46_init_hardware_reg,
+ .clear_status_reg = msm_vfe46_clear_status_reg,
+ .release_hw = msm_vfe46_release_hardware,
+ .get_platform_data = msm_vfe46_get_platform_data,
+ .get_error_mask = msm_vfe46_get_error_mask,
+ .get_overflow_mask = msm_vfe46_get_overflow_mask,
+ .get_rdi_wm_mask = msm_vfe46_get_rdi_wm_mask,
+ .get_irq_mask = msm_vfe46_get_irq_mask,
+ .restore_irq_mask = msm_vfe46_restore_irq_mask,
+ .get_halt_restart_mask =
+ msm_vfe46_get_halt_restart_mask,
+ .process_error_status = msm_vfe46_process_error_status,
+ .is_module_cfg_lock_needed =
+ msm_vfe46_is_module_cfg_lock_needed,
+ },
+ .stats_ops = {
+ .get_stats_idx = msm_vfe46_get_stats_idx,
+ .check_streams = msm_vfe46_stats_check_streams,
+ .cfg_comp_mask = msm_vfe46_stats_cfg_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe46_stats_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe46_stats_clear_wm_irq_mask,
+ .cfg_wm_reg = msm_vfe46_stats_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe46_stats_clear_wm_reg,
+ .cfg_ub = msm_vfe46_stats_cfg_ub,
+ .enable_module = msm_vfe46_stats_enable_module,
+ .update_ping_pong_addr =
+ msm_vfe46_stats_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe46_stats_get_comp_mask,
+ .get_wm_mask = msm_vfe46_stats_get_wm_mask,
+ .get_frame_id = msm_vfe46_stats_get_frame_id,
+ .get_pingpong_status = msm_vfe46_get_pingpong_status,
+ .update_cgc_override =
+ msm_vfe46_stats_update_cgc_override,
+ },
+ },
+ .dmi_reg_offset = 0xACC,
+ .axi_hw_info = &msm_vfe46_axi_hw_info,
+ .stats_hw_info = &msm_vfe46_stats_hw_info,
+};
+EXPORT_SYMBOL(vfe46_hw_info);
+
+static const struct of_device_id msm_vfe46_dt_match[] = {
+ {
+ .compatible = "qcom,vfe46",
+ .data = &vfe46_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe46_dt_match);
+
+static struct platform_driver vfe46_driver = {
+ .probe = vfe_hw_probe,
+ .driver = {
+ .name = "msm_vfe46",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe46_dt_match,
+ },
+};
+
+static int __init msm_vfe46_init_module(void)
+{
+ return platform_driver_register(&vfe46_driver);
+}
+
+static void __exit msm_vfe46_exit_module(void)
+{
+ platform_driver_unregister(&vfe46_driver);
+}
+
+module_init(msm_vfe46_init_module);
+module_exit(msm_vfe46_exit_module);
+MODULE_DESCRIPTION("MSM VFE46 driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.h
new file mode 100644
index 000000000000..bf8739318d41
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISP46_H__
+#define __MSM_ISP46_H__
+
+extern struct msm_vfe_hardware_info vfe46_hw_info;
+#endif /* __MSM_ISP46_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
new file mode 100644
index 000000000000..b217186fe1c5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -0,0 +1,2418 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+
+#include "msm_isp47.h"
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_isp.h"
+#include "msm.h"
+#include "msm_camera_io_util.h"
+#include "cam_hw_ops.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define VFE47_8996V1_VERSION 0x70000000
+
+#define VFE47_BURST_LEN 3
+#define VFE47_FETCH_BURST_LEN 3
+#define VFE47_STATS_BURST_LEN 3
+#define VFE47_UB_SIZE_VFE0 2048
+#define VFE47_UB_SIZE_VFE1 1536
+#define VFE47_UB_STATS_SIZE 144
+#define MSM_ISP47_TOTAL_IMAGE_UB_VFE0 (VFE47_UB_SIZE_VFE0 - VFE47_UB_STATS_SIZE)
+#define MSM_ISP47_TOTAL_IMAGE_UB_VFE1 (VFE47_UB_SIZE_VFE1 - VFE47_UB_STATS_SIZE)
+#define VFE47_WM_BASE(idx) (0xA0 + 0x2C * idx)
+#define VFE47_RDI_BASE(idx) (0x46C + 0x4 * idx)
+#define VFE47_XBAR_BASE(idx) (0x90 + 0x4 * (idx / 2))
+#define VFE47_XBAR_SHIFT(idx) ((idx%2) ? 16 : 0)
+/*
+ * Select a WM's ping or pong address register based on the
+ * ping-pong status bit for that WM.
+ */
+#define VFE47_PING_PONG_BASE(wm, ping_pong) \
+ (VFE47_WM_BASE(wm) + 0x4 * (1 + (((~ping_pong) & 0x1) * 2)))
+#define SHIFT_BF_SCALE_BIT 1
+#define VFE47_NUM_STATS_COMP 2
+
+#define VFE47_BUS_RD_CGC_OVERRIDE_BIT 16
+
+/*composite mask order*/
+#define STATS_COMP_IDX_HDR_BE 0
+#define STATS_COMP_IDX_BG 1
+#define STATS_COMP_IDX_BF 2
+#define STATS_COMP_IDX_HDR_BHIST 3
+#define STATS_COMP_IDX_RS 4
+#define STATS_COMP_IDX_CS 5
+#define STATS_COMP_IDX_IHIST 6
+#define STATS_COMP_IDX_BHIST 7
+#define STATS_COMP_IDX_AEC_BG 8
+#define VFE47_VBIF_CLK_OFFSET 0x4
+
+static uint32_t stats_base_addr[] = {
+ 0x1D4, /* HDR_BE */
+ 0x254, /* BG(AWB_BG) */
+ 0x214, /* BF */
+ 0x1F4, /* HDR_BHIST */
+ 0x294, /* RS */
+ 0x2B4, /* CS */
+ 0x2D4, /* IHIST */
+ 0x274, /* BHIST (SKIN_BHIST) */
+ 0x234, /* AEC_BG */
+};
+
+static uint8_t stats_pingpong_offset_map[] = {
+ 8, /* HDR_BE */
+ 12, /* BG(AWB_BG) */
+ 10, /* BF */
+ 9, /* HDR_BHIST */
+ 14, /* RS */
+ 15, /* CS */
+ 16, /* IHIST */
+ 13, /* BHIST (SKIN_BHIST) */
+ 11, /* AEC_BG */
+};
+
+static uint8_t stats_irq_map_comp_mask[] = {
+ 16, /* HDR_BE */
+ 17, /* BG(AWB_BG) */
+ 18, /* BF EARLY DONE/ BF */
+ 19, /* HDR_BHIST */
+ 20, /* RS */
+ 21, /* CS */
+ 22, /* IHIST */
+ 23, /* BHIST (SKIN_BHIST) */
+ 15, /* AEC_BG */
+};
+#define VFE47_NUM_STATS_TYPE 9
+#define VFE47_STATS_BASE(idx) (stats_base_addr[idx])
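+/*
+ * Pick the ping or pong stats write-address register for a stream from
+ * that stream's bit in the hardware ping-pong status.
+ */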
+#define VFE47_STATS_PING_PONG_BASE(idx, ping_pong) \
+ (VFE47_STATS_BASE(idx) + 0x4 * \
+ (~(ping_pong >> (stats_pingpong_offset_map[idx])) & 0x1) * 2)
+
+#define VFE47_SRC_CLK_DTSI_IDX 5
+static struct msm_cam_clk_info msm_vfe47_clk_info[VFE_CLK_INFO_MAX];
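+
+/*
+ * Read an <entries, regs, settings> triple of device-tree properties and
+ * program each listed register in dev_mem_base with its setting; used for
+ * the QOS, DS and VBIF init tables.
+ */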
+static int32_t msm_vfe47_init_dt_parms(struct vfe_device *vfe_dev,
+ struct msm_vfe_hw_init_parms *dt_parms, void __iomem *dev_mem_base)
+{
+ struct device_node *of_node;
+ int32_t i = 0 , rc = 0;
+ uint32_t *dt_settings = NULL, *dt_regs = NULL, num_dt_entries = 0;
+
+ of_node = vfe_dev->pdev->dev.of_node;
+
+ rc = of_property_read_u32(of_node, dt_parms->entries,
+ &num_dt_entries);
+ if (rc < 0 || !num_dt_entries) {
+ pr_err("%s: NO QOS entries found\n", __func__);
+ return -EINVAL;
+ } else {
+ dt_settings = kzalloc(sizeof(uint32_t) * num_dt_entries,
+ GFP_KERNEL);
+ if (!dt_settings) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ dt_regs = kzalloc(sizeof(uint32_t) * num_dt_entries,
+ GFP_KERNEL);
+ if (!dt_regs) {
+ pr_err("%s:%d No memory\n", __func__, __LINE__);
+ kfree(dt_settings);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node, dt_parms->regs,
+ dt_regs, num_dt_entries);
+ if (rc < 0) {
+ pr_err("%s: NO QOS BUS BDG info\n", __func__);
+ kfree(dt_settings);
+ kfree(dt_regs);
+ return -EINVAL;
+ } else {
+ if (dt_parms->settings) {
+ rc = of_property_read_u32_array(of_node,
+ dt_parms->settings,
+ dt_settings, num_dt_entries);
+ if (rc < 0) {
+ pr_err("%s: NO QOS settings\n",
+ __func__);
+ kfree(dt_settings);
+ kfree(dt_regs);
+ } else {
+ for (i = 0; i < num_dt_entries; i++) {
+ msm_camera_io_w(dt_settings[i],
+ dev_mem_base +
+ dt_regs[i]);
+ }
+ kfree(dt_settings);
+ kfree(dt_regs);
+ }
+ } else {
+ kfree(dt_settings);
+ kfree(dt_regs);
+ }
+ }
+ }
+ return 0;
+}
+
+static int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
+{
+ int rc = -1;
+
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_VFE, CAMERA_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto ahb_vote_fail;
+ }
+
+ rc = msm_isp_init_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
+ goto bus_scale_register_failed;
+ }
+
+ if (!vfe_dev->fs_vfe) {
+ vfe_dev->fs_vfe = regulator_get(&vfe_dev->pdev->dev, "vdd");
+ if (IS_ERR(vfe_dev->fs_vfe)) {
+ pr_err("%s: Regulator vfe get failed %ld\n", __func__,
+ PTR_ERR(vfe_dev->fs_vfe));
+ rc = -ENODEV;
+ goto bus_scale_register_failed;
+ }
+ }
+
+ if (!vfe_dev->fs_camss) {
+ vfe_dev->fs_camss = regulator_get(&vfe_dev->pdev->dev,
+ "camss-vdd");
+ if (IS_ERR(vfe_dev->fs_camss)) {
+ pr_err("%s: Regulator camss get failed %ld\n", __func__,
+ PTR_ERR(vfe_dev->fs_camss));
+ rc = -ENODEV;
+ goto camss_vdd_regulator_failed;
+ }
+ }
+
+ if (!vfe_dev->fs_mmagic_camss) {
+ vfe_dev->fs_mmagic_camss = regulator_get(&vfe_dev->pdev->dev,
+ "mmagic-vdd");
+ if (IS_ERR(vfe_dev->fs_mmagic_camss)) {
+ pr_err("%s: Regulator mmagic get failed %ld\n",
+ __func__, PTR_ERR(vfe_dev->fs_mmagic_camss));
+ rc = -ENODEV;
+ goto mmagic_vdd_regulator_failed;
+ }
+ }
+
+ if (vfe_dev->fs_mmagic_camss) {
+ rc = regulator_enable(vfe_dev->fs_mmagic_camss);
+ if (rc) {
+ pr_err("%s: Regulator enable mmagic camss failed\n",
+ __func__);
+ goto fs_mmagic_failed;
+ }
+ }
+
+ if (vfe_dev->fs_camss) {
+ rc = regulator_enable(vfe_dev->fs_camss);
+ if (rc) {
+ pr_err("%s: Regulator enable camss failed\n", __func__);
+ goto fs_camss_failed;
+ }
+ }
+
+ if (vfe_dev->fs_vfe) {
+ rc = regulator_enable(vfe_dev->fs_vfe);
+ if (rc) {
+ pr_err("%s: Regulator enable failed\n", __func__);
+ goto fs_vfe_failed;
+ }
+ }
+
+ rc = msm_isp_get_clk_info(vfe_dev, vfe_dev->pdev, msm_vfe47_clk_info);
+ if (rc < 0) {
+ pr_err("msm_isp_get_clk_info() failed\n");
+ goto clk_enable_failed;
+ }
+ if (vfe_dev->num_clk <= 0) {
+ pr_err("%s: Invalid num of clock\n", __func__);
+ goto clk_enable_failed;
+ } else {
+ vfe_dev->vfe_clk =
+ kzalloc(sizeof(struct clk *) * vfe_dev->num_clk,
+ GFP_KERNEL);
+		if (!vfe_dev->vfe_clk) {
+			pr_err("%s:%d No memory\n", __func__, __LINE__);
+			rc = -ENOMEM;
+			goto clk_enable_failed;
+		}
+ }
+ rc = msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe47_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 1);
+ if (rc < 0)
+ goto clk_enable_failed;
+
+ vfe_dev->vfe_base = ioremap(vfe_dev->vfe_mem->start,
+ resource_size(vfe_dev->vfe_mem));
+ if (!vfe_dev->vfe_base) {
+ rc = -ENOMEM;
+ pr_err("%s: vfe ioremap failed\n", __func__);
+ goto vfe_remap_failed;
+ }
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
+ vfe_dev->vfe_base;
+
+ vfe_dev->vfe_vbif_base = ioremap(vfe_dev->vfe_vbif_mem->start,
+ resource_size(vfe_dev->vfe_vbif_mem));
+ if (!vfe_dev->vfe_vbif_base) {
+ rc = -ENOMEM;
+		pr_err("%s: vfe_vbif ioremap failed\n", __func__);
+ goto vbif_remap_failed;
+ }
+
+ rc = request_irq(vfe_dev->vfe_irq->start, msm_isp_process_irq,
+ IRQF_TRIGGER_RISING, "vfe", vfe_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request failed\n", __func__);
+ goto irq_req_failed;
+ }
+ return rc;
+irq_req_failed:
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+vbif_remap_failed:
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+vfe_remap_failed:
+ msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe47_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 0);
+clk_enable_failed:
+ if (vfe_dev->fs_vfe)
+ regulator_disable(vfe_dev->fs_vfe);
+ kfree(vfe_dev->vfe_clk);
+fs_vfe_failed:
+ if (vfe_dev->fs_camss)
+ regulator_disable(vfe_dev->fs_camss);
+fs_camss_failed:
+ if (vfe_dev->fs_mmagic_camss)
+ regulator_disable(vfe_dev->fs_mmagic_camss);
+fs_mmagic_failed:
+ regulator_put(vfe_dev->fs_mmagic_camss);
+ vfe_dev->fs_mmagic_camss = NULL;
+mmagic_vdd_regulator_failed:
+ regulator_put(vfe_dev->fs_camss);
+ vfe_dev->fs_camss = NULL;
+camss_vdd_regulator_failed:
+ regulator_put(vfe_dev->fs_vfe);
+ vfe_dev->fs_vfe = NULL;
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+bus_scale_register_failed:
+ cam_config_ahb_clk(CAM_AHB_CLIENT_VFE, CAMERA_AHB_SUSPEND_VOTE);
+ahb_vote_fail:
+ return rc;
+}
+
+static void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
+{
+ /* when closing node, disable all irq */
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x60);
+
+ disable_irq(vfe_dev->vfe_irq->start);
+ free_irq(vfe_dev->vfe_irq->start, vfe_dev);
+ tasklet_kill(&vfe_dev->vfe_tasklet);
+ msm_isp_flush_tasklet(vfe_dev);
+ iounmap(vfe_dev->vfe_vbif_base);
+ vfe_dev->vfe_vbif_base = NULL;
+ vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
+ msm_cam_clk_enable(&vfe_dev->pdev->dev, msm_vfe47_clk_info,
+ vfe_dev->vfe_clk, vfe_dev->num_clk, 0);
+ iounmap(vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+ kfree(vfe_dev->vfe_clk);
+ if (vfe_dev->fs_vfe) {
+ regulator_disable(vfe_dev->fs_vfe);
+ regulator_put(vfe_dev->fs_vfe);
+ vfe_dev->fs_vfe = NULL;
+ }
+ if (vfe_dev->fs_camss) {
+ regulator_disable(vfe_dev->fs_camss);
+ regulator_put(vfe_dev->fs_camss);
+ vfe_dev->fs_camss = NULL;
+ }
+ if (vfe_dev->fs_mmagic_camss) {
+ regulator_disable(vfe_dev->fs_mmagic_camss);
+ regulator_put(vfe_dev->fs_mmagic_camss);
+ vfe_dev->fs_mmagic_camss = NULL;
+ }
+ msm_isp_deinit_bandwidth_mgr(ISP_VFE0 + vfe_dev->pdev->id);
+
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_VFE, CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to vote for AHB\n", __func__);
+}
+
+static void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_hw_init_parms qos_parms;
+ struct msm_vfe_hw_init_parms vbif_parms;
+ struct msm_vfe_hw_init_parms ds_parms;
+
+ memset(&qos_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
+ memset(&vbif_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
+ memset(&ds_parms, 0, sizeof(struct msm_vfe_hw_init_parms));
+
+ qos_parms.entries = "qos-entries";
+ qos_parms.regs = "qos-regs";
+ qos_parms.settings = "qos-settings";
+ vbif_parms.entries = "vbif-entries";
+ vbif_parms.regs = "vbif-regs";
+ vbif_parms.settings = "vbif-settings";
+ ds_parms.entries = "ds-entries";
+ ds_parms.regs = "ds-regs";
+ ds_parms.settings = "ds-settings";
+
+ msm_vfe47_init_dt_parms(vfe_dev, &qos_parms, vfe_dev->vfe_base);
+ msm_vfe47_init_dt_parms(vfe_dev, &ds_parms, vfe_dev->vfe_base);
+ msm_vfe47_init_dt_parms(vfe_dev, &vbif_parms, vfe_dev->vfe_vbif_base);
+
+ /* BUS_CFG */
+ msm_camera_io_w(0x00000101, vfe_dev->vfe_base + 0x84);
+ /* IRQ_MASK/CLEAR */
+ msm_camera_io_w(0xE00000F3, vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x60);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+}
+
+static void msm_vfe47_clear_status_reg(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w(0x80000000, vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x60);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
+}
+
+static void msm_vfe47_process_reset_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ if (irq_status0 & (1 << 31)) {
+ complete(&vfe_dev->reset_complete);
+ vfe_dev->reset_pending = 0;
+ }
+}
+
+static void msm_vfe47_process_halt_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ uint32_t val = 0;
+
+ if (irq_status1 & (1 << 8)) {
+ complete(&vfe_dev->halt_complete);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x400);
+ }
+
+ val = msm_camera_io_r(vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
+ val &= ~(0x1);
+ msm_camera_io_w(val, vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
+}
+
+static void msm_vfe47_process_input_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0x1000003))
+ return;
+
+ if (irq_status0 & (1 << 0)) {
+ ISP_DBG("%s: SOF IRQ\n", __func__);
+ msm_isp_increment_frame_id(vfe_dev, VFE_PIX_0, ts);
+ }
+
+ if (irq_status0 & (1 << 24)) {
+ ISP_DBG("%s: Fetch Engine Read IRQ\n", __func__);
+ msm_isp_fetch_engine_done_notify(vfe_dev,
+ &vfe_dev->fetch_engine_info);
+ }
+
+ if (irq_status0 & (1 << 1))
+ ISP_DBG("%s: EOF IRQ\n", __func__);
+}
+
+static void msm_vfe47_process_violation_status(
+ struct vfe_device *vfe_dev)
+{
+ uint32_t violation_status = vfe_dev->error_info.violation_status;
+
+ if (violation_status > 39) {
+ pr_err("%s: invalid violation status %d\n",
+ __func__, violation_status);
+ return;
+ }
+
+ pr_err("%s: VFE pipeline violation status %d\n", __func__,
+ violation_status);
+}
+
+static void msm_vfe47_process_error_status(struct vfe_device *vfe_dev)
+{
+ uint32_t error_status1 = vfe_dev->error_info.error_mask1;
+
+ if (error_status1 & (1 << 0)) {
+ pr_err("%s: camif error status: 0x%x\n",
+ __func__, vfe_dev->error_info.camif_status);
+ /* dump camif registers on camif error */
+ msm_camera_io_dump(vfe_dev->vfe_base + 0x478, 0x34, 1);
+ }
+ if (error_status1 & (1 << 1))
+ pr_err("%s: stats bhist overwrite\n", __func__);
+ if (error_status1 & (1 << 2))
+ pr_err("%s: stats cs overwrite\n", __func__);
+ if (error_status1 & (1 << 3))
+ pr_err("%s: stats ihist overwrite\n", __func__);
+ if (error_status1 & (1 << 4))
+ pr_err("%s: realign buf y overflow\n", __func__);
+ if (error_status1 & (1 << 5))
+ pr_err("%s: realign buf cb overflow\n", __func__);
+ if (error_status1 & (1 << 6))
+ pr_err("%s: realign buf cr overflow\n", __func__);
+ if (error_status1 & (1 << 7)) {
+ msm_vfe47_process_violation_status(vfe_dev);
+ }
+ if (error_status1 & (1 << 9))
+ pr_err("%s: image master 0 bus overflow\n", __func__);
+ if (error_status1 & (1 << 10))
+ pr_err("%s: image master 1 bus overflow\n", __func__);
+ if (error_status1 & (1 << 11))
+ pr_err("%s: image master 2 bus overflow\n", __func__);
+ if (error_status1 & (1 << 12))
+ pr_err("%s: image master 3 bus overflow\n", __func__);
+ if (error_status1 & (1 << 13))
+ pr_err("%s: image master 4 bus overflow\n", __func__);
+ if (error_status1 & (1 << 14))
+ pr_err("%s: image master 5 bus overflow\n", __func__);
+ if (error_status1 & (1 << 15))
+ pr_err("%s: image master 6 bus overflow\n", __func__);
+ if (error_status1 & (1 << 16))
+ pr_err("%s: status hdr be bus overflow\n", __func__);
+ if (error_status1 & (1 << 17))
+ pr_err("%s: status bg bus overflow\n", __func__);
+ if (error_status1 & (1 << 18))
+ pr_err("%s: status bf bus overflow\n", __func__);
+ if (error_status1 & (1 << 19))
+ pr_err("%s: status hdr bhist bus overflow\n", __func__);
+ if (error_status1 & (1 << 20))
+ pr_err("%s: status rs bus overflow\n", __func__);
+ if (error_status1 & (1 << 21))
+ pr_err("%s: status cs bus overflow\n", __func__);
+ if (error_status1 & (1 << 22))
+ pr_err("%s: status ihist bus overflow\n", __func__);
+ if (error_status1 & (1 << 23))
+ pr_err("%s: status skin bhist bus overflow\n", __func__);
+ if (error_status1 & (1 << 24))
+ pr_err("%s: status aec bg bus overflow\n", __func__);
+ if (error_status1 & (1 << 25))
+ pr_err("%s: status dsp error\n", __func__);
+}
+
+static void msm_vfe47_enable_camif_error(struct vfe_device *vfe_dev,
+ int enable)
+{
+ uint32_t val;
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
+ if (enable)
+ msm_camera_io_w_mb(val | BIT(0), vfe_dev->vfe_base + 0x60);
+ else
+ msm_camera_io_w_mb(val & ~(BIT(0)), vfe_dev->vfe_base + 0x60);
+}
+
+static void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1)
+{
+ uint32_t irq_mask0, irq_mask1;
+
+ *irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x6C);
+ *irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x70);
+	/* Clear the status bits that were just read */
+	msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x64);
+	msm_camera_io_w(*irq_status1, vfe_dev->vfe_base + 0x68);
+	msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
+
+	/* Mask off bits that are not enabled */
+	irq_mask0 = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+	irq_mask1 = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
+	*irq_status0 &= irq_mask0;
+	*irq_status1 &= irq_mask1;
+
+ if (*irq_status1 & (1 << 0)) {
+ vfe_dev->error_info.camif_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x4A4);
+		/* mask off camif error after first occurrence */
+ msm_vfe47_enable_camif_error(vfe_dev, 0);
+ }
+
+ if (*irq_status1 & (1 << 7))
+ vfe_dev->error_info.violation_status =
+ msm_camera_io_r(vfe_dev->vfe_base + 0x7C);
+}
+
+static void msm_vfe47_process_reg_update(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ enum msm_vfe_input_src i;
+ uint32_t shift_irq;
+ uint8_t reg_updated = 0;
+ unsigned long flags;
+
+ if (!(irq_status0 & 0xF0))
+ return;
+ /* Shift status bits so that PIX SOF is 1st bit */
+ shift_irq = ((irq_status0 & 0xF0) >> 4);
+
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ if (shift_irq & BIT(i)) {
+ reg_updated |= BIT(i);
+ ISP_DBG("%s REG_UPDATE IRQ %x\n", __func__,
+ (uint32_t)BIT(i));
+ switch (i) {
+ case VFE_PIX_0:
+ msm_isp_save_framedrop_values(vfe_dev,
+ VFE_PIX_0);
+ msm_isp_notify(vfe_dev, ISP_EVENT_REG_UPDATE,
+ VFE_PIX_0, ts);
+ if (atomic_read(
+ &vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ if (vfe_dev->axi_data.camif_state ==
+ CAMIF_STOPPING)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, i);
+ break;
+ case VFE_RAW_0:
+ case VFE_RAW_1:
+ case VFE_RAW_2:
+ msm_isp_increment_frame_id(vfe_dev, i, ts);
+ msm_isp_save_framedrop_values(vfe_dev, i);
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, i, ts);
+ msm_isp_update_framedrop_reg(vfe_dev, i);
+ /*
+ * Reg Update is pseudo SOF for RDI,
+ * so request every frame
+ */
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, i);
+ break;
+ default:
+ pr_err("%s: Error case\n", __func__);
+ return;
+ }
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(
+ &vfe_dev->axi_data.axi_cfg_update[i]) ==
+ 0)
+ msm_isp_notify(vfe_dev,
+ ISP_EVENT_STREAM_UPDATE_DONE,
+ i, ts);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ if (reg_updated & BIT(VFE_PIX_0))
+ vfe_dev->reg_updated = 1;
+
+ vfe_dev->reg_update_requested &= ~reg_updated;
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+static void msm_vfe47_process_epoch_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ if (!(irq_status0 & 0xc))
+ return;
+
+ if (irq_status0 & BIT(2)) {
+ msm_isp_notify(vfe_dev, ISP_EVENT_SOF, VFE_PIX_0, ts);
+ ISP_DBG("%s: EPOCH0 IRQ\n", __func__);
+ msm_isp_update_framedrop_reg(vfe_dev, VFE_PIX_0);
+ msm_isp_update_stats_framedrop_reg(vfe_dev);
+ msm_isp_update_error_frame_count(vfe_dev);
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].raw_stream_count > 0
+ && vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count == 0) {
+ if (vfe_dev->axi_data.stream_update[VFE_PIX_0])
+ msm_isp_axi_stream_update(vfe_dev, VFE_PIX_0);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev, VFE_PIX_0);
+ }
+ }
+}
+
+static void msm_vfe47_reg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ uint32_t update_mask = 0;
+ unsigned long flags;
+
+	/* This HW supports up to VFE_RAW_2 */
+ if (frame_src > VFE_RAW_2 && frame_src != VFE_SRC_MAX) {
+ pr_err("%s Error case\n", __func__);
+ return;
+ }
+
+ /*
+ * If frame_src == VFE_SRC_MAX request reg_update on all
+ * supported INTF
+ */
+ if (frame_src == VFE_SRC_MAX)
+ update_mask = 0xF;
+ else
+ update_mask = BIT((uint32_t)frame_src);
+ ISP_DBG("%s update_mask %x\n", __func__, update_mask);
+
+ spin_lock_irqsave(&vfe_dev->reg_update_lock, flags);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].reg_update_frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ vfe_dev->reg_update_requested |= update_mask;
+ vfe_dev->common_data->dual_vfe_res->reg_update_mask[vfe_dev->pdev->id] =
+ vfe_dev->reg_update_requested;
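+	/*
+	 * In dual-VFE (split) mode, VFE1 issues the PIX reg update on both
+	 * VFE bases (0x4AC); otherwise only the local VFE is kicked.
+	 */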
+ if ((vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1) &&
+ ((frame_src == VFE_PIX_0) || (frame_src == VFE_SRC_MAX))) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->common_data->dual_vfe_res->vfe_base[ISP_VFE0]
+ + 0x4AC);
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x4AC);
+ } else if (!vfe_dev->is_split ||
+ ((frame_src == VFE_PIX_0) &&
+ (vfe_dev->axi_data.camif_state == CAMIF_STOPPING)) ||
+ (frame_src >= VFE_RAW_0 && frame_src <= VFE_SRC_MAX)) {
+ msm_camera_io_w_mb(update_mask,
+ vfe_dev->vfe_base + 0x4AC);
+ }
+ spin_unlock_irqrestore(&vfe_dev->reg_update_lock, flags);
+}
+
+static long msm_vfe47_reset_hardware(struct vfe_device *vfe_dev,
+ uint32_t first_start, uint32_t blocking_call)
+{
+ long rc = 0;
+
+ init_completion(&vfe_dev->reset_complete);
+
+ if (blocking_call)
+ vfe_dev->reset_pending = 1;
+
+ if (first_start) {
+ msm_camera_io_w_mb(0x3FF, vfe_dev->vfe_base + 0x18);
+ } else {
+ msm_camera_io_w_mb(0x3EF, vfe_dev->vfe_base + 0x18);
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ reload_wm(vfe_dev, vfe_dev->vfe_base, 0x0001FFFF);
+ }
+
+ if (blocking_call) {
+ rc = wait_for_completion_timeout(
+ &vfe_dev->reset_complete, msecs_to_jiffies(50));
+ if (rc <= 0) {
+ pr_err("%s:%d failed: reset timeout\n", __func__,
+ __LINE__);
+ vfe_dev->reset_pending = 0;
+ }
+ }
+
+ return rc;
+}
+
+static void msm_vfe47_axi_reload_wm(struct vfe_device *vfe_dev,
+ void __iomem *vfe_base, uint32_t reload_mask)
+{
+ msm_camera_io_w_mb(reload_mask, vfe_base + 0x80);
+}
+
+static void msm_vfe47_axi_update_cgc_override(struct vfe_device *vfe_dev,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val;
+
+ /* Change CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
+ if (enable)
+ val |= (1 << wm_idx);
+ else
+ val &= ~(1 << wm_idx);
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x3C);
+}
+
+static void msm_vfe47_axi_enable_wm(void __iomem *vfe_base,
+ uint8_t wm_idx, uint8_t enable)
+{
+ uint32_t val;
+
+ val = msm_camera_io_r(vfe_base + VFE47_WM_BASE(wm_idx));
+ if (enable)
+ val |= 0x1;
+ else
+ val &= ~0x1;
+ msm_camera_io_w_mb(val,
+ vfe_base + VFE47_WM_BASE(wm_idx));
+}
+
+static void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t comp_mask, comp_mask_index =
+ stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ comp_mask |= (axi_data->composite_info[comp_mask_index].
+ stream_composite_mask << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask |= 1 << (comp_mask_index + 25);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
+static void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t comp_mask, comp_mask_index = stream_info->comp_mask_index;
+ uint32_t irq_mask;
+
+ comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x74);
+ comp_mask &= ~(0x7F << (comp_mask_index * 8));
+ msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask &= ~(1 << (comp_mask_index + 25));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
+static void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask |= 1 << (stream_info->wm[0] + 8);
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
+static void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t irq_mask;
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask &= ~(1 << (stream_info->wm[0] + 8));
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+}
+
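+/*
+ * Program the per-WM framedrop pattern (WM base + 0x24) and period
+ * (bits [6:2] of WM base + 0x14) for every plane of the stream.
+ */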
+static void msm_vfe47_cfg_framedrop(void __iomem *vfe_base,
+ struct msm_vfe_axi_stream *stream_info, uint32_t framedrop_pattern,
+ uint32_t framedrop_period)
+{
+ uint32_t i, temp;
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ msm_camera_io_w(framedrop_pattern, vfe_base +
+ VFE47_WM_BASE(stream_info->wm[i]) + 0x24);
+ temp = msm_camera_io_r(vfe_base +
+ VFE47_WM_BASE(stream_info->wm[i]) + 0x14);
+ temp &= 0xFFFFFF83;
+ msm_camera_io_w(temp | (framedrop_period - 1) << 2,
+ vfe_base + VFE47_WM_BASE(stream_info->wm[i]) + 0x14);
+ }
+}
+
+static void msm_vfe47_clear_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ uint32_t i;
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ msm_camera_io_w(0, vfe_dev->vfe_base +
+ VFE47_WM_BASE(stream_info->wm[i]) + 0x24);
+}
+
+static int32_t msm_vfe47_convert_bpp_to_reg(int32_t bpp, uint32_t *bpp_reg)
+{
+ int rc = 0;
+ switch (bpp) {
+ case 8:
+ *bpp_reg = 0;
+ break;
+ case 10:
+ *bpp_reg = 1;
+ break;
+ case 12:
+ *bpp_reg = 2;
+ break;
+ case 14:
+ *bpp_reg = 3;
+ break;
+ default:
+		pr_err("%s:%d invalid bpp %d\n", __func__, __LINE__, bpp);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_vfe47_convert_io_fmt_to_reg(
+ enum msm_isp_pack_fmt pack_format, uint32_t *pack_reg)
+{
+ int rc = 0;
+
+ switch (pack_format) {
+ case QCOM:
+ *pack_reg = 0x0;
+ break;
+ case MIPI:
+ *pack_reg = 0x1;
+ break;
+ case DPCM6:
+ *pack_reg = 0x2;
+ break;
+ case DPCM8:
+ *pack_reg = 0x3;
+ break;
+ case PLAIN8:
+ *pack_reg = 0x4;
+ break;
+ case PLAIN16:
+ *pack_reg = 0x5;
+ break;
+ case DPCM10:
+ *pack_reg = 0x6;
+ break;
+ default:
+ pr_err("%s: invalid pack fmt %d!\n", __func__, pack_format);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_vfe47_cfg_io_format(struct vfe_device *vfe_dev,
+ enum msm_vfe_axi_stream_src stream_src, uint32_t io_format)
+{
+ int rc = 0;
+ int bpp = 0, read_bpp = 0;
+ enum msm_isp_pack_fmt pack_fmt = 0, read_pack_fmt = 0;
+ uint32_t bpp_reg = 0, pack_reg = 0;
+ uint32_t read_bpp_reg = 0, read_pack_reg = 0;
+ uint32_t io_format_reg = 0; /*io format register bit*/
+
+ io_format_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x88);
+
+ /*input config*/
+ if ((stream_src < RDI_INTF_0) &&
+ (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux ==
+ EXTERNAL_READ)) {
+ read_bpp = msm_isp_get_bit_per_pixel(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe47_convert_bpp_to_reg(read_bpp, &read_bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! in_bpp %d rc %d\n",
+ __func__, read_bpp, rc);
+ return rc;
+ }
+
+ read_pack_fmt = msm_isp_get_pack_format(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+ rc = msm_vfe47_convert_io_fmt_to_reg(
+ read_pack_fmt, &read_pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ /*use input format(v4l2_pix_fmt) to get pack format*/
+ io_format_reg &= 0xFFC8FFFF;
+ io_format_reg |= (read_bpp_reg << 20 | read_pack_reg << 16);
+ }
+
+ bpp = msm_isp_get_bit_per_pixel(io_format);
+ rc = msm_vfe47_convert_bpp_to_reg(bpp, &bpp_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_bpp_to_reg err! bpp %d rc = %d\n",
+ __func__, bpp, rc);
+ return rc;
+ }
+
+ switch (stream_src) {
+ case PIX_VIDEO:
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER:
+ case CAMIF_RAW:
+ io_format_reg &= 0xFFFFCFFF;
+ io_format_reg |= bpp_reg << 12;
+ break;
+ case IDEAL_RAW:
+ /*use output format(v4l2_pix_fmt) to get pack format*/
+ pack_fmt = msm_isp_get_pack_format(io_format);
+ rc = msm_vfe47_convert_io_fmt_to_reg(pack_fmt, &pack_reg);
+ if (rc < 0) {
+ pr_err("%s: convert_io_fmt_to_reg err! rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ io_format_reg &= 0xFFFFFFC8;
+ io_format_reg |= bpp_reg << 4 | pack_reg;
+ break;
+ case RDI_INTF_0:
+ case RDI_INTF_1:
+ case RDI_INTF_2:
+ default:
+ pr_err("%s: Invalid stream source\n", __func__);
+ return -EINVAL;
+ }
+ msm_camera_io_w(io_format_reg, vfe_dev->vfe_base + 0x88);
+ return 0;
+}
+
+static int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ int rc = 0;
+ uint32_t bufq_handle = 0;
+ struct msm_isp_buffer *buf = NULL;
+ struct msm_vfe_fetch_eng_start *fe_cfg = arg;
+ struct msm_isp_buffer_mapped_info mapped_info;
+
+ if (vfe_dev->fetch_engine_info.is_busy == 1) {
+ pr_err("%s: fetch engine busy\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(&mapped_info, 0, sizeof(struct msm_isp_buffer_mapped_info));
+
+	/*
+	 * The other option is to pass a buffer address from user space;
+	 * in that case the driver needs to map the buffer and use it.
+	 */
+ vfe_dev->fetch_engine_info.session_id = fe_cfg->session_id;
+ vfe_dev->fetch_engine_info.stream_id = fe_cfg->stream_id;
+ vfe_dev->fetch_engine_info.offline_mode = fe_cfg->offline_mode;
+ vfe_dev->fetch_engine_info.fd = fe_cfg->fd;
+
+ if (!fe_cfg->offline_mode) {
+ bufq_handle = vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, fe_cfg->session_id,
+ fe_cfg->stream_id);
+ vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+
+ rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
+ vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
+ if (rc < 0 || !buf) {
+ pr_err("%s: No fetch buffer rc= %d buf= %p\n",
+ __func__, rc, buf);
+ return -EINVAL;
+ }
+ mapped_info = buf->mapped_info[0];
+ buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ } else {
+ rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
+ &mapped_info, fe_cfg->fd);
+ if (rc < 0) {
+ pr_err("%s: can not map buffer\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ vfe_dev->fetch_engine_info.buf_idx = fe_cfg->buf_idx;
+ vfe_dev->fetch_engine_info.is_busy = 1;
+
+ msm_camera_io_w(mapped_info.paddr, vfe_dev->vfe_base + 0x2F4);
+
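+	/* Trigger the fetch engine (register 0x80, also used for WM reload) */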
+ msm_camera_io_w_mb(0x100000, vfe_dev->vfe_base + 0x80);
+ msm_camera_io_w_mb(0x200000, vfe_dev->vfe_base + 0x80);
+
+ ISP_DBG("%s:VFE%d Fetch Engine ready\n", __func__, vfe_dev->pdev->id);
+
+ return 0;
+}
+
+static void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint32_t x_size_word, temp;
+ struct msm_vfe_fetch_engine_cfg *fe_cfg = NULL;
+
+ if (pix_cfg->input_mux == EXTERNAL_READ) {
+ fe_cfg = &pix_cfg->fetch_engine_cfg;
+ pr_debug("%s:VFE%d wd x ht buf = %d x %d, fe = %d x %d\n",
+ __func__, vfe_dev->pdev->id, fe_cfg->buf_width,
+ fe_cfg->buf_height,
+ fe_cfg->fetch_width, fe_cfg->fetch_height);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_cgc_override(vfe_dev,
+ VFE47_BUS_RD_CGC_OVERRIDE_BIT, 1);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x84);
+ temp &= 0xFFFFFFFD;
+ temp |= (1 << 1);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x84);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ temp &= 0xFEFFFFFF;
+ temp |= (1 << 24);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x5C);
+
+ temp = fe_cfg->fetch_height - 1;
+ msm_camera_io_w(temp & 0x3FFF, vfe_dev->vfe_base + 0x308);
+
+ x_size_word = msm_isp_cal_word_per_line(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format,
+ fe_cfg->fetch_width);
+ msm_camera_io_w((x_size_word - 1) << 16,
+ vfe_dev->vfe_base + 0x30c);
+
+ msm_camera_io_w(x_size_word << 16 |
+ (temp & 0x3FFF) << 2 | VFE47_FETCH_BURST_LEN,
+ vfe_dev->vfe_base + 0x310);
+
+ temp = ((fe_cfg->buf_width - 1) & 0x3FFF) << 16 |
+ ((fe_cfg->buf_height - 1) & 0x3FFF);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x314);
+
+		/* need a formula to calculate MAIN_UNPACK_PATTERN */
+ msm_camera_io_w(0xF6543210, vfe_dev->vfe_base + 0x318);
+ msm_camera_io_w(0xF, vfe_dev->vfe_base + 0x334);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ temp |= 2 << 5;
+ temp |= 128 << 8;
+ temp |= (pix_cfg->pixel_pattern & 0x3);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
+
+ } else {
+		pr_err("%s: Invalid mux configuration - mux: %d\n", __func__,
+ pix_cfg->input_mux);
+ }
+}
+
+static void msm_vfe47_cfg_testgen(struct vfe_device *vfe_dev,
+ struct msm_vfe_testgen_cfg *testgen_cfg)
+{
+ uint32_t temp;
+ uint32_t bit_per_pixel = 0;
+ uint32_t bpp_reg = 0;
+ uint32_t bayer_pix_pattern_reg = 0;
+ uint32_t unicolorbar_reg = 0;
+ uint32_t unicolor_enb = 0;
+
+ bit_per_pixel = msm_isp_get_bit_per_pixel(
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format);
+
+ switch (bit_per_pixel) {
+ case 8:
+ bpp_reg = 0x0;
+ break;
+ case 10:
+ bpp_reg = 0x1;
+ break;
+ case 12:
+ bpp_reg = 0x10;
+ break;
+ case 14:
+ bpp_reg = 0x11;
+ break;
+ default:
+ pr_err("%s: invalid bpp %d\n", __func__, bit_per_pixel);
+ break;
+ }
+
+ msm_camera_io_w(bpp_reg << 16 | testgen_cfg->burst_num_frame,
+ vfe_dev->vfe_base + 0xC5C);
+
+ msm_camera_io_w(((testgen_cfg->lines_per_frame - 1) << 16) |
+ (testgen_cfg->pixels_per_line - 1), vfe_dev->vfe_base + 0xC60);
+
+ temp = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ temp |= (((testgen_cfg->h_blank) & 0x3FFF) << 8);
+ temp |= (1 << 22);
+ msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
+
+ msm_camera_io_w((1 << 16) | testgen_cfg->v_blank,
+ vfe_dev->vfe_base + 0xC70);
+
+ switch (testgen_cfg->pixel_bayer_pattern) {
+ case ISP_BAYER_RGRGRG:
+ bayer_pix_pattern_reg = 0x0;
+ break;
+ case ISP_BAYER_GRGRGR:
+ bayer_pix_pattern_reg = 0x1;
+ break;
+ case ISP_BAYER_BGBGBG:
+ bayer_pix_pattern_reg = 0x10;
+ break;
+ case ISP_BAYER_GBGBGB:
+ bayer_pix_pattern_reg = 0x11;
+ break;
+ default:
+ pr_err("%s: invalid pix pattern %d\n",
+ __func__, bit_per_pixel);
+ break;
+ }
+
+ if (testgen_cfg->color_bar_pattern == COLOR_BAR_8_COLOR) {
+ unicolor_enb = 0x0;
+ } else {
+ unicolor_enb = 0x1;
+ switch (testgen_cfg->color_bar_pattern) {
+ case UNICOLOR_WHITE:
+ unicolorbar_reg = 0x0;
+ break;
+ case UNICOLOR_YELLOW:
+ unicolorbar_reg = 0x1;
+ break;
+ case UNICOLOR_CYAN:
+ unicolorbar_reg = 0x10;
+ break;
+ case UNICOLOR_GREEN:
+ unicolorbar_reg = 0x11;
+ break;
+ case UNICOLOR_MAGENTA:
+ unicolorbar_reg = 0x100;
+ break;
+ case UNICOLOR_RED:
+ unicolorbar_reg = 0x101;
+ break;
+ case UNICOLOR_BLUE:
+ unicolorbar_reg = 0x110;
+ break;
+ case UNICOLOR_BLACK:
+ unicolorbar_reg = 0x111;
+ break;
+ default:
+ pr_err("%s: invalid colorbar %d\n",
+ __func__, testgen_cfg->color_bar_pattern);
+ break;
+ }
+ }
+
+ msm_camera_io_w((testgen_cfg->rotate_period << 8) |
+ (bayer_pix_pattern_reg << 6) | (unicolor_enb << 4) |
+ (unicolorbar_reg), vfe_dev->vfe_base + 0xC78);
+}
+
+static void msm_vfe47_cfg_camif(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint16_t first_pixel, last_pixel, first_line, last_line;
+ struct msm_vfe_camif_cfg *camif_cfg = &pix_cfg->camif_cfg;
+ uint32_t val, subsample_period, subsample_pattern;
+ uint32_t irq_sub_period = 32;
+ uint32_t frame_sub_period = 32;
+ struct msm_vfe_camif_subsample_cfg *subsample_cfg =
+ &pix_cfg->camif_cfg.subsample_cfg;
+ uint16_t bus_sub_en = 0;
+ if (subsample_cfg->pixel_skip || subsample_cfg->line_skip)
+ bus_sub_en = 1;
+ else
+ bus_sub_en = 0;
+
+ vfe_dev->dual_vfe_enable = camif_cfg->is_split;
+
+ msm_camera_io_w(pix_cfg->input_mux << 5 | pix_cfg->pixel_pattern,
+ vfe_dev->vfe_base + 0x50);
+
+ first_pixel = camif_cfg->first_pixel;
+ last_pixel = camif_cfg->last_pixel;
+ first_line = camif_cfg->first_line;
+ last_line = camif_cfg->last_line;
+ subsample_period = camif_cfg->subsample_cfg.irq_subsample_period;
+ subsample_pattern = camif_cfg->subsample_cfg.irq_subsample_pattern;
+
+ msm_camera_io_w((camif_cfg->lines_per_frame - 1) << 16 |
+ (camif_cfg->pixels_per_line - 1), vfe_dev->vfe_base + 0x484);
+ if (bus_sub_en) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
+ val &= 0xFFFFFFDF;
+ val = val | bus_sub_en << 5;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x47C);
+ subsample_cfg->pixel_skip &= 0x0000FFFF;
+ subsample_cfg->line_skip &= 0x0000FFFF;
+ msm_camera_io_w((subsample_cfg->line_skip << 16) |
+ subsample_cfg->pixel_skip, vfe_dev->vfe_base + 0x490);
+ }
+
+ msm_camera_io_w(first_pixel << 16 | last_pixel,
+ vfe_dev->vfe_base + 0x488);
+
+ msm_camera_io_w(first_line << 16 | last_line,
+ vfe_dev->vfe_base + 0x48C);
+
+ msm_camera_io_w(((irq_sub_period - 1) << 8) | 0 << 5 |
+ (frame_sub_period - 1), vfe_dev->vfe_base + 0x494);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x498);
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x49C);
+ if (subsample_period && subsample_pattern) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x494);
+ val &= 0xFFFFE0FF;
+		val |= (subsample_period - 1) << 8;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x494);
+ ISP_DBG("%s:camif PERIOD %x PATTERN %x\n",
+ __func__, subsample_period, subsample_pattern);
+
+ val = subsample_pattern;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x49C);
+ } else {
+ msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x49C);
+ }
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x46C);
+ val |= camif_cfg->camif_input;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x46C);
+}
+
+static void msm_vfe47_cfg_input_mux(struct vfe_device *vfe_dev,
+ struct msm_vfe_pix_cfg *pix_cfg)
+{
+ uint32_t core_cfg = 0;
+ uint32_t val = 0;
+
+ core_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ core_cfg &= 0xFFFFFF9F;
+
+ switch (pix_cfg->input_mux) {
+ case CAMIF:
+ core_cfg |= 0x0 << 5;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
+ msm_vfe47_cfg_camif(vfe_dev, pix_cfg);
+ break;
+ case TESTGEN:
+ /* Change CGC override */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x3C);
+ val |= (1 << 31);
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x3C);
+
+		/* CAMIF and TESTGEN will both go through CAMIF */
+ core_cfg |= 0x1 << 5;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
+ msm_vfe47_cfg_camif(vfe_dev, pix_cfg);
+ msm_vfe47_cfg_testgen(vfe_dev, &pix_cfg->testgen_cfg);
+ break;
+ case EXTERNAL_READ:
+ core_cfg |= 0x2 << 5;
+ msm_camera_io_w_mb(core_cfg, vfe_dev->vfe_base + 0x50);
+ msm_vfe47_cfg_fetch_engine(vfe_dev, pix_cfg);
+ break;
+ default:
+ pr_err("%s: Unsupported input mux %d\n",
+ __func__, pix_cfg->input_mux);
+ break;
+ }
+}
+
+static void msm_vfe47_configure_hvx(struct vfe_device *vfe_dev,
+ uint8_t is_stream_on)
+{
+ uint32_t val;
+ if (is_stream_on == 1) {
+ /* Enable HVX */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ val |= (1 << 3);
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x50);
+ val &= 0xFF7FFFFF;
+ if (vfe_dev->hvx_cmd == HVX_ROUND_TRIP)
+ val |= (1 << 23);
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x50);
+ } else {
+ /* Disable HVX */
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x50);
+ val &= 0xFFFFFFF7;
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x50);
+ }
+}
+
+static void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state update_state)
+{
+ uint32_t val;
+ bool bus_en, vfe_en;
+
+ if (update_state == NO_UPDATE)
+ return;
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
+ if (update_state == ENABLE_CAMIF) {
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ val |= 0xF5;
+ msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
+
+ if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
+ (vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
+ msm_vfe47_configure_hvx(vfe_dev, 1);
+
+ bus_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].raw_stream_count > 0) ? 1 : 0);
+ vfe_en =
+ ((vfe_dev->axi_data.
+ src_info[VFE_PIX_0].pix_stream_count > 0) ? 1 : 0);
+ val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
+ val &= 0xFFFFFF3F;
+ val = val | bus_en << 7 | vfe_en << 6;
+ msm_camera_io_w(val, vfe_dev->vfe_base + 0x47C);
+ msm_camera_io_w_mb(0x4, vfe_dev->vfe_base + 0x478);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x478);
+ /* configure EPOCH0 for 20 lines */
+ msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x4A0);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 1;
+ /* testgen GO*/
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1, vfe_dev->vfe_base + 0xC58);
+ } else if (update_state == DISABLE_CAMIF) {
+ msm_camera_io_w_mb(0x0, vfe_dev->vfe_base + 0x478);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ /* testgen OFF*/
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0xC58);
+
+ if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
+ (vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
+ msm_vfe47_configure_hvx(vfe_dev, 0);
+
+ } else if (update_state == DISABLE_CAMIF_IMMEDIATELY) {
+ msm_camera_io_w_mb(0x6, vfe_dev->vfe_base + 0x478);
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active = 0;
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
+ msm_camera_io_w(1 << 1, vfe_dev->vfe_base + 0xC58);
+
+ if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
+ (vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
+ msm_vfe47_configure_hvx(vfe_dev, 0);
+
+ }
+}
+
+static void msm_vfe47_cfg_rdi_reg(
+ struct vfe_device *vfe_dev, struct msm_vfe_rdi_cfg *rdi_cfg,
+ enum msm_vfe_input_src input_src)
+{
+ uint8_t rdi = input_src - VFE_RAW_0;
+ uint32_t rdi_reg_cfg;
+
+ rdi_reg_cfg = msm_camera_io_r(
+ vfe_dev->vfe_base + VFE47_RDI_BASE(rdi));
+ rdi_reg_cfg &= 0x3;
+ rdi_reg_cfg |= (rdi * 3) << 28 | rdi_cfg->cid << 4 | 1 << 2;
+ msm_camera_io_w(
+ rdi_reg_cfg, vfe_dev->vfe_base + VFE47_RDI_BASE(rdi));
+}
+
+static void msm_vfe47_axi_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ uint32_t val;
+ uint32_t wm_base = VFE47_WM_BASE(stream_info->wm[plane_idx]);
+
+ val = msm_camera_io_r(vfe_dev->vfe_base + wm_base + 0x14);
+ val &= ~0x2;
+ if (stream_info->frame_based)
+ val |= 0x2;
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ if (!stream_info->frame_based) {
+ /* WR_IMAGE_SIZE */
+ val = ((msm_isp_cal_word_per_line(
+ stream_info->output_format,
+ stream_info->plane_cfg[plane_idx].
+ output_width)+3)/4 - 1) << 16 |
+ (stream_info->plane_cfg[plane_idx].
+ output_height - 1);
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x1C);
+ /* WR_BUFFER_CFG */
+ val = VFE47_BURST_LEN |
+ (stream_info->plane_cfg[plane_idx].output_height - 1) <<
+ 2 |
+ ((msm_isp_cal_word_per_line(stream_info->output_format,
+ stream_info->plane_cfg[plane_idx].
+ output_stride)+1)/2) << 16;
+ }
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + wm_base + 0x28);
+}
+
+static void msm_vfe47_axi_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint32_t val = 0;
+ uint32_t wm_base = VFE47_WM_BASE(stream_info->wm[plane_idx]);
+
+ /* WR_ADDR_CFG */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x14);
+ /* WR_IMAGE_SIZE */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x1C);
+ /* WR_BUFFER_CFG */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x20);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(val, vfe_dev->vfe_base + wm_base + 0x28);
+}
+
+static void msm_vfe47_axi_cfg_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint8_t plane_idx)
+{
+ struct msm_vfe_axi_plane_cfg *plane_cfg =
+ &stream_info->plane_cfg[plane_idx];
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_cfg = 0;
+ uint32_t xbar_reg_cfg = 0;
+
+ switch (stream_info->stream_src) {
+ case PIX_VIDEO:
+ case PIX_ENCODER:
+ case PIX_VIEWFINDER: {
+ if (plane_cfg->output_plane_format != CRCB_PLANE &&
+ plane_cfg->output_plane_format != CBCR_PLANE) {
+ /* SINGLE_STREAM_SEL */
+ xbar_cfg |= plane_cfg->output_plane_format << 8;
+ } else {
+ switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
+ /* PAIR_STREAM_SWAP_CTRL */
+ xbar_cfg |= 0x3 << 4;
+ break;
+ }
+ xbar_cfg |= 0x1 << 2; /* PAIR_STREAM_EN */
+ }
+ if (stream_info->stream_src == PIX_VIEWFINDER)
+ xbar_cfg |= 0x1; /* VIEW_STREAM_EN */
+ else if (stream_info->stream_src == PIX_VIDEO)
+ xbar_cfg |= 0x2;
+ break;
+ }
+ case CAMIF_RAW:
+ xbar_cfg = 0x300;
+ break;
+ case IDEAL_RAW:
+ xbar_cfg = 0x400;
+ break;
+ case RDI_INTF_0:
+ xbar_cfg = 0xC00;
+ break;
+ case RDI_INTF_1:
+ xbar_cfg = 0xD00;
+ break;
+ case RDI_INTF_2:
+ xbar_cfg = 0xE00;
+ break;
+ default:
+ pr_err("%s: Invalid stream src\n", __func__);
+ break;
+ }
+
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE47_XBAR_SHIFT(wm));
+ xbar_reg_cfg |= (xbar_cfg << VFE47_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
+}
+
+static void msm_vfe47_axi_clear_wm_xbar_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint8_t plane_idx)
+{
+ uint8_t wm = stream_info->wm[plane_idx];
+ uint32_t xbar_reg_cfg = 0;
+
+ xbar_reg_cfg =
+ msm_camera_io_r(vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
+ xbar_reg_cfg &= ~(0xFFFF << VFE47_XBAR_SHIFT(wm));
+ msm_camera_io_w(xbar_reg_cfg,
+ vfe_dev->vfe_base + VFE47_XBAR_BASE(wm));
+}
+
+static void msm_vfe47_cfg_axi_ub_equal_default(
+ struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data =
+ &vfe_dev->axi_data;
+ uint32_t total_image_size = 0;
+ uint8_t num_used_wms = 0;
+ uint32_t prop_size = 0;
+ uint32_t wm_ub_size;
+ uint64_t delta;
+
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i] > 0) {
+ num_used_wms++;
+ total_image_size += axi_data->wm_image_size[i];
+ }
+ }
+ if (vfe_dev->pdev->id == ISP_VFE0) {
+ prop_size = MSM_ISP47_TOTAL_IMAGE_UB_VFE0 -
+ axi_data->hw_info->min_wm_ub * num_used_wms;
+ } else if (vfe_dev->pdev->id == ISP_VFE1) {
+ prop_size = MSM_ISP47_TOTAL_IMAGE_UB_VFE1 -
+ axi_data->hw_info->min_wm_ub * num_used_wms;
+ } else {
+ pr_err("%s: incorrect VFE device\n", __func__);
+ }
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (axi_data->free_wm[i]) {
+ delta = (uint64_t)axi_data->wm_image_size[i] *
+ (uint64_t)prop_size;
+ do_div(delta, total_image_size);
+ wm_ub_size = axi_data->hw_info->min_wm_ub +
+ (uint32_t)delta;
+ msm_camera_io_w(ub_offset << 16 | (wm_ub_size - 1),
+ vfe_dev->vfe_base + VFE47_WM_BASE(i) + 0x18);
+ ub_offset += wm_ub_size;
+ } else
+ msm_camera_io_w(0,
+ vfe_dev->vfe_base + VFE47_WM_BASE(i) + 0x18);
+ }
+}
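
The default UB policy above reserves the guaranteed minimum (min_wm_ub = 96) for every active write master and shares the remaining budget in proportion to each WM's image size. A sketch of that split with a hypothetical total budget and hypothetical image sizes, since the MSM_ISP47_TOTAL_IMAGE_UB_* values are defined outside this excerpt:

#include <stdio.h>
#include <stdint.h>

#define MIN_WM_UB  96    /* from msm_vfe47_axi_hw_info */
#define TOTAL_UB   2048  /* hypothetical total image UB budget */

int main(void)
{
        uint32_t image_size[3] = { 1920 * 1080, 1920 * 540, 640 * 480 };
        uint32_t total_image_size = 0, ub_offset = 0, prop_size;
        int i;

        for (i = 0; i < 3; i++)
                total_image_size += image_size[i];

        /* budget left over after every used WM gets its minimum */
        prop_size = TOTAL_UB - MIN_WM_UB * 3;

        for (i = 0; i < 3; i++) {
                uint64_t delta = (uint64_t)image_size[i] * prop_size /
                                total_image_size;
                uint32_t wm_ub = MIN_WM_UB + (uint32_t)delta;
                /* register layout: offset in [31:16], (size - 1) in [15:0] */
                uint32_t reg = ub_offset << 16 | (wm_ub - 1);

                printf("wm%d: ub = %u, reg = 0x%08X\n", i, wm_ub, reg);
                ub_offset += wm_ub;
        }
        return 0;
}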
+
+static void msm_vfe47_cfg_axi_ub_equal_slicing(
+ struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t ub_equal_slice = 0;
+ if (vfe_dev->pdev->id == ISP_VFE0) {
+ ub_equal_slice = MSM_ISP47_TOTAL_IMAGE_UB_VFE0 /
+ axi_data->hw_info->num_wm;
+ } else if (vfe_dev->pdev->id == ISP_VFE1) {
+ ub_equal_slice = MSM_ISP47_TOTAL_IMAGE_UB_VFE1 /
+ axi_data->hw_info->num_wm;
+ } else {
+		pr_err("%s: incorrect VFE device\n", __func__);
+ }
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ msm_camera_io_w(ub_offset << 16 | (ub_equal_slice - 1),
+ vfe_dev->vfe_base + VFE47_WM_BASE(i) + 0x18);
+ ub_offset += ub_equal_slice;
+ }
+}
+
+static void msm_vfe47_cfg_axi_ub(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ axi_data->wm_ub_cfg_policy = MSM_WM_UB_CFG_DEFAULT;
+ if (axi_data->wm_ub_cfg_policy == MSM_WM_UB_EQUAL_SLICING)
+ msm_vfe47_cfg_axi_ub_equal_slicing(vfe_dev);
+ else
+ msm_vfe47_cfg_axi_ub_equal_default(vfe_dev);
+}
+
+static void msm_vfe47_read_wm_ping_pong_addr(
+ struct vfe_device *vfe_dev)
+{
+ msm_camera_io_dump(vfe_dev->vfe_base +
+ (VFE47_WM_BASE(0) & 0xFFFFFFF0), 0x200, 1);
+}
+
+static void msm_vfe47_update_ping_pong_addr(
+ void __iomem *vfe_base,
+ uint8_t wm_idx, uint32_t pingpong_bit, dma_addr_t paddr,
+ int32_t buf_size)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ uint32_t paddr32_max = 0;
+
+ if (buf_size < 0)
+ buf_size = 0;
+
+ paddr32_max = (paddr + buf_size) & 0xFFFFFFC0;
+
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE47_PING_PONG_BASE(wm_idx, pingpong_bit));
+ msm_camera_io_w(paddr32_max, vfe_base +
+ VFE47_PING_PONG_BASE(wm_idx, pingpong_bit) + 0x4);
+
+}
+
+static int msm_vfe47_axi_halt(struct vfe_device *vfe_dev,
+ uint32_t blocking)
+{
+ int rc = 0;
+ enum msm_vfe_input_src i;
+ uint32_t val = 0;
+
+ val = msm_camera_io_r(vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
+ val |= 0x1;
+ msm_camera_io_w(val, vfe_dev->vfe_vbif_base + VFE47_VBIF_CLK_OFFSET);
+
+ /* Keep only halt and reset mask */
+ msm_camera_io_w(BIT(31), vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w(BIT(8), vfe_dev->vfe_base + 0x60);
+
+	/* Clear IRQ Status0, only leave reset irq mask */
+	msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
+
+	/* Clear IRQ Status1, only leave halt irq mask */
+	msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
+
+	/* Push clear cmd */
+	msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ == OVERFLOW_DETECTED)
+ pr_err_ratelimited("%s: VFE%d halt for recovery, blocking %d\n",
+ __func__, vfe_dev->pdev->id, blocking);
+
+ if (blocking) {
+ init_completion(&vfe_dev->halt_complete);
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400);
+ rc = wait_for_completion_timeout(
+ &vfe_dev->halt_complete, msecs_to_jiffies(500));
+ if (rc <= 0)
+ pr_err("%s:VFE%d halt timeout rc=%d\n", __func__,
+ vfe_dev->pdev->id, rc);
+
+ } else {
+ /* Halt AXI Bus Bridge */
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x400);
+ }
+
+ for (i = VFE_PIX_0; i <= VFE_RAW_2; i++) {
+ /* if any stream is waiting for update, signal complete */
+ if (vfe_dev->axi_data.stream_update[i]) {
+ ISP_DBG("%s: complete stream update\n", __func__);
+ msm_isp_axi_stream_update(vfe_dev, i);
+ if (vfe_dev->axi_data.stream_update[i])
+ msm_isp_axi_stream_update(vfe_dev, i);
+ }
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i])) {
+ ISP_DBG("%s: complete on axi config update\n",
+ __func__);
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ if (atomic_read(&vfe_dev->axi_data.axi_cfg_update[i]))
+ msm_isp_axi_cfg_update(vfe_dev, i);
+ }
+ }
+
+ if (atomic_read(&vfe_dev->stats_data.stats_update)) {
+ ISP_DBG("%s: complete on stats update\n", __func__);
+ msm_isp_stats_stream_update(vfe_dev);
+ if (atomic_read(&vfe_dev->stats_data.stats_update))
+ msm_isp_stats_stream_update(vfe_dev);
+ }
+
+ return rc;
+}
+
+static int msm_vfe47_axi_restart(struct vfe_device *vfe_dev,
+ uint32_t blocking, uint32_t enable_camif)
+{
+ vfe_dev->hw_info->vfe_ops.core_ops.restore_irq_mask(vfe_dev);
+ msm_camera_io_w(0x7FFFFFFF, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0xFFFFFEFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+
+ /* Start AXI */
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x400);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_SRC_MAX);
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+ if (enable_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, ENABLE_CAMIF);
+ }
+
+ return 0;
+}
+
+static uint32_t msm_vfe47_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 8) & 0x7F;
+}
+
+static uint32_t msm_vfe47_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 25) & 0xF;
+}
+
+static uint32_t msm_vfe47_get_pingpong_status(
+ struct vfe_device *vfe_dev)
+{
+ return msm_camera_io_r(vfe_dev->vfe_base + 0x338);
+}
+
+static int msm_vfe47_get_stats_idx(enum msm_isp_stats_type stats_type)
+{
+	/* idx used for composite, needs to map to irq status */
+ switch (stats_type) {
+ case MSM_ISP_STATS_HDR_BE:
+ return STATS_COMP_IDX_HDR_BE;
+ case MSM_ISP_STATS_BG:
+ return STATS_COMP_IDX_BG;
+ case MSM_ISP_STATS_BF:
+ return STATS_COMP_IDX_BF;
+ case MSM_ISP_STATS_HDR_BHIST:
+ return STATS_COMP_IDX_HDR_BHIST;
+ case MSM_ISP_STATS_RS:
+ return STATS_COMP_IDX_RS;
+ case MSM_ISP_STATS_CS:
+ return STATS_COMP_IDX_CS;
+ case MSM_ISP_STATS_IHIST:
+ return STATS_COMP_IDX_IHIST;
+ case MSM_ISP_STATS_BHIST:
+ return STATS_COMP_IDX_BHIST;
+ case MSM_ISP_STATS_AEC_BG:
+ return STATS_COMP_IDX_AEC_BG;
+ default:
+ pr_err("%s: Invalid stats type\n", __func__);
+ return -EINVAL;
+ }
+}
+
+static int msm_vfe47_stats_check_streams(
+ struct msm_vfe_stats_stream *stream_info)
+{
+ return 0;
+}
+
+static void msm_vfe47_stats_cfg_comp_mask(
+ struct vfe_device *vfe_dev, uint32_t stats_mask,
+ uint8_t request_comp_index, uint8_t enable)
+{
+ uint32_t comp_mask_reg;
+ atomic_t *stats_comp_mask;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask < 1)
+ return;
+
+ if (request_comp_index >= MAX_NUM_STATS_COMP_MASK) {
+		pr_err("%s: request comp index %d exceeds max %d\n",
+			__func__, request_comp_index,
+			MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+ if (vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask >
+ MAX_NUM_STATS_COMP_MASK) {
+ pr_err("%s: num of comp masks %d exceed max %d\n",
+ __func__,
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask,
+ MAX_NUM_STATS_COMP_MASK);
+ return;
+ }
+
+ stats_mask = stats_mask & 0x1FF;
+
+ stats_comp_mask = &stats_data->stats_comp_mask[request_comp_index];
+ comp_mask_reg = msm_camera_io_r(vfe_dev->vfe_base + 0x78);
+
+ if (enable) {
+ comp_mask_reg |= stats_mask << (request_comp_index * 16);
+ atomic_set(stats_comp_mask, stats_mask |
+ atomic_read(stats_comp_mask));
+ } else {
+ if (!(atomic_read(stats_comp_mask) & stats_mask))
+ return;
+
+ atomic_set(stats_comp_mask,
+ ~stats_mask & atomic_read(stats_comp_mask));
+ comp_mask_reg &= ~(stats_mask << (request_comp_index * 16));
+ }
+
+ msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x78);
+
+ ISP_DBG("%s: comp_mask_reg: %x comp mask0 %x mask1: %x\n",
+ __func__, comp_mask_reg,
+ atomic_read(&stats_data->stats_comp_mask[0]),
+ atomic_read(&stats_data->stats_comp_mask[1]));
+
+ return;
+}
+
+static void msm_vfe47_stats_cfg_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask;
+ uint32_t irq_mask_1;
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask_1 = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
+
+ switch (STATS_IDX(stream_info->stream_handle)) {
+ case STATS_COMP_IDX_AEC_BG:
+ irq_mask |= 1 << 15;
+ break;
+ case STATS_COMP_IDX_HDR_BE:
+ irq_mask |= 1 << 16;
+ break;
+ case STATS_COMP_IDX_BG:
+ irq_mask |= 1 << 17;
+ break;
+ case STATS_COMP_IDX_BF:
+ irq_mask |= 1 << 18;
+ irq_mask_1 |= 1 << 26;
+ break;
+ case STATS_COMP_IDX_HDR_BHIST:
+ irq_mask |= 1 << 19;
+ break;
+ case STATS_COMP_IDX_RS:
+ irq_mask |= 1 << 20;
+ break;
+ case STATS_COMP_IDX_CS:
+ irq_mask |= 1 << 21;
+ break;
+ case STATS_COMP_IDX_IHIST:
+ irq_mask |= 1 << 22;
+ break;
+ case STATS_COMP_IDX_BHIST:
+ irq_mask |= 1 << 23;
+ break;
+ default:
+ pr_err("%s: Invalid stats idx %d\n", __func__,
+ STATS_IDX(stream_info->stream_handle));
+ }
+
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w(irq_mask_1, vfe_dev->vfe_base + 0x60);
+}
+
+static void msm_vfe47_stats_clear_wm_irq_mask(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t irq_mask, irq_mask_1;
+
+ irq_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ irq_mask_1 = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
+
+ switch (STATS_IDX(stream_info->stream_handle)) {
+ case STATS_COMP_IDX_AEC_BG:
+ irq_mask &= ~(1 << 15);
+ break;
+ case STATS_COMP_IDX_HDR_BE:
+ irq_mask &= ~(1 << 16);
+ break;
+ case STATS_COMP_IDX_BG:
+ irq_mask &= ~(1 << 17);
+ break;
+ case STATS_COMP_IDX_BF:
+ irq_mask &= ~(1 << 18);
+ irq_mask_1 &= ~(1 << 26);
+ break;
+ case STATS_COMP_IDX_HDR_BHIST:
+ irq_mask &= ~(1 << 19);
+ break;
+ case STATS_COMP_IDX_RS:
+ irq_mask &= ~(1 << 20);
+ break;
+ case STATS_COMP_IDX_CS:
+ irq_mask &= ~(1 << 21);
+ break;
+ case STATS_COMP_IDX_IHIST:
+ irq_mask &= ~(1 << 22);
+ break;
+ case STATS_COMP_IDX_BHIST:
+ irq_mask &= ~(1 << 23);
+ break;
+ default:
+ pr_err("%s: Invalid stats idx %d\n", __func__,
+ STATS_IDX(stream_info->stream_handle));
+ }
+
+ msm_camera_io_w(irq_mask, vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w(irq_mask_1, vfe_dev->vfe_base + 0x60);
+}
+
+static void msm_vfe47_stats_cfg_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE47_STATS_BASE(stats_idx);
+
+ /* WR_ADDR_CFG */
+ msm_camera_io_w(stream_info->framedrop_period << 2,
+ vfe_dev->vfe_base + stats_base + 0x10);
+ /* WR_IRQ_FRAMEDROP_PATTERN */
+ msm_camera_io_w(stream_info->framedrop_pattern,
+ vfe_dev->vfe_base + stats_base + 0x18);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(0xFFFFFFFF,
+ vfe_dev->vfe_base + stats_base + 0x1C);
+}
+
+static void msm_vfe47_stats_clear_wm_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ uint32_t val = 0;
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+ uint32_t stats_base = VFE47_STATS_BASE(stats_idx);
+
+ /* WR_ADDR_CFG */
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x10);
+ /* WR_IRQ_FRAMEDROP_PATTERN */
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x18);
+ /* WR_IRQ_SUBSAMPLE_PATTERN */
+ msm_camera_io_w(val, vfe_dev->vfe_base + stats_base + 0x1C);
+}
+
+static void msm_vfe47_stats_cfg_ub(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t ub_offset = 0;
+ uint32_t ub_size[VFE47_NUM_STATS_TYPE] = {
+ 16, /* MSM_ISP_STATS_HDR_BE */
+ 16, /* MSM_ISP_STATS_BG */
+ 16, /* MSM_ISP_STATS_BF */
+ 16, /* MSM_ISP_STATS_HDR_BHIST */
+ 16, /* MSM_ISP_STATS_RS */
+ 16, /* MSM_ISP_STATS_CS */
+ 16, /* MSM_ISP_STATS_IHIST */
+ 16, /* MSM_ISP_STATS_BHIST */
+ 16, /* MSM_ISP_STATS_AEC_BG */
+ };
+ if (vfe_dev->pdev->id == ISP_VFE1)
+ ub_offset = VFE47_UB_SIZE_VFE1;
+ else if (vfe_dev->pdev->id == ISP_VFE0)
+ ub_offset = VFE47_UB_SIZE_VFE0;
+ else
+ pr_err("%s: incorrect VFE device\n", __func__);
+
+ for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
+ ub_offset -= ub_size[i];
+ msm_camera_io_w(VFE47_STATS_BURST_LEN << 30 |
+ ub_offset << 16 | (ub_size[i] - 1),
+ vfe_dev->vfe_base + VFE47_STATS_BASE(i) + 0x14);
+ }
+}
+
+static void msm_vfe47_stats_update_cgc_override(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, cgc_mask = 0;
+
+ for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case STATS_COMP_IDX_HDR_BE:
+ cgc_mask |= 1;
+ break;
+ case STATS_COMP_IDX_BG:
+ cgc_mask |= (1 << 3);
+ break;
+ case STATS_COMP_IDX_BHIST:
+ cgc_mask |= (1 << 4);
+ break;
+ case STATS_COMP_IDX_RS:
+ cgc_mask |= (1 << 5);
+ break;
+ case STATS_COMP_IDX_CS:
+ cgc_mask |= (1 << 6);
+ break;
+ case STATS_COMP_IDX_IHIST:
+ cgc_mask |= (1 << 7);
+ break;
+ case STATS_COMP_IDX_AEC_BG:
+ cgc_mask |= (1 << 8);
+ break;
+ case STATS_COMP_IDX_BF:
+ cgc_mask |= (1 << 2);
+ break;
+ case STATS_COMP_IDX_HDR_BHIST:
+ cgc_mask |= (1 << 1);
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ /* CGC override: enforce BAF for DMI */
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x30);
+ if (enable)
+ module_cfg |= cgc_mask;
+ else
+ module_cfg &= ~cgc_mask;
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x30);
+}
+
+static bool msm_vfe47_is_module_cfg_lock_needed(
+ uint32_t reg_offset)
+{
+ return false;
+}
+
+static void msm_vfe47_stats_enable_module(struct vfe_device *vfe_dev,
+ uint32_t stats_mask, uint8_t enable)
+{
+ int i;
+ uint32_t module_cfg, module_cfg_mask = 0;
+
+	/* BF stats involve DMI cfg, ignore */
+ for (i = 0; i < VFE47_NUM_STATS_TYPE; i++) {
+ if ((stats_mask >> i) & 0x1) {
+ switch (i) {
+ case STATS_COMP_IDX_HDR_BE:
+ module_cfg_mask |= 1;
+ break;
+ case STATS_COMP_IDX_HDR_BHIST:
+ module_cfg_mask |= 1 << 1;
+ break;
+ case STATS_COMP_IDX_BF:
+ module_cfg_mask |= 1 << 2;
+ break;
+ case STATS_COMP_IDX_BG:
+ module_cfg_mask |= 1 << 3;
+ break;
+ case STATS_COMP_IDX_BHIST:
+ module_cfg_mask |= 1 << 4;
+ break;
+ case STATS_COMP_IDX_RS:
+ module_cfg_mask |= 1 << 5;
+ break;
+ case STATS_COMP_IDX_CS:
+ module_cfg_mask |= 1 << 6;
+ break;
+ case STATS_COMP_IDX_IHIST:
+ module_cfg_mask |= 1 << 7;
+ break;
+ case STATS_COMP_IDX_AEC_BG:
+ module_cfg_mask |= 1 << 8;
+ break;
+ default:
+ pr_err("%s: Invalid stats mask\n", __func__);
+ return;
+ }
+ }
+ }
+
+ module_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x44);
+ if (enable)
+ module_cfg |= module_cfg_mask;
+ else
+ module_cfg &= ~module_cfg_mask;
+
+ msm_camera_io_w(module_cfg, vfe_dev->vfe_base + 0x44);
+
+/* need to move to userspace
+ uint32_t stats_cfg;
+ stats_cfg = msm_camera_io_r(vfe_dev->vfe_base + 0x9B8);
+ if (enable)
+ stats_cfg |= stats_cfg_mask;
+ else
+ stats_cfg &= ~stats_cfg_mask;
+ msm_camera_io_w(stats_cfg, vfe_dev->vfe_base + 0x9B8);
+*/
+}
+
+static void msm_vfe47_stats_update_ping_pong_addr(
+ void __iomem *vfe_base, struct msm_vfe_stats_stream *stream_info,
+ uint32_t pingpong_status, dma_addr_t paddr)
+{
+ uint32_t paddr32 = (paddr & 0xFFFFFFFF);
+ int stats_idx = STATS_IDX(stream_info->stream_handle);
+
+ msm_camera_io_w(paddr32, vfe_base +
+ VFE47_STATS_PING_PONG_BASE(stats_idx, pingpong_status));
+}
+
+static uint32_t msm_vfe47_stats_get_wm_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+	/*
+	 * TODO: define bf early done irq in status_0 and
+	 * bf pingpong done in status_1
+	 */
+ uint32_t comp_mapped_irq_mask = 0;
+ int i = 0;
+
+ /*
+	 * remove early done and handle separately,
+ * add bf idx on status 1
+ */
+ irq_status0 &= ~(1 << 18);
+
+ for (i = 0; i < VFE47_NUM_STATS_TYPE; i++)
+ if ((irq_status0 >> stats_irq_map_comp_mask[i]) & 0x1)
+ comp_mapped_irq_mask |= (1 << i);
+ if ((irq_status1 >> 26) & 0x1)
+ comp_mapped_irq_mask |= (1 << STATS_COMP_IDX_BF);
+
+ return comp_mapped_irq_mask;
+}
+
+static uint32_t msm_vfe47_stats_get_comp_mask(
+ uint32_t irq_status0, uint32_t irq_status1)
+{
+ return (irq_status0 >> 29) & 0x3;
+}
+
+static uint32_t msm_vfe47_stats_get_frame_id(
+ struct vfe_device *vfe_dev)
+{
+ return vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+}
+
+static int msm_vfe47_get_platform_data(struct vfe_device *vfe_dev)
+{
+ int rc = 0;
+
+ vfe_dev->vfe_mem = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe");
+ if (!vfe_dev->vfe_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_vbif_mem = platform_get_resource_byname(
+ vfe_dev->pdev,
+ IORESOURCE_MEM, "vfe_vbif");
+ if (!vfe_dev->vfe_vbif_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+ vfe_dev->vfe_irq = platform_get_resource_byname(vfe_dev->pdev,
+ IORESOURCE_IRQ, "vfe");
+ if (!vfe_dev->vfe_irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto vfe_no_resource;
+ }
+
+vfe_no_resource:
+ return rc;
+}
+
+static void msm_vfe47_get_error_mask(
+ uint32_t *error_mask0, uint32_t *error_mask1)
+{
+ *error_mask0 = 0x00000000;
+ *error_mask1 = 0x0BFFFEFF;
+}
+
+static void msm_vfe47_get_overflow_mask(uint32_t *overflow_mask)
+{
+ *overflow_mask = 0x09FFFE7E;
+}
+
+static void msm_vfe47_get_rdi_wm_mask(struct vfe_device *vfe_dev,
+ uint32_t *rdi_wm_mask)
+{
+ *rdi_wm_mask = vfe_dev->axi_data.rdi_wm_mask;
+}
+
+static void msm_vfe47_get_irq_mask(struct vfe_device *vfe_dev,
+ uint32_t *irq0_mask, uint32_t *irq1_mask)
+{
+ *irq0_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
+ *irq1_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
+}
+
+static void msm_vfe47_restore_irq_mask(struct vfe_device *vfe_dev)
+{
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask0,
+ vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w(vfe_dev->error_info.overflow_recover_irq_mask1,
+ vfe_dev->vfe_base + 0x60);
+}
+
+static void msm_vfe47_get_halt_restart_mask(uint32_t *irq0_mask,
+ uint32_t *irq1_mask)
+{
+ *irq0_mask = BIT(31);
+ *irq1_mask = BIT(8);
+}
+
+static struct msm_vfe_axi_hardware_info msm_vfe47_axi_hw_info = {
+ .num_wm = 7,
+ .num_comp_mask = 3,
+ .num_rdi = 3,
+ .num_rdi_master = 3,
+ .min_wm_ub = 96,
+ .scratch_buf_range = SZ_32M,
+};
+
+static struct msm_vfe_stats_hardware_info msm_vfe47_stats_hw_info = {
+ .stats_capability_mask =
+ 1 << MSM_ISP_STATS_HDR_BE | 1 << MSM_ISP_STATS_BF |
+ 1 << MSM_ISP_STATS_BG | 1 << MSM_ISP_STATS_BHIST |
+ 1 << MSM_ISP_STATS_HDR_BHIST | 1 << MSM_ISP_STATS_IHIST |
+ 1 << MSM_ISP_STATS_RS | 1 << MSM_ISP_STATS_CS |
+ 1 << MSM_ISP_STATS_AEC_BG,
+ .stats_ping_pong_offset = stats_pingpong_offset_map,
+ .num_stats_type = VFE47_NUM_STATS_TYPE,
+ .num_stats_comp_mask = VFE47_NUM_STATS_COMP,
+};
+
+struct msm_vfe_hardware_info vfe47_hw_info = {
+ .num_iommu_ctx = 1,
+ .num_iommu_secure_ctx = 0,
+ .vfe_clk_idx = VFE47_SRC_CLK_DTSI_IDX,
+ .runtime_axi_update = 1,
+ .vfe_ops = {
+ .irq_ops = {
+ .read_irq_status = msm_vfe47_read_irq_status,
+ .process_camif_irq = msm_vfe47_process_input_irq,
+ .process_reset_irq = msm_vfe47_process_reset_irq,
+ .process_halt_irq = msm_vfe47_process_halt_irq,
+ .process_reg_update = msm_vfe47_process_reg_update,
+ .process_axi_irq = msm_isp_process_axi_irq,
+ .process_stats_irq = msm_isp_process_stats_irq,
+ .process_epoch_irq = msm_vfe47_process_epoch_irq,
+ .enable_camif_err = msm_vfe47_enable_camif_error,
+ },
+ .axi_ops = {
+ .reload_wm = msm_vfe47_axi_reload_wm,
+ .enable_wm = msm_vfe47_axi_enable_wm,
+ .cfg_io_format = msm_vfe47_cfg_io_format,
+ .cfg_comp_mask = msm_vfe47_axi_cfg_comp_mask,
+ .clear_comp_mask = msm_vfe47_axi_clear_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe47_axi_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe47_axi_clear_wm_irq_mask,
+ .cfg_framedrop = msm_vfe47_cfg_framedrop,
+ .clear_framedrop = msm_vfe47_clear_framedrop,
+ .cfg_wm_reg = msm_vfe47_axi_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe47_axi_clear_wm_reg,
+ .cfg_wm_xbar_reg = msm_vfe47_axi_cfg_wm_xbar_reg,
+ .clear_wm_xbar_reg = msm_vfe47_axi_clear_wm_xbar_reg,
+ .cfg_ub = msm_vfe47_cfg_axi_ub,
+ .read_wm_ping_pong_addr =
+ msm_vfe47_read_wm_ping_pong_addr,
+ .update_ping_pong_addr =
+ msm_vfe47_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe47_get_comp_mask,
+ .get_wm_mask = msm_vfe47_get_wm_mask,
+ .get_pingpong_status = msm_vfe47_get_pingpong_status,
+ .halt = msm_vfe47_axi_halt,
+ .restart = msm_vfe47_axi_restart,
+ .update_cgc_override =
+ msm_vfe47_axi_update_cgc_override,
+ },
+ .core_ops = {
+ .reg_update = msm_vfe47_reg_update,
+ .cfg_input_mux = msm_vfe47_cfg_input_mux,
+ .update_camif_state = msm_vfe47_update_camif_state,
+ .start_fetch_eng = msm_vfe47_start_fetch_engine,
+ .cfg_rdi_reg = msm_vfe47_cfg_rdi_reg,
+ .reset_hw = msm_vfe47_reset_hardware,
+ .init_hw = msm_vfe47_init_hardware,
+ .init_hw_reg = msm_vfe47_init_hardware_reg,
+ .clear_status_reg = msm_vfe47_clear_status_reg,
+ .release_hw = msm_vfe47_release_hardware,
+ .get_platform_data = msm_vfe47_get_platform_data,
+ .get_error_mask = msm_vfe47_get_error_mask,
+ .get_overflow_mask = msm_vfe47_get_overflow_mask,
+ .get_rdi_wm_mask = msm_vfe47_get_rdi_wm_mask,
+ .get_irq_mask = msm_vfe47_get_irq_mask,
+ .restore_irq_mask = msm_vfe47_restore_irq_mask,
+ .get_halt_restart_mask =
+ msm_vfe47_get_halt_restart_mask,
+ .process_error_status = msm_vfe47_process_error_status,
+ .is_module_cfg_lock_needed =
+ msm_vfe47_is_module_cfg_lock_needed,
+ },
+ .stats_ops = {
+ .get_stats_idx = msm_vfe47_get_stats_idx,
+ .check_streams = msm_vfe47_stats_check_streams,
+ .cfg_comp_mask = msm_vfe47_stats_cfg_comp_mask,
+ .cfg_wm_irq_mask = msm_vfe47_stats_cfg_wm_irq_mask,
+ .clear_wm_irq_mask = msm_vfe47_stats_clear_wm_irq_mask,
+ .cfg_wm_reg = msm_vfe47_stats_cfg_wm_reg,
+ .clear_wm_reg = msm_vfe47_stats_clear_wm_reg,
+ .cfg_ub = msm_vfe47_stats_cfg_ub,
+ .enable_module = msm_vfe47_stats_enable_module,
+ .update_ping_pong_addr =
+ msm_vfe47_stats_update_ping_pong_addr,
+ .get_comp_mask = msm_vfe47_stats_get_comp_mask,
+ .get_wm_mask = msm_vfe47_stats_get_wm_mask,
+ .get_frame_id = msm_vfe47_stats_get_frame_id,
+ .get_pingpong_status = msm_vfe47_get_pingpong_status,
+ .update_cgc_override =
+ msm_vfe47_stats_update_cgc_override,
+ },
+ },
+ .dmi_reg_offset = 0xC2C,
+ .axi_hw_info = &msm_vfe47_axi_hw_info,
+ .stats_hw_info = &msm_vfe47_stats_hw_info,
+};
+EXPORT_SYMBOL(vfe47_hw_info);
+
+static const struct of_device_id msm_vfe47_dt_match[] = {
+ {
+ .compatible = "qcom,vfe47",
+ .data = &vfe47_hw_info,
+ },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_vfe47_dt_match);
+
+static struct platform_driver vfe47_driver = {
+ .probe = vfe_hw_probe,
+ .driver = {
+ .name = "msm_vfe47",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_vfe47_dt_match,
+ },
+};
+
+static int __init msm_vfe47_init_module(void)
+{
+ return platform_driver_register(&vfe47_driver);
+}
+
+static void __exit msm_vfe47_exit_module(void)
+{
+ platform_driver_unregister(&vfe47_driver);
+}
+
+module_init(msm_vfe47_init_module);
+module_exit(msm_vfe47_exit_module);
+MODULE_DESCRIPTION("MSM VFE47 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
new file mode 100644
index 000000000000..12b079678dc1
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISP47_H__
+#define __MSM_ISP47_H__
+
+extern struct msm_vfe_hardware_info vfe47_hw_info;
+#endif /* __MSM_ISP47_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
new file mode 100644
index 000000000000..fca0d319b9ef
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -0,0 +1,3356 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <asm/div64.h>
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+
+#define HANDLE_TO_IDX(handle) (handle & 0xFF)
+#define ISP_SOF_DEBUG_COUNT 0
+
+static int msm_isp_update_dual_HW_ms_info_at_start(
+ struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src stream_src);
+
+#define DUAL_VFE_AND_VFE1(s, v) ((s->stream_src < RDI_INTF_0) && \
+	v->is_split && v->pdev->id == ISP_VFE1)
+
+#define RDI_OR_NOT_DUAL_VFE(v, s) (!v->is_split || \
+	((s->stream_src >= RDI_INTF_0) && \
+	(s->stream_src <= RDI_INTF_2)))
+
+static inline struct msm_vfe_axi_stream *__dual_vfe_stream(
+ struct dual_vfe_resource *d,
+ struct msm_vfe_axi_stream *s, int vfe_id)
+{
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ struct msm_vfe_axi_shared_data *axi_data;
+ uint32_t handle = 0;
+
+ axi_data = d->axi_data[vfe_id];
+ handle = HANDLE_TO_IDX(s->stream_handle);
+ if (!axi_data || (handle >= VFE_AXI_SRC_MAX)) {
+		pr_err("%s:%d invalid axi data for vfe %d, handle %d\n",
+			__func__, __LINE__,
+			vfe_id, handle);
+ return NULL;
+ }
+ stream_info = &axi_data->stream_info[handle];
+ return stream_info;
+}
+
+int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ int i = stream_cfg_cmd->stream_src;
+
+ if (i >= VFE_AXI_SRC_MAX) {
+ pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
+ stream_cfg_cmd->stream_src);
+ return -EINVAL;
+ }
+
+ if (axi_data->stream_info[i].state != AVAILABLE) {
+ pr_err("%s:%d invalid state %d expected %d for src %d\n",
+ __func__, __LINE__, axi_data->stream_info[i].state,
+ AVAILABLE, i);
+ return -EINVAL;
+ }
+
+ if ((axi_data->stream_handle_cnt << 8) == 0)
+ axi_data->stream_handle_cnt++;
+
+ stream_cfg_cmd->axi_stream_handle =
+ (++axi_data->stream_handle_cnt) << 8 | i;
+
+ ISP_DBG(" vfe %d handle %x\n", vfe_dev->pdev->id,
+ stream_cfg_cmd->axi_stream_handle);
+
+ memset(&axi_data->stream_info[i], 0,
+ sizeof(struct msm_vfe_axi_stream));
+ spin_lock_init(&axi_data->stream_info[i].lock);
+ axi_data->stream_info[i].session_id = stream_cfg_cmd->session_id;
+ axi_data->stream_info[i].stream_id = stream_cfg_cmd->stream_id;
+ axi_data->stream_info[i].buf_divert = stream_cfg_cmd->buf_divert;
+ axi_data->stream_info[i].state = INACTIVE;
+ axi_data->stream_info[i].stream_handle =
+ stream_cfg_cmd->axi_stream_handle;
+ axi_data->stream_info[i].controllable_output =
+ stream_cfg_cmd->controllable_output;
+ axi_data->stream_info[i].prev_framedrop_period = 0x7FFFFFFF;
+ if (stream_cfg_cmd->controllable_output)
+ stream_cfg_cmd->frame_skip_pattern = SKIP_ALL;
+ INIT_LIST_HEAD(&axi_data->stream_info[i].request_q);
+ return 0;
+}
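
msm_isp_axi_create_stream() builds the AXI stream handle by packing a running counter into the upper bits and the stream source index into the low byte, which HANDLE_TO_IDX() later recovers. A small sketch of that encoding (counter and index values are made up):

#include <stdio.h>
#include <stdint.h>

#define HANDLE_TO_IDX(handle) ((handle) & 0xFF)

int main(void)
{
        uint32_t stream_handle_cnt = 0x12;  /* hypothetical running counter */
        int src_idx = 5;                    /* e.g. an RDI source index */
        uint32_t handle;

        /* same shape as stream_cfg_cmd->axi_stream_handle above */
        handle = (++stream_handle_cnt) << 8 | src_idx;

        printf("handle = 0x%X, idx = %u\n", handle, HANDLE_TO_IDX(handle));
        return 0;  /* prints handle = 0x1305, idx = 5 */
}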
+
+void msm_isp_axi_destroy_stream(
+ struct msm_vfe_axi_shared_data *axi_data, int stream_idx)
+{
+ if (axi_data->stream_info[stream_idx].state != AVAILABLE) {
+ axi_data->stream_info[stream_idx].state = AVAILABLE;
+ axi_data->stream_info[stream_idx].stream_handle = 0;
+ } else {
+ pr_err("%s: stream does not exist\n", __func__);
+ }
+}
+
+int msm_isp_validate_axi_request(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ int rc = -1, i;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)
+ < VFE_AXI_SRC_MAX) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ } else {
+ pr_err("%s: Invalid axi_stream_handle\n", __func__);
+ return rc;
+ }
+
+ if (!stream_info) {
+ pr_err("%s: Stream info is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (stream_cfg_cmd->output_format) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ case V4L2_PIX_FMT_GREY:
+ stream_info->num_planes = 1;
+ stream_info->format_factor = ISP_Q2;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ stream_info->num_planes = 2;
+ stream_info->format_factor = 1.5 * ISP_Q2;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ stream_info->num_planes = 2;
+ stream_info->format_factor = 2 * ISP_Q2;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ stream_info->num_planes = 2;
+ stream_info->format_factor = 3 * ISP_Q2;
+ break;
+	/* TODO: Add more image formats */
+ default:
+ msm_isp_print_fourcc_error(__func__,
+ stream_cfg_cmd->output_format);
+ return rc;
+ }
+
+ if (axi_data->hw_info->num_wm - axi_data->num_used_wm <
+ stream_info->num_planes) {
+ pr_err("%s: No free write masters\n", __func__);
+ return rc;
+ }
+
+ if ((stream_info->num_planes > 1) &&
+ (axi_data->hw_info->num_comp_mask -
+ axi_data->num_used_composite_mask < 1)) {
+ pr_err("%s: No free composite mask\n", __func__);
+ return rc;
+ }
+
+ if (stream_cfg_cmd->init_frame_drop >= MAX_INIT_FRAME_DROP) {
+ pr_err("%s: Invalid skip pattern\n", __func__);
+ return rc;
+ }
+
+ if (stream_cfg_cmd->frame_skip_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid skip pattern\n", __func__);
+ return rc;
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ stream_info->plane_cfg[i] = stream_cfg_cmd->plane_cfg[i];
+ stream_info->max_width = max(stream_info->max_width,
+ stream_cfg_cmd->plane_cfg[i].output_width);
+ }
+
+ stream_info->output_format = stream_cfg_cmd->output_format;
+ stream_info->runtime_output_format = stream_info->output_format;
+ stream_info->stream_src = stream_cfg_cmd->stream_src;
+ stream_info->frame_based = stream_cfg_cmd->frame_base;
+ return 0;
+}
+
+static uint32_t msm_isp_axi_get_plane_size(
+ struct msm_vfe_axi_stream *stream_info, int plane_idx)
+{
+ uint32_t size = 0;
+ struct msm_vfe_axi_plane_cfg *plane_cfg = stream_info->plane_cfg;
+ switch (stream_info->output_format) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ case V4L2_PIX_FMT_GREY:
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ /* TODO: fix me */
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ /* TODO: fix me */
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ else
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ if (plane_cfg[plane_idx].output_plane_format == Y_PLANE)
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ else
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ size = plane_cfg[plane_idx].output_height *
+ plane_cfg[plane_idx].output_width;
+ break;
+	/* TODO: Add more image formats */
+ default:
+ msm_isp_print_fourcc_error(__func__,
+ stream_info->output_format);
+ break;
+ }
+ return size;
+}
+
+void msm_isp_axi_reserve_wm(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i, j;
+ for (i = 0; i < stream_info->num_planes; i++) {
+ for (j = 0; j < axi_data->hw_info->num_wm; j++) {
+ if (!axi_data->free_wm[j]) {
+ axi_data->free_wm[j] =
+ stream_info->stream_handle;
+ axi_data->wm_image_size[j] =
+ msm_isp_axi_get_plane_size(
+ stream_info, i);
+ axi_data->num_used_wm++;
+ break;
+ }
+ }
+ ISP_DBG("%s vfe %d stream_handle %x wm %d\n", __func__,
+ vfe_dev->pdev->id,
+ stream_info->stream_handle, j);
+ stream_info->wm[i] = j;
+ }
+}
+
+void msm_isp_axi_free_wm(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i;
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ axi_data->free_wm[stream_info->wm[i]] = 0;
+ axi_data->num_used_wm--;
+ }
+ if (stream_info->stream_src <= IDEAL_RAW)
+ axi_data->num_pix_stream++;
+ else if (stream_info->stream_src < VFE_AXI_SRC_MAX)
+ axi_data->num_rdi_stream++;
+}
+
+void msm_isp_axi_reserve_comp_mask(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i;
+ uint8_t comp_mask = 0;
+ for (i = 0; i < stream_info->num_planes; i++)
+ comp_mask |= 1 << stream_info->wm[i];
+
+ for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+ if (!axi_data->composite_info[i].stream_handle) {
+ axi_data->composite_info[i].stream_handle =
+ stream_info->stream_handle;
+ axi_data->composite_info[i].
+ stream_composite_mask = comp_mask;
+ axi_data->num_used_composite_mask++;
+ break;
+ }
+ }
+ stream_info->comp_mask_index = i;
+ return;
+}
+
+void msm_isp_axi_free_comp_mask(struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ axi_data->composite_info[stream_info->comp_mask_index].
+ stream_composite_mask = 0;
+ axi_data->composite_info[stream_info->comp_mask_index].
+ stream_handle = 0;
+ axi_data->num_used_composite_mask--;
+}
+
+int msm_isp_axi_check_stream_state(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int rc = 0, i;
+ unsigned long flags;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+ enum msm_vfe_axi_state valid_state =
+ (stream_cfg_cmd->cmd == START_STREAM) ? INACTIVE : ACTIVE;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state != valid_state) {
+ if ((stream_info->state == PAUSING ||
+ stream_info->state == PAUSED ||
+ stream_info->state == RESUME_PENDING ||
+ stream_info->state == RESUMING ||
+ stream_info->state == UPDATING) &&
+ (stream_cfg_cmd->cmd == STOP_STREAM ||
+ stream_cfg_cmd->cmd == STOP_IMMEDIATELY)) {
+ stream_info->state = ACTIVE;
+ } else {
+ pr_err("%s: Invalid stream state: %d\n",
+ __func__, stream_info->state);
+ spin_unlock_irqrestore(
+ &stream_info->lock, flags);
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ rc = -EINVAL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ return rc;
+}
+
+/**
+ * msm_isp_cfg_framedrop_reg() - Program the period and pattern
+ * @vfe_dev: The device for which the period and pattern are programmed
+ * @stream_info: The stream for which programming is done
+ *
+ * This function calculates the period and pattern to be configured
+ * for the stream based on the current frame id of the stream's input
+ * source and the initial framedrops.
+ *
+ * Returns void.
+ */
+static void msm_isp_cfg_framedrop_reg(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ struct msm_vfe_axi_stream *vfe0_stream_info = NULL;
+ uint32_t runtime_init_frame_drop;
+
+ uint32_t framedrop_pattern = 0;
+ uint32_t framedrop_period = MSM_VFE_STREAM_STOP_PERIOD;
+ enum msm_vfe_input_src frame_src = SRC_TO_INTF(stream_info->stream_src);
+
+ if (vfe_dev->axi_data.src_info[frame_src].frame_id >=
+ stream_info->init_frame_drop)
+ runtime_init_frame_drop = 0;
+ else
+ runtime_init_frame_drop = stream_info->init_frame_drop -
+ vfe_dev->axi_data.src_info[frame_src].frame_id;
+
+ if (!runtime_init_frame_drop)
+ framedrop_period = stream_info->current_framedrop_period;
+
+ if (MSM_VFE_STREAM_STOP_PERIOD != framedrop_period)
+ framedrop_pattern = 0x1;
+
+ ISP_DBG("%s: stream %x framedrop pattern %x period %u\n", __func__,
+ stream_info->stream_handle, framedrop_pattern,
+ framedrop_period);
+
+ BUG_ON(0 == framedrop_period);
+ if (DUAL_VFE_AND_VFE1(stream_info, vfe_dev)) {
+ vfe0_stream_info = __dual_vfe_stream(
+ vfe_dev->common_data->dual_vfe_res,
+ stream_info, ISP_VFE0);
+ if (vfe0_stream_info) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
+ vfe_dev->common_data->dual_vfe_res->
+ vfe_base[ISP_VFE0],
+ vfe0_stream_info, framedrop_pattern,
+ framedrop_period);
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
+ vfe_dev->vfe_base, stream_info,
+ framedrop_pattern,
+ framedrop_period);
+
+ stream_info->prev_framedrop_period =
+ (framedrop_period | 0x80000000);
+ vfe0_stream_info->prev_framedrop_period =
+ (framedrop_period | 0x80000000);
+ }
+ } else if (RDI_OR_NOT_DUAL_VFE(vfe_dev, stream_info)) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
+ vfe_dev->vfe_base, stream_info, framedrop_pattern,
+ framedrop_period);
+ stream_info->prev_framedrop_period =
+ (framedrop_period | 0x80000000);
+ }
+}
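
msm_isp_cfg_framedrop_reg() counts the remaining initial frame drops against the input's current frame id; while drops remain it keeps the stream-stop period with pattern 0, and once they are exhausted it programs the runtime period with pattern 0x1. A sketch of that decision, using a placeholder value for MSM_VFE_STREAM_STOP_PERIOD, which is defined outside this excerpt:

#include <stdio.h>
#include <stdint.h>

#define STREAM_STOP_PERIOD 0x7FFFFFFF  /* hypothetical stand-in for
                                        * MSM_VFE_STREAM_STOP_PERIOD */

int main(void)
{
        uint32_t frame_id = 3, init_frame_drop = 5;
        uint32_t current_framedrop_period = 4;  /* write 1 of every 4 frames */
        uint32_t runtime_init_frame_drop, period, pattern = 0;

        runtime_init_frame_drop = (frame_id >= init_frame_drop) ?
                        0 : init_frame_drop - frame_id;

        period = runtime_init_frame_drop ?
                        STREAM_STOP_PERIOD : current_framedrop_period;
        if (period != STREAM_STOP_PERIOD)
                pattern = 0x1;

        printf("remaining drops %u -> pattern 0x%x, period %u\n",
                runtime_init_frame_drop, pattern, period);
        return 0;
}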
+
+/**
+ * msm_isp_update_framedrop_reg() - Update frame period pattern on h/w
+ * @vfe_dev: The h/w on which the period and pattern are updated.
+ * @frame_src: Input source.
+ *
+ * If the period and pattern need to be updated for a stream, they are
+ * updated here. Updates happen when the initial frame drop count reaches 0
+ * or when burst streams are given a new skip pattern from user space.
+ *
+ * Returns void
+ */
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ int i;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+ unsigned long flags;
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ frame_src) {
+ continue;
+ }
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state != ACTIVE)
+ continue;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+
+ if (BURST_STREAM == stream_info->stream_type) {
+ if (0 == stream_info->runtime_num_burst_capture)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ }
+
+ /*
+ * re-configure the period pattern, only if it's not already
+ * set to what we want
+ */
+ if (stream_info->current_framedrop_period !=
+ stream_info->prev_framedrop_period) {
+ /*
+ * If we previously tried to set a valid period which
+ * did not take effect then we may have missed a reg
+ * update, print error to indicate this condition
+ */
+ if ((stream_info->prev_framedrop_period & 0x80000000) &&
+ (stream_info->current_framedrop_period ==
+ (stream_info->prev_framedrop_period &
+ ~0x80000000)))
+				ISP_DBG("Framedrop setting for %p has not taken effect %x/%x, frame_src %x\n",
+ stream_info,
+ stream_info->prev_framedrop_period,
+ stream_info->current_framedrop_period,
+ frame_src);
+
+ msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+}
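
msm_isp_cfg_framedrop_reg() stores the last programmed period with bit 31 set as a "programmed, waiting for reg update" marker, and msm_isp_update_framedrop_reg() uses that marker to spot a period that was written but never took effect. A sketch of just that flag check (values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t current_period = 4;
        /* what cfg_framedrop_reg stored after programming period 4 */
        uint32_t prev_period = 4 | 0x80000000;
        int pending;

        pending = (prev_period & 0x80000000) &&
                  current_period == (prev_period & ~0x80000000u);

        if (current_period != prev_period) {
                if (pending)
                        printf("period %u programmed but not yet applied\n",
                                current_period);
                /* ...re-program the period/pattern here... */
        }
        return 0;
}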
+
+/**
+ * msm_isp_reset_framedrop() - Compute the framedrop period pattern
+ * @vfe_dev: Device for which the period and pattern are computed
+ * @stream_info: The stream for which the period and pattern are generated
+ *
+ * This function is called when a stream starts or is reset. Its main
+ * purpose is to set up the runtime framedrop parameters required
+ * for the stream.
+ *
+ * Returns void
+ */
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ stream_info->runtime_num_burst_capture = stream_info->num_burst_capture;
+
+	/*
+	 * Only reset non-controllable output streams, since a
+	 * controllable stream's framedrop period is driven by the
+	 * request frame API.
+	 */
+ if (!stream_info->controllable_output) {
+ stream_info->current_framedrop_period =
+ msm_isp_get_framedrop_period(
+ stream_info->frame_skip_pattern);
+ }
+
+ msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ ISP_DBG("%s: init frame drop: %d\n", __func__,
+ stream_info->init_frame_drop);
+ ISP_DBG("%s: num_burst_capture: %d\n", __func__,
+ stream_info->runtime_num_burst_capture);
+}
+
+void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
+ struct msm_isp_timestamp *ts, struct msm_isp_sof_info *sof_info)
+{
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data;
+ int i;
+
+ if (!vfe_dev || !sof_info) {
+ pr_err("%s %d failed: vfe_dev %p sof_info %p\n", __func__,
+ __LINE__, vfe_dev, sof_info);
+ return;
+ }
+ sof_info->regs_not_updated = 0;
+ sof_info->reg_update_fail_mask = 0;
+ sof_info->stream_get_buf_fail_mask = 0;
+
+ axi_data = &vfe_dev->axi_data;
+ /* report that registers are not updated and return empty buffer for
+ * controllable outputs
+ */
+ if (!vfe_dev->reg_updated) {
+ sof_info->regs_not_updated =
+ vfe_dev->reg_update_requested;
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state != ACTIVE ||
+ !stream_info->controllable_output ||
+ (SRC_TO_INTF(stream_info->stream_src) !=
+ VFE_PIX_0))
+ continue;
+
+ if (stream_info->undelivered_request_cnt) {
+ if (msm_isp_drop_frame(vfe_dev, stream_info, ts,
+ sof_info)) {
+ pr_err("drop frame failed\n");
+
+ }
+ }
+ }
+ }
+ vfe_dev->reg_updated = 0;
+
+ /* report frame drop per stream */
+ if (vfe_dev->error_info.framedrop_flag) {
+ for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++) {
+ if (vfe_dev->error_info.stream_framedrop_count[i]) {
+ ISP_DBG("%s: get buf failed i %d\n", __func__,
+ i);
+ sof_info->stream_get_buf_fail_mask |= (1 << i);
+ vfe_dev->error_info.
+ stream_framedrop_count[i] = 0;
+ }
+ }
+ vfe_dev->error_info.framedrop_flag = 0;
+ }
+}
+
+void msm_isp_increment_frame_id(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts)
+{
+ struct msm_vfe_src_info *src_info = NULL;
+ struct msm_vfe_sof_info *sof_info = NULL;
+ enum msm_vfe_dual_hw_type dual_hw_type;
+ enum msm_vfe_dual_hw_ms_type ms_type;
+ struct msm_vfe_sof_info *master_sof_info = NULL;
+ int32_t time, master_time, delta;
+ uint32_t sof_incr = 0;
+ unsigned long flags;
+
+ if (vfe_dev->axi_data.src_info[frame_src].frame_id == 0)
+ msm_isp_update_dual_HW_ms_info_at_start(vfe_dev, frame_src);
+
+ spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
+ dual_hw_type =
+ vfe_dev->axi_data.src_info[frame_src].dual_hw_type;
+ ms_type =
+ vfe_dev->axi_data.src_info[frame_src].
+ dual_hw_ms_info.dual_hw_ms_type;
+ /*
+ * Increment frame_id if
+ * 1. Not Master Slave
+ * 2. Master
+ * 3. Slave and Master is Inactive
+ *
+ * OR
+ * (in other words)
+ * If SLAVE and Master active, don't increment slave frame_id.
+ * Instead use Master frame_id for Slave.
+ */
+ if ((dual_hw_type == DUAL_HW_MASTER_SLAVE) &&
+ (ms_type == MS_TYPE_SLAVE) &&
+ (vfe_dev->common_data->ms_resource.master_active == 1)) {
+ /* DUAL_HW_MS_SLAVE && MASTER active */
+ time = ts->buf_time.tv_sec * 1000 +
+ ts->buf_time.tv_usec / 1000;
+ master_sof_info = &vfe_dev->common_data->ms_resource.
+ master_sof_info;
+ master_time = master_sof_info->mono_timestamp_ms;
+ delta = vfe_dev->common_data->ms_resource.sof_delta_threshold;
+ ISP_DBG("%s: vfe %d frame %d Slave time %d Master time %d delta %d\n",
+ __func__, vfe_dev->pdev->id,
+ vfe_dev->axi_data.src_info[frame_src].frame_id,
+ time, master_time, time - master_time);
+
+ if (time - master_time > delta)
+ sof_incr = 1;
+
+ /*
+ * If delta < 5ms, slave frame_id = master frame_id
+ * If delta > 5ms, slave frame_id = master frame_id + 1
+ * CANNOT support Batch Mode with this logic currently.
+ */
+ vfe_dev->axi_data.src_info[frame_src].frame_id =
+ master_sof_info->frame_id + sof_incr;
+ } else {
+ if (frame_src == VFE_PIX_0) {
+ vfe_dev->axi_data.src_info[frame_src].frame_id +=
+ vfe_dev->axi_data.src_info[frame_src].
+ sof_counter_step;
+ src_info = &vfe_dev->axi_data.src_info[frame_src];
+
+ if (!src_info->frame_id &&
+ !src_info->reg_update_frame_id &&
+ ((src_info->frame_id -
+ src_info->reg_update_frame_id) >
+ (MAX_REG_UPDATE_THRESHOLD *
+ src_info->sof_counter_step))) {
+ pr_err("%s:%d reg_update not received for %d frames\n",
+ __func__, __LINE__,
+ src_info->frame_id -
+ src_info->reg_update_frame_id);
+
+ msm_isp_halt_send_error(vfe_dev,
+ ISP_EVENT_REG_UPDATE_MISSING);
+ }
+
+ } else
+ vfe_dev->axi_data.src_info[frame_src].frame_id++;
+ }
+
+ sof_info = vfe_dev->axi_data.src_info[frame_src].
+ dual_hw_ms_info.sof_info;
+ if (dual_hw_type == DUAL_HW_MASTER_SLAVE &&
+ sof_info != NULL) {
+ sof_info->frame_id = vfe_dev->axi_data.src_info[frame_src].
+ frame_id;
+ sof_info->timestamp_ms = ts->event_time.tv_sec * 1000 +
+ ts->event_time.tv_usec / 1000;
+ sof_info->mono_timestamp_ms = ts->buf_time.tv_sec * 1000 +
+ ts->buf_time.tv_usec / 1000;
+ }
+ spin_unlock_irqrestore(&vfe_dev->common_data->common_dev_data_lock,
+ flags);
+}
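
In master/slave dual-HW mode the slave does not advance its own counter; it copies the master frame id and adds one only when its SOF arrived more than sof_delta_threshold milliseconds after the master's. A sketch of that decision with made-up timestamps and the 5 ms threshold mentioned in the comment above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int32_t slave_time_ms  = 10012;  /* slave SOF, monotonic ms */
        int32_t master_time_ms = 10004;  /* last master SOF, monotonic ms */
        int32_t delta_threshold_ms = 5;  /* ms_resource.sof_delta_threshold */
        uint32_t master_frame_id = 42;
        uint32_t sof_incr = 0;

        if (slave_time_ms - master_time_ms > delta_threshold_ms)
                sof_incr = 1;  /* slave SOF belongs to the next frame */

        printf("slave frame_id = %u\n", master_frame_id + sof_incr);
        return 0;  /* prints 43: 8 ms late is past the 5 ms threshold */
}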
+
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts)
+{
+ struct msm_isp_event_data event_data;
+ struct msm_vfe_sof_info *sof_info = NULL, *self_sof = NULL;
+ enum msm_vfe_dual_hw_ms_type ms_type;
+ int i, j;
+
+ memset(&event_data, 0, sizeof(event_data));
+
+ switch (event_type) {
+ case ISP_EVENT_SOF:
+ if (frame_src == VFE_PIX_0) {
+			/* Frame id is incremented at CAMIF SOF. The event is
+			 * sent at EPOCH. If by this time both VFEs don't have
+			 * the same frame_id, there is a scheduling issue or
+			 * some other s/w issue.
+			 */
+ if (vfe_dev->is_split &&
+ vfe_dev->common_data->dual_vfe_res->
+ axi_data[0]->
+ src_info[VFE_PIX_0].frame_id !=
+ vfe_dev->common_data->dual_vfe_res->
+ axi_data[1]->
+ src_info[VFE_PIX_0].frame_id) {
+ pr_err_ratelimited("%s: Error! 2 VFE out of sync vfe0 frame_id %u vfe1 %u\n",
+ __func__,
+ vfe_dev->common_data->dual_vfe_res->
+ axi_data[0]->
+ src_info[VFE_PIX_0].frame_id,
+ vfe_dev->common_data->dual_vfe_res->
+ axi_data[1]->
+ src_info[VFE_PIX_0].frame_id);
+ }
+ if (vfe_dev->isp_sof_debug < ISP_SOF_DEBUG_COUNT)
+ pr_err("%s: PIX0 frame id: %u\n", __func__,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ vfe_dev->isp_sof_debug++;
+ } else if (frame_src == VFE_RAW_0) {
+ if (vfe_dev->isp_raw0_debug < ISP_SOF_DEBUG_COUNT)
+ pr_err("%s: RAW_0 frame id: %u\n", __func__,
+ vfe_dev->axi_data.src_info[VFE_RAW_0].frame_id);
+ vfe_dev->isp_raw0_debug++;
+ } else if (frame_src == VFE_RAW_1) {
+ if (vfe_dev->isp_raw1_debug < ISP_SOF_DEBUG_COUNT)
+ pr_err("%s: RAW_1 frame id: %u\n", __func__,
+ vfe_dev->axi_data.src_info[VFE_RAW_1].frame_id);
+ vfe_dev->isp_raw1_debug++;
+ } else if (frame_src == VFE_RAW_2) {
+ if (vfe_dev->isp_raw2_debug < ISP_SOF_DEBUG_COUNT)
+ pr_err("%s: RAW_2 frame id: %u\n", __func__,
+ vfe_dev->axi_data.src_info[VFE_RAW_2].frame_id);
+ vfe_dev->isp_raw2_debug++;
+ }
+
+ ISP_DBG("%s: vfe %d frame_src %d frame id: %u\n", __func__,
+ vfe_dev->pdev->id, frame_src,
+ vfe_dev->axi_data.src_info[frame_src].frame_id);
+
+ /*
+ * Cannot support dual_cam and framedrop same time in union.
+ * If need to support framedrop as well, move delta calculation
+ * to userspace
+ */
+ if (vfe_dev->axi_data.src_info[frame_src].dual_hw_type ==
+ DUAL_HW_MASTER_SLAVE) {
+ spin_lock(&vfe_dev->common_data->common_dev_data_lock);
+ self_sof = vfe_dev->axi_data.src_info[frame_src].
+ dual_hw_ms_info.sof_info;
+ if (!self_sof) {
+ spin_unlock(&vfe_dev->common_data->
+ common_dev_data_lock);
+ break;
+ }
+ ms_type = vfe_dev->axi_data.src_info[frame_src].
+ dual_hw_ms_info.dual_hw_ms_type;
+ if (ms_type == MS_TYPE_MASTER) {
+ for (i = 0, j = 0; i < MS_NUM_SLAVE_MAX; i++) {
+ if (!(vfe_dev->common_data->
+ ms_resource.slave_active_mask
+ & (1 << i)))
+ continue;
+ sof_info = &vfe_dev->common_data->
+ ms_resource.slave_sof_info[i];
+ event_data.u.sof_info.ms_delta_info.
+ delta[j] =
+ self_sof->mono_timestamp_ms -
+ sof_info->mono_timestamp_ms;
+ j++;
+ if (j == vfe_dev->common_data->
+ ms_resource.num_slave)
+ break;
+ }
+ event_data.u.sof_info.ms_delta_info.
+ num_delta_info = j;
+ } else {
+ sof_info = &vfe_dev->common_data->ms_resource.
+ master_sof_info;
+ event_data.u.sof_info.ms_delta_info.
+ num_delta_info = 1;
+ event_data.u.sof_info.ms_delta_info.delta[0] =
+ self_sof->mono_timestamp_ms -
+ sof_info->mono_timestamp_ms;
+ }
+ spin_unlock(&vfe_dev->common_data->
+ common_dev_data_lock);
+ } else
+ if (frame_src == VFE_PIX_0) {
+ msm_isp_check_for_output_error(vfe_dev, ts,
+ &event_data.u.sof_info);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ event_data.frame_id = vfe_dev->axi_data.src_info[frame_src].frame_id;
+ event_data.timestamp = ts->event_time;
+ event_data.mono_timestamp = ts->buf_time;
+ msm_isp_send_event(vfe_dev, event_type | frame_src, &event_data);
+}
+
+/**
+ * msm_isp_calculate_framedrop() - Setup frame period and pattern
+ * @axi_data: Structure describing the h/w streams.
+ * @stream_cfg_cmd: User space input parameter for period/pattern.
+ *
+ * Initialize the h/w stream framedrop period and pattern sent
+ * by user space.
+ *
+ * Returns 0 on success else error code.
+ */
+int msm_isp_calculate_framedrop(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
+{
+ uint32_t framedrop_period = 0;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)
+ < VFE_AXI_SRC_MAX) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ } else {
+ pr_err("%s: Invalid stream handle", __func__);
+ return -EINVAL;
+ }
+ if (!stream_info) {
+ pr_err("%s: Stream info is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_cfg_cmd->frame_skip_pattern);
+ stream_info->frame_skip_pattern =
+ stream_cfg_cmd->frame_skip_pattern;
+ if (stream_cfg_cmd->frame_skip_pattern == SKIP_ALL)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ else
+ stream_info->current_framedrop_period = framedrop_period;
+
+ stream_info->init_frame_drop = stream_cfg_cmd->init_frame_drop;
+
+ if (stream_cfg_cmd->burst_count > 0) {
+ stream_info->stream_type = BURST_STREAM;
+ stream_info->num_burst_capture =
+ stream_cfg_cmd->burst_count;
+ } else {
+ stream_info->stream_type = CONTINUOUS_STREAM;
+ }
+ return 0;
+}
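
msm_isp_calculate_framedrop() maps SKIP_ALL to the stream-stop period, takes the period for other patterns from msm_isp_get_framedrop_period(), and classifies the stream as burst or continuous from burst_count. A sketch of that classification; the real pattern-to-period mapping is outside this excerpt, so a simple stand-in is used:

#include <stdio.h>
#include <stdint.h>

#define STREAM_STOP_PERIOD 0x7FFFFFFF  /* hypothetical stop-period value */

enum skip_pattern { NO_SKIP, EVERY_2FRAME, SKIP_ALL_FRAMES };

/* stand-in for msm_isp_get_framedrop_period() */
static uint32_t framedrop_period(enum skip_pattern p)
{
        return p == EVERY_2FRAME ? 2 : 1;
}

int main(void)
{
        enum skip_pattern pattern = EVERY_2FRAME;
        uint32_t burst_count = 4;
        uint32_t period;

        period = (pattern == SKIP_ALL_FRAMES) ?
                        STREAM_STOP_PERIOD : framedrop_period(pattern);

        printf("period %u, %s stream, burst frames %u\n", period,
                burst_count ? "BURST" : "CONTINUOUS", burst_count);
        return 0;
}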
+
+void msm_isp_calculate_bandwidth(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int bpp = 0;
+ if (stream_info->stream_src < RDI_INTF_0) {
+ stream_info->bandwidth =
+ (axi_data->src_info[VFE_PIX_0].pixel_clock /
+ axi_data->src_info[VFE_PIX_0].width) *
+ stream_info->max_width;
+ stream_info->bandwidth = (unsigned long)stream_info->bandwidth *
+ stream_info->format_factor / ISP_Q2;
+ } else {
+ int rdi = SRC_TO_INTF(stream_info->stream_src);
+ bpp = msm_isp_get_bit_per_pixel(stream_info->output_format);
+ if (rdi < VFE_SRC_MAX)
+ stream_info->bandwidth =
+ (axi_data->src_info[rdi].pixel_clock / 8) * bpp;
+ else
+ pr_err("%s: Invalid rdi interface\n", __func__);
+ }
+}
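+
+/*
+ * Worked example for the RDI branch above, using assumed numbers: with
+ * pixel_clock = 200000000 (200 MHz) and a 10 bpp output format,
+ *   bandwidth = (200000000 / 8) * 10 = 250000000 bytes/s.
+ * For PIX sources the line-rate term (pixel_clock / width) * max_width
+ * is additionally scaled by format_factor / ISP_Q2.
+ */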
+
+#ifdef CONFIG_MSM_AVTIMER
+void msm_isp_start_avtimer(void)
+{
+ avcs_core_open();
+ avcs_core_disable_power_collapse(1);
+}
+
+static inline void msm_isp_get_avtimer_ts(
+ struct msm_isp_timestamp *time_stamp)
+{
+ int rc = 0;
+ uint32_t avtimer_usec = 0;
+ uint64_t avtimer_tick = 0;
+
+ rc = avcs_core_query_timer(&avtimer_tick);
+ if (rc < 0) {
+ pr_err("%s: Error: Invalid AVTimer Tick, rc=%d\n",
+ __func__, rc);
+ /* In case of error return zero AVTimer Tick Value */
+ time_stamp->vt_time.tv_sec = 0;
+ time_stamp->vt_time.tv_usec = 0;
+ } else {
+ avtimer_usec = do_div(avtimer_tick, USEC_PER_SEC);
+ time_stamp->vt_time.tv_sec = (uint32_t)(avtimer_tick);
+ time_stamp->vt_time.tv_usec = avtimer_usec;
+ pr_debug("%s: AVTimer TS = %u:%u\n", __func__,
+ (uint32_t)(avtimer_tick), avtimer_usec);
+ }
+}
+#else
+void msm_isp_start_avtimer(void)
+{
+ pr_err("AV Timer is not supported\n");
+}
+
+static inline void msm_isp_get_avtimer_ts(
+ struct msm_isp_timestamp *time_stamp)
+{
+ pr_err_ratelimited("%s: Error: AVTimer driver not available\n",
+ __func__);
+ time_stamp->vt_time.tv_sec = 0;
+ time_stamp->vt_time.tv_usec = 0;
+}
+#endif
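+
+/*
+ * Sketch of the tick split done in msm_isp_get_avtimer_ts() above, with
+ * an assumed tick value: do_div() divides in place and returns the
+ * remainder, so for avtimer_tick = 5300000 and USEC_PER_SEC = 1000000,
+ *   avtimer_usec = do_div(avtimer_tick, USEC_PER_SEC); yields 300000
+ * and avtimer_tick becomes 5, giving vt_time = 5 s + 300000 us.
+ */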
+
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ uint32_t io_format = 0;
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd = arg;
+ struct msm_vfe_axi_stream *stream_info;
+
+ rc = msm_isp_axi_create_stream(vfe_dev,
+ &vfe_dev->axi_data, stream_cfg_cmd);
+ if (rc) {
+ pr_err("%s: create stream failed\n", __func__);
+ return rc;
+ }
+
+ rc = msm_isp_validate_axi_request(
+ &vfe_dev->axi_data, stream_cfg_cmd);
+ if (rc) {
+ pr_err("%s: Request validation failed\n", __func__);
+ if (HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle) <
+ VFE_AXI_SRC_MAX)
+ msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
+ return rc;
+ }
+ stream_info = &vfe_dev->axi_data.
+ stream_info[HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle)];
+ if (!stream_info) {
+ pr_err("%s: can not find stream handle %x\n", __func__,
+ stream_cfg_cmd->axi_stream_handle);
+ return -EINVAL;
+ }
+
+ stream_info->memory_input = stream_cfg_cmd->memory_input;
+ vfe_dev->reg_update_requested &=
+ ~(BIT(SRC_TO_INTF(stream_info->stream_src)));
+
+ msm_isp_axi_reserve_wm(vfe_dev, &vfe_dev->axi_data, stream_info);
+
+ if (stream_info->stream_src < RDI_INTF_0) {
+ io_format = vfe_dev->axi_data.src_info[VFE_PIX_0].input_format;
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+ if (stream_info->stream_src == CAMIF_RAW &&
+ io_format != stream_info->output_format)
+ pr_debug("%s: Overriding input format\n",
+ __func__);
+
+ io_format = stream_info->output_format;
+ }
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.cfg_io_format(
+ vfe_dev, stream_info->stream_src, io_format);
+ if (rc) {
+ pr_err("%s: cfg io format failed\n", __func__);
+ goto done;
+ }
+ }
+ rc = msm_isp_calculate_framedrop(&vfe_dev->axi_data, stream_cfg_cmd);
+ if (rc)
+ goto done;
+ if (stream_cfg_cmd->vt_enable && !vfe_dev->vt_enable) {
+ vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
+ msm_isp_start_avtimer();
+ }
+ if (stream_info->num_planes > 1) {
+ msm_isp_axi_reserve_comp_mask(
+ &vfe_dev->axi_data, stream_info);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_comp_mask(vfe_dev, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, i);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_xbar_reg(vfe_dev, stream_info, i);
+ }
+ /* initialize the WM ping pong with scratch buffer */
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PONG_FLAG);
+
+done:
+ if (rc) {
+ msm_isp_axi_free_wm(&vfe_dev->axi_data, stream_info);
+ msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+ HANDLE_TO_IDX(stream_cfg_cmd->axi_stream_handle));
+ }
+ return rc;
+}
+
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ struct msm_vfe_axi_stream_release_cmd *stream_release_cmd = arg;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_stream_cfg_cmd stream_cfg;
+
+
+ if (HANDLE_TO_IDX(stream_release_cmd->stream_handle) >=
+ VFE_AXI_SRC_MAX) {
+ pr_err("%s: Invalid stream handle\n", __func__);
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle)];
+ if (stream_info->state == AVAILABLE) {
+ pr_err("%s: Stream already released\n", __func__);
+ return -EINVAL;
+ } else if (stream_info->state != INACTIVE) {
+ stream_cfg.cmd = STOP_STREAM;
+ stream_cfg.num_streams = 1;
+ stream_cfg.stream_handle[0] = stream_release_cmd->stream_handle;
+ msm_isp_cfg_axi_stream(vfe_dev, (void *) &stream_cfg);
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_reg(vfe_dev, stream_info, i);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_xbar_reg(vfe_dev, stream_info, i);
+ }
+
+ if (stream_info->num_planes > 1) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_comp_mask(vfe_dev, stream_info);
+ msm_isp_axi_free_comp_mask(&vfe_dev->axi_data, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+ }
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
+ msm_isp_axi_free_wm(axi_data, stream_info);
+
+ msm_isp_axi_destroy_stream(&vfe_dev->axi_data,
+ HANDLE_TO_IDX(stream_release_cmd->stream_handle));
+
+ return rc;
+}
+
+static int msm_isp_axi_stream_enable_cfg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, int32_t dual_vfe_sync)
+{
+ int i, vfe_id = 0, enable_wm = 0;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ struct dual_vfe_resource *dual_vfe_res = NULL;
+
+ if (stream_idx >= MAX_NUM_STREAM) {
+ pr_err("%s: Invalid stream_idx", __func__);
+ goto error;
+ }
+
+ if (stream_info->state == INACTIVE)
+ goto error;
+
+ if (stream_info->state == START_PENDING ||
+ stream_info->state == RESUME_PENDING) {
+ enable_wm = 1;
+ } else {
+ enable_wm = 0;
+ }
+ for (i = 0; i < stream_info->num_planes; i++) {
+ /*
+		 * When the sensor is streaming, use dual VFE sync mode
+		 * to enable the WMs together and avoid a split frame.
+ */
+ if ((stream_info->stream_src < RDI_INTF_0) &&
+ vfe_dev->is_split && vfe_dev->pdev->id == ISP_VFE1 &&
+ dual_vfe_sync) {
+ dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
+ if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
+ !dual_vfe_res->axi_data[ISP_VFE0] ||
+ !dual_vfe_res->vfe_base[ISP_VFE1] ||
+ !dual_vfe_res->axi_data[ISP_VFE1]) {
+				pr_err("%s:%d failed vfe0 %p %p vfe1 %p %p\n",
+ __func__, __LINE__,
+ dual_vfe_res->vfe_base[ISP_VFE0],
+ dual_vfe_res->axi_data[ISP_VFE0],
+ dual_vfe_res->vfe_base[ISP_VFE1],
+ dual_vfe_res->axi_data[ISP_VFE1]);
+ goto error;
+ }
+ for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ enable_wm(dual_vfe_res->vfe_base[vfe_id],
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].wm[i],
+ enable_wm);
+ }
+ } else if (!vfe_dev->is_split ||
+ (stream_info->stream_src >= RDI_INTF_0 &&
+ stream_info->stream_src <= RDI_INTF_2) ||
+ !dual_vfe_sync) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ enable_wm(vfe_dev->vfe_base, stream_info->wm[i],
+ enable_wm);
+ }
+ if (!enable_wm) {
+			/* Issue a reg update for the raw snapshot case
+			 * since we don't have a reg update ack
+ */
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count > 0
+ && vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count == 0) {
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev,
+ VFE_PIX_0);
+ }
+ }
+ }
+ }
+ if (stream_info->state == START_PENDING)
+ axi_data->num_active_stream++;
+ else if (stream_info->state == STOP_PENDING)
+ axi_data->num_active_stream--;
+ return 0;
+error:
+ return -EINVAL;
+}
+
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ int i;
+ unsigned long flags;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ frame_src) {
+ ISP_DBG("%s stream_src %d frame_src %d\n", __func__,
+ SRC_TO_INTF(
+ axi_data->stream_info[i].stream_src),
+ frame_src);
+ continue;
+ }
+ if (axi_data->stream_info[i].state == UPDATING)
+ axi_data->stream_info[i].state = ACTIVE;
+ else if (axi_data->stream_info[i].state == START_PENDING ||
+ axi_data->stream_info[i].state == STOP_PENDING) {
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, &axi_data->stream_info[i], 1);
+ axi_data->stream_info[i].state =
+ axi_data->stream_info[i].state ==
+ START_PENDING ? STARTING : STOPPING;
+ } else if (axi_data->stream_info[i].state == STARTING ||
+ axi_data->stream_info[i].state == STOPPING) {
+ axi_data->stream_info[i].state =
+ axi_data->stream_info[i].state == STARTING ?
+ ACTIVE : INACTIVE;
+ }
+ }
+
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+ if (vfe_dev->axi_data.stream_update[frame_src]) {
+ vfe_dev->axi_data.stream_update[frame_src]--;
+ }
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+
+ if (vfe_dev->axi_data.pipeline_update == DISABLE_CAMIF ||
+ (vfe_dev->axi_data.pipeline_update ==
+ DISABLE_CAMIF_IMMEDIATELY)) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ enable_module(vfe_dev, 0xFF, 0);
+ vfe_dev->axi_data.pipeline_update = NO_UPDATE;
+ }
+
+ if (vfe_dev->axi_data.stream_update[frame_src] == 0)
+ complete(&vfe_dev->stream_config_complete);
+}
+
+static void msm_isp_reload_ping_pong_offset(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int i, j;
+ uint32_t bit;
+ struct msm_isp_buffer *buf;
+ int32_t buf_size_byte = 0;
+ int32_t word_per_line = 0;
+
+ for (i = 0; i < 2; i++) {
+ buf = stream_info->buf[i];
+ if (!buf)
+ continue;
+
+ bit = i ? 0 : 1;
+
+ for (j = 0; j < stream_info->num_planes; j++) {
+ word_per_line = msm_isp_cal_word_per_line(
+ stream_info->output_format, stream_info->
+ plane_cfg[j].output_stride);
+ if (word_per_line < 0) {
+				/* 0 means no prefetch */
+ word_per_line = 0;
+ buf_size_byte = 0;
+ } else {
+ buf_size_byte = (word_per_line * 8 *
+ stream_info->plane_cfg[j].
+ output_scan_lines) - stream_info->
+ plane_cfg[j].plane_addr_offset;
+ }
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+ vfe_dev->vfe_base, stream_info->wm[j], bit,
+ buf->mapped_info[j].paddr +
+ stream_info->plane_cfg[j].plane_addr_offset,
+ buf_size_byte);
+ }
+ }
+}
+
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ int i, j;
+ uint32_t update_state;
+ unsigned long flags;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream *stream_info;
+ int num_stream = 0;
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ if (SRC_TO_INTF(axi_data->stream_info[i].stream_src) !=
+ frame_src) {
+ continue;
+ }
+ num_stream++;
+ stream_info = &axi_data->stream_info[i];
+ if ((stream_info->stream_type == BURST_STREAM &&
+ !stream_info->controllable_output) ||
+ stream_info->state == AVAILABLE)
+ continue;
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state == PAUSING) {
+ /*AXI Stopped, apply update*/
+ stream_info->state = PAUSED;
+ msm_isp_reload_ping_pong_offset(vfe_dev, stream_info);
+ for (j = 0; j < stream_info->num_planes; j++)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ /*Resume AXI*/
+ stream_info->state = RESUME_PENDING;
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, &axi_data->stream_info[i], 1);
+ stream_info->state = RESUMING;
+ } else if (stream_info->state == RESUMING) {
+ stream_info->runtime_output_format =
+ stream_info->output_format;
+ stream_info->state = ACTIVE;
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+
+ if (num_stream)
+ update_state = atomic_dec_return(
+ &axi_data->axi_cfg_update[frame_src]);
+}
+
+static int msm_isp_get_done_buf(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+ struct msm_isp_buffer **done_buf)
+{
+ uint32_t pingpong_bit = 0, i;
+ int rc = 0;
+
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ for (i = 0; i < stream_info->num_planes; i++) {
+ if (pingpong_bit !=
+ (~(pingpong_status >> stream_info->wm[i]) & 0x1)) {
+ pr_debug("%s: Write master ping pong mismatch. Status: 0x%x\n",
+ __func__, pingpong_status);
+ }
+ }
+
+ *done_buf = stream_info->buf[pingpong_bit];
+ /* For null buffer there is nothing to do */
+ if (NULL == *done_buf) {
+ ISP_DBG("%s vfe %d done buf is null\n", __func__,
+ vfe_dev->pdev->id);
+ return rc;
+ }
+ ISP_DBG("%s vfe %d pingpong %d buf %d bufq %x\n", __func__,
+ vfe_dev->pdev->id, pingpong_bit,
+ (*done_buf)->buf_idx, (*done_buf)->bufq_handle);
+
+ /*
+	 * Put the scratch buffer if there was a valid buffer, to avoid
+	 * page faults in case we do not want another frame from the
+	 * ping/pong position from which the current done_buf was obtained.
+ */
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info, pingpong_status);
+
+ if (stream_info->controllable_output) {
+ stream_info->buf[pingpong_bit] = NULL;
+ if (!stream_info->undelivered_request_cnt) {
+ pr_err_ratelimited("%s:%d error undelivered_request_cnt 0\n",
+ __func__, __LINE__);
+ } else {
+ stream_info->undelivered_request_cnt--;
+ if (pingpong_bit != stream_info->sw_ping_pong_bit) {
+ pr_err("%s:%d ping pong bit actual %d sw %d\n",
+ __func__, __LINE__, pingpong_bit,
+ stream_info->sw_ping_pong_bit);
+
+ rc = -EINVAL;
+ }
+ stream_info->sw_ping_pong_bit ^= 1;
+ }
+ }
+
+ return rc;
+}
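+
+/*
+ * Bit arithmetic used above to pick the completed buffer, with assumed
+ * values: for wm[0] = 3 and pingpong_status = 0x08 (bit 3 set),
+ *   pingpong_bit = ~(0x08 >> 3) & 0x1 = 0
+ * so done_buf is taken from stream_info->buf[0]; a cleared status bit
+ * would select buf[1]. All planes of a stream are expected to yield the
+ * same bit, hence the mismatch warning in the loop.
+ */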
+
+void msm_isp_halt_send_error(struct vfe_device *vfe_dev, uint32_t event)
+{
+ uint32_t i = 0;
+ struct msm_isp_event_data error_event;
+ struct msm_vfe_axi_halt_cmd halt_cmd;
+
+ memset(&halt_cmd, 0, sizeof(struct msm_vfe_axi_halt_cmd));
+ memset(&error_event, 0, sizeof(struct msm_isp_event_data));
+ halt_cmd.stop_camif = 1;
+ halt_cmd.overflow_detected = 0;
+ halt_cmd.blocking_halt = 0;
+
+ pr_err("%s: vfe%d fatal error!\n", __func__, vfe_dev->pdev->id);
+
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ HALT_ENFORCED);
+
+	/* axi halt takes a heavy spinlock; don't call with a spinlock held. */
+ msm_isp_axi_halt(vfe_dev, &halt_cmd);
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++)
+ vfe_dev->axi_data.stream_info[i].state =
+ INACTIVE;
+
+ error_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+
+ msm_isp_send_event(vfe_dev, event, &error_event);
+}
+
+int msm_isp_print_ping_pong_address(struct vfe_device *vfe_dev,
+ unsigned long fault_addr)
+{
+ int i, j;
+ struct msm_isp_buffer *buf = NULL;
+ uint32_t pingpong_bit;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+
+ for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
+ stream_info = &vfe_dev->axi_data.stream_info[j];
+ if (stream_info->state == INACTIVE)
+ continue;
+
+ for (pingpong_bit = 0; pingpong_bit < 2; pingpong_bit++) {
+ for (i = 0; i < stream_info->num_planes; i++) {
+ buf = stream_info->buf[pingpong_bit];
+ if (buf == NULL) {
+ pr_err("%s: buf NULL\n", __func__);
+ continue;
+ }
+ pr_debug("%s: stream_id %x ping-pong %d plane %d start_addr %lu addr_offset %x len %zx stride %d scanline %d\n"
+ , __func__, stream_info->stream_id,
+ pingpong_bit, i, (unsigned long)
+ buf->mapped_info[i].paddr,
+ stream_info->
+ plane_cfg[i].plane_addr_offset,
+ buf->mapped_info[i].len,
+ stream_info->
+ plane_cfg[i].output_stride,
+ stream_info->
+ plane_cfg[i].output_scan_lines
+ );
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int msm_isp_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t pingpong_status,
+ uint8_t sync_dual_vfe)
+{
+ int i, rc = -1;
+ struct msm_isp_buffer *buf = NULL;
+ uint32_t bufq_handle = 0;
+ struct msm_vfe_frame_request_queue *queue_req;
+ uint32_t pingpong_bit;
+ uint32_t buf_cnt = 0;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ uint32_t buffer_size_byte = 0;
+ int32_t word_per_line = 0;
+ dma_addr_t paddr;
+ struct dual_vfe_resource *dual_vfe_res = NULL;
+ uint32_t vfe_id = 0;
+ unsigned long flags;
+
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+ pr_err("%s: Invalid stream_idx", __func__);
+ return rc;
+ }
+
+ if (!stream_info->controllable_output) {
+ bufq_handle = stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT];
+ } else {
+ queue_req = list_first_entry_or_null(&stream_info->request_q,
+ struct msm_vfe_frame_request_queue, list);
+ if (!queue_req)
+ return 0;
+
+ bufq_handle = stream_info->
+ bufq_handle[queue_req->buff_queue_id];
+
+ if (!bufq_handle || stream_info->request_q_cnt <= 0) {
+ pr_err_ratelimited("%s: Drop request. Shared stream is stopped.\n",
+ __func__);
+ return -EINVAL;
+ }
+ queue_req->cmd_used = 0;
+ list_del(&queue_req->list);
+ stream_info->request_q_cnt--;
+ }
+
+ rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+ vfe_dev->pdev->id, bufq_handle, &buf, &buf_cnt);
+
+ if (rc < 0) {
+ ISP_DBG("%s vfe %d get_buf fail bufq %x\n", __func__,
+ vfe_dev->pdev->id, bufq_handle);
+ vfe_dev->error_info.
+ stream_framedrop_count[bufq_handle & 0xFF]++;
+ vfe_dev->error_info.framedrop_flag = 1;
+ return rc;
+ }
+
+ if (buf->num_planes != stream_info->num_planes) {
+ pr_err("%s: Invalid buffer\n", __func__);
+ rc = -EINVAL;
+ goto buf_error;
+ }
+
+	/* make sure that streams are in the right state */
+ if ((stream_info->stream_src < RDI_INTF_0) &&
+ vfe_dev->is_split &&
+ (buf_cnt >= MAX_VFE) && sync_dual_vfe) {
+ dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
+ if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
+ !dual_vfe_res->axi_data[ISP_VFE0] ||
+ !dual_vfe_res->vfe_base[ISP_VFE1] ||
+ !dual_vfe_res->axi_data[ISP_VFE1]) {
+			pr_err("%s:%d failed vfe0 %p %p vfe1 %p %p\n",
+ __func__, __LINE__,
+ dual_vfe_res->vfe_base[ISP_VFE0],
+ dual_vfe_res->axi_data[ISP_VFE0],
+ dual_vfe_res->vfe_base[ISP_VFE1],
+ dual_vfe_res->axi_data[ISP_VFE1]);
+ rc = -EINVAL;
+ goto buf_error;
+ }
+ } else if (!sync_dual_vfe || !vfe_dev->is_split ||
+ (stream_info->stream_src >= RDI_INTF_0 &&
+ stream_info->stream_src <= RDI_INTF_2)) {
+ dual_vfe_res = NULL;
+ } else {
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ stream_info->buf[pingpong_bit] = buf;
+ return 0;
+ }
+
+ for (i = 0; i < stream_info->num_planes; i++) {
+ word_per_line = msm_isp_cal_word_per_line(
+ stream_info->output_format, stream_info->
+ plane_cfg[i].output_stride);
+ if (word_per_line < 0) {
+			/* 0 means no prefetch */
+ word_per_line = 0;
+ buffer_size_byte = 0;
+ } else {
+ buffer_size_byte = (word_per_line * 8 *
+ stream_info->plane_cfg[i].
+ output_scan_lines) - stream_info->
+ plane_cfg[i].plane_addr_offset;
+ }
+
+ paddr = buf->mapped_info[i].paddr;
+ /* Isolate pingpong_bit from pingpong_status */
+ pingpong_bit = ((pingpong_status >>
+ stream_info->wm[i]) & 0x1);
+ ISP_DBG("%s: vfe %d config buf %d to pingpong %d stream %x\n",
+ __func__, vfe_dev->pdev->id,
+ buf->buf_idx, !pingpong_bit,
+ bufq_handle);
+
+ if (dual_vfe_res) {
+ for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
+ if (vfe_id != vfe_dev->pdev->id)
+ spin_lock_irqsave(
+ &dual_vfe_res->
+ axi_data[vfe_id]->
+ stream_info[stream_idx].
+ lock, flags);
+
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_ping_pong_addr(
+ dual_vfe_res->vfe_base[vfe_id],
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].wm[i],
+ pingpong_bit, paddr +
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].
+ plane_cfg[i].plane_addr_offset,
+ buffer_size_byte);
+
+ if (i == 0) {
+ dual_vfe_res->axi_data[vfe_id]->
+ stream_info[stream_idx].
+ buf[!pingpong_bit] =
+ buf;
+ }
+ if (vfe_id != vfe_dev->pdev->id)
+ spin_unlock_irqrestore(
+ &dual_vfe_res->
+ axi_data[vfe_id]->
+ stream_info[stream_idx].
+ lock, flags);
+ }
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+ vfe_dev->vfe_base, stream_info->wm[i],
+ pingpong_bit, paddr +
+ stream_info->plane_cfg[i].plane_addr_offset,
+ buffer_size_byte);
+ stream_info->buf[!pingpong_bit] =
+ buf;
+ }
+ }
+
+ return 0;
+buf_error:
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ return rc;
+}
+
+static void msm_isp_process_done_buf(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
+ struct msm_isp_timestamp *ts)
+{
+ int rc;
+ unsigned long flags;
+ struct msm_isp_event_data buf_event;
+ struct timeval *time_stamp;
+ uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ uint32_t frame_id;
+ uint32_t buf_src;
+ uint8_t drop_frame = 0;
+ struct msm_isp_bufq *bufq = NULL;
+ memset(&buf_event, 0, sizeof(buf_event));
+
+ frame_id = vfe_dev->axi_data.
+ src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+ pr_err_ratelimited("%s: Invalid stream_idx", __func__);
+ return;
+ }
+
+ if (SRC_TO_INTF(stream_info->stream_src) >= VFE_SRC_MAX) {
+ pr_err_ratelimited("%s: Invalid stream index, put buf back to vb2 queue\n",
+ __func__);
+ rc = vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ return;
+ }
+
+ if (stream_info->stream_type != BURST_STREAM &&
+ (stream_info->sw_skip.stream_src_mask &
+ (1 << stream_info->stream_src))) {
+ /* Hw stream output of this src is requested for drop */
+ if (stream_info->sw_skip.skip_mode == SKIP_ALL) {
+ /* drop all buffers */
+ drop_frame = 1;
+ } else if (stream_info->sw_skip.skip_mode == SKIP_RANGE &&
+ (stream_info->sw_skip.min_frame_id <= frame_id &&
+ stream_info->sw_skip.max_frame_id >= frame_id)) {
+ drop_frame = 1;
+ } else if (frame_id > stream_info->sw_skip.max_frame_id) {
+ spin_lock_irqsave(&stream_info->lock, flags);
+ memset(&stream_info->sw_skip, 0,
+ sizeof(struct msm_isp_sw_framskip));
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ }
+
+ if (!buf || !ts)
+ return;
+
+ if (vfe_dev->vt_enable) {
+ msm_isp_get_avtimer_ts(ts);
+ time_stamp = &ts->vt_time;
+ } else {
+ time_stamp = &ts->buf_time;
+ }
+
+ rc = vfe_dev->buf_mgr->ops->get_buf_src(vfe_dev->buf_mgr,
+ buf->bufq_handle, &buf_src);
+ if (rc != 0) {
+ pr_err_ratelimited("%s: Error getting buf_src\n", __func__);
+ return;
+ }
+
+ if (stream_info->buf_divert && rc == 0 &&
+ buf_src != MSM_ISP_BUFFER_SRC_SCRATCH) {
+ rc = vfe_dev->buf_mgr->ops->buf_divert(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx,
+ time_stamp, frame_id);
+ } else {
+ rc = vfe_dev->buf_mgr->ops->update_put_buf_cnt(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx, frame_id);
+ }
+
+	/* The buf divert return value represents whether the buf
+	 * can be diverted. A positive return value means the
+	 * other ISP hardware is still processing the frame.
+ */
+ if (rc > 0) {
+ ISP_DBG("%s: vfe_id %d bufq %x buf_id %d put_cnt 1\n", __func__,
+ vfe_dev->pdev->id, buf->bufq_handle, buf->buf_idx);
+ return;
+ } else if (rc == 0) {
+ if ((buf->frame_id != frame_id) &&
+ vfe_dev->axi_data.enable_frameid_recovery) {
+ struct msm_isp_event_data error_event;
+
+ error_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ error_event.u.error_info.err_type =
+ ISP_ERROR_FRAME_ID_MISMATCH;
+ rc = vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ msm_isp_send_event(vfe_dev, ISP_EVENT_ERROR,
+ &error_event);
+			pr_err("%s: Error! frame id mismatch!! 1st buf frame %d, curr frame %d\n",
+ __func__, buf->frame_id, frame_id);
+ vfe_dev->buf_mgr->frameId_mismatch_recovery = 1;
+ return;
+ }
+ if (drop_frame) {
+			/* Put the buf in the dual VFE use case once
+			 * both VFEs are done using it
+ */
+ rc = vfe_dev->buf_mgr->ops->put_buf(
+ vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ if (!rc) {
+ ISP_DBG("%s:%d vfe_id %d Buffer dropped %d\n",
+ __func__, __LINE__, vfe_dev->pdev->id,
+ frame_id);
+ return;
+ }
+ }
+
+ buf_event.frame_id = frame_id;
+ buf_event.timestamp = *time_stamp;
+ buf_event.u.buf_done.session_id = stream_info->session_id;
+ buf_event.u.buf_done.stream_id = stream_info->stream_id;
+ buf_event.u.buf_done.handle = buf->bufq_handle;
+ buf_event.u.buf_done.buf_idx = buf->buf_idx;
+ buf_event.u.buf_done.output_format =
+ stream_info->runtime_output_format;
+ if (stream_info->buf_divert &&
+ buf_src != MSM_ISP_BUFFER_SRC_SCRATCH) {
+ ISP_DBG(
+ "%s: vfe_id %d send buf_divert frame_id %d buf-id %d bufq %x\n",
+ __func__, vfe_dev->pdev->id, frame_id,
+ buf->buf_idx, buf->bufq_handle);
+
+ bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
+ buf->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq buf_handle %x\n",
+ __func__, buf->bufq_handle);
+ }
+
+ if ((bufq != NULL) && bufq->buf_type == ISP_SHARE_BUF)
+ msm_isp_send_event(vfe_dev->common_data->
+ dual_vfe_res->vfe_dev[ISP_VFE1],
+ ISP_EVENT_BUF_DIVERT, &buf_event);
+ else
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_BUF_DIVERT, &buf_event);
+ } else {
+ ISP_DBG("%s: vfe_id %d send buf done buf-id %d bufq %x\n",
+ __func__, vfe_dev->pdev->id, buf->buf_idx,
+ buf->bufq_handle);
+ msm_isp_send_event(vfe_dev, ISP_EVENT_BUF_DONE,
+ &buf_event);
+ vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx, time_stamp,
+ frame_id, stream_info->runtime_output_format);
+ }
+ } else {
+ pr_err_ratelimited("%s: Warning! Unexpected return value rc = %d\n",
+ __func__, rc);
+ }
+}
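+
+/*
+ * Summary sketch of the divert/done convention handled above, based on
+ * the checks in this function rather than an exhaustive contract:
+ *   rc > 0  - the other VFE of a dual-VFE pair still owns the buffer,
+ *             nothing more to do here;
+ *   rc == 0 - this VFE completes the buffer and sends either
+ *             ISP_EVENT_BUF_DIVERT or ISP_EVENT_BUF_DONE (plus
+ *             buf_done()), unless the frame is dropped or a frame-id
+ *             mismatch triggers recovery;
+ *   rc < 0  - unexpected, only logged.
+ */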
+
+int msm_isp_drop_frame(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, struct msm_isp_timestamp *ts,
+ struct msm_isp_sof_info *sof_info)
+{
+ struct msm_isp_buffer *done_buf = NULL;
+ uint32_t pingpong_status, frame_id;
+ unsigned long flags;
+ struct msm_isp_bufq *bufq = NULL;
+ int rc = -1;
+
+ if (!vfe_dev || !stream_info || !ts || !sof_info) {
+		pr_err("%s %d vfe_dev %p stream_info %p ts %p sof_info %p\n",
+ __func__, __LINE__, vfe_dev, stream_info, ts,
+ sof_info);
+ return -EINVAL;
+ }
+
+ pingpong_status =
+ ~vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
+
+ if (stream_info->stream_type == BURST_STREAM)
+ stream_info->runtime_num_burst_capture--;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+ rc = msm_isp_get_done_buf(vfe_dev, stream_info,
+ pingpong_status, &done_buf);
+ if (rc < 0) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_PING_PONG_MISMATCH);
+ return rc;
+ }
+
+ if (stream_info->stream_type == CONTINUOUS_STREAM ||
+ stream_info->runtime_num_burst_capture > 1) {
+ ISP_DBG("%s: vfe %d drop frame bufq %x\n", __func__,
+ vfe_dev->pdev->id, done_buf->bufq_handle);
+ msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+ pingpong_status, 1);
+ }
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ frame_id = vfe_dev->axi_data.
+ src_info[SRC_TO_INTF(stream_info->stream_src)].frame_id;
+
+ if (done_buf) {
+ rc = vfe_dev->buf_mgr->ops->update_put_buf_cnt(vfe_dev->buf_mgr,
+ done_buf->bufq_handle, done_buf->buf_idx, frame_id);
+ if (rc == 0) {
+ vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ done_buf->bufq_handle, done_buf->buf_idx,
+ &ts->buf_time, frame_id,
+ stream_info->runtime_output_format);
+ }
+
+ bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
+ done_buf->bufq_handle);
+ if (!bufq) {
+ pr_err("%s: Invalid bufq buf_handle %x\n",
+ __func__, done_buf->bufq_handle);
+ return -EINVAL;
+ }
+ sof_info->reg_update_fail_mask |=
+ 1 << (bufq->bufq_handle & 0xF);
+ }
+ return 0;
+}
+
+static enum msm_isp_camif_update_state
+ msm_isp_get_camif_update_state(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint8_t pix_stream_cnt = 0, cur_pix_stream_cnt;
+ cur_pix_stream_cnt =
+ axi_data->src_info[VFE_PIX_0].pix_stream_count +
+ axi_data->src_info[VFE_PIX_0].raw_stream_count;
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info =
+ &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (stream_info->stream_src < RDI_INTF_0)
+ pix_stream_cnt++;
+ }
+
+ if ((pix_stream_cnt) &&
+ (axi_data->src_info[VFE_PIX_0].input_mux != EXTERNAL_READ)) {
+
+ if (cur_pix_stream_cnt == 0 && pix_stream_cnt &&
+ stream_cfg_cmd->cmd == START_STREAM)
+ return ENABLE_CAMIF;
+ else if (cur_pix_stream_cnt &&
+ (cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
+ stream_cfg_cmd->cmd == STOP_STREAM)
+ return DISABLE_CAMIF;
+ else if (cur_pix_stream_cnt &&
+ (cur_pix_stream_cnt - pix_stream_cnt) == 0 &&
+ stream_cfg_cmd->cmd == STOP_IMMEDIATELY)
+ return DISABLE_CAMIF_IMMEDIATELY;
+ }
+
+ return NO_UPDATE;
+}
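+
+/*
+ * Decision sketch for the CAMIF-state helper above, derived from its
+ * checks: for a command touching at least one PIX-domain stream on a
+ * non-EXTERNAL_READ input,
+ *   START_STREAM with no PIX/raw stream currently counted
+ *     -> ENABLE_CAMIF
+ *   STOP_STREAM stopping all currently counted PIX/raw streams
+ *     -> DISABLE_CAMIF
+ *   STOP_IMMEDIATELY in the same situation
+ *     -> DISABLE_CAMIF_IMMEDIATELY
+ * and anything else -> NO_UPDATE.
+ */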
+
+static void msm_isp_update_camif_output_count(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return;
+ }
+ stream_info =
+ &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (stream_info->stream_src >= RDI_INTF_0)
+ continue;
+ if (stream_info->stream_src == PIX_ENCODER ||
+ stream_info->stream_src == PIX_VIEWFINDER ||
+ stream_info->stream_src == PIX_VIDEO ||
+ stream_info->stream_src == IDEAL_RAW) {
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count++;
+ else
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ pix_stream_count--;
+ } else if (stream_info->stream_src == CAMIF_RAW) {
+ if (stream_cfg_cmd->cmd == START_STREAM)
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count++;
+ else
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ raw_stream_count--;
+ }
+ }
+}
+
+/*Factor in Q2 format*/
+#define ISP_DEFAULT_FORMAT_FACTOR 6
+#define ISP_BUS_UTILIZATION_FACTOR 6
+static int msm_isp_update_stream_bandwidth(struct vfe_device *vfe_dev)
+{
+ int i, rc = 0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint64_t total_pix_bandwidth = 0, total_rdi_bandwidth = 0;
+ uint32_t num_pix_streams = 0;
+ uint64_t total_bandwidth = 0;
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state == ACTIVE ||
+ stream_info->state == START_PENDING) {
+ if (stream_info->stream_src < RDI_INTF_0) {
+ total_pix_bandwidth += stream_info->bandwidth;
+ num_pix_streams++;
+ } else {
+ total_rdi_bandwidth += stream_info->bandwidth;
+ }
+ }
+ }
+ total_bandwidth = total_pix_bandwidth + total_rdi_bandwidth;
+ rc = msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id,
+ (total_bandwidth + MSM_ISP_MIN_AB),
+ (total_bandwidth + MSM_ISP_MIN_IB));
+
+ if (rc < 0)
+ pr_err("%s: update failed\n", __func__);
+
+ return rc;
+}
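+
+/*
+ * Example of the bus vote computed above, with assumed numbers: two
+ * active PIX streams at 150 MB/s each and one RDI stream at 100 MB/s
+ * give total_bandwidth = 400 MB/s, and the vote becomes
+ *   ab = 400 MB/s + MSM_ISP_MIN_AB, ib = 400 MB/s + MSM_ISP_MIN_IB
+ * for the ISP_VFE0/ISP_VFE1 client matching this device.
+ */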
+
+static int msm_isp_axi_wait_for_cfg_done(struct vfe_device *vfe_dev,
+ enum msm_isp_camif_update_state camif_update,
+ uint32_t src_mask, int regUpdateCnt)
+{
+ int rc;
+ unsigned long flags;
+ enum msm_vfe_input_src i = 0;
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ if (src_mask & (1 << i)) {
+ if (vfe_dev->axi_data.stream_update[i] > 0) {
+ pr_err("%s:Stream Update in progress. cnt %d\n",
+ __func__,
+ vfe_dev->axi_data.stream_update[i]);
+ spin_unlock_irqrestore(
+ &vfe_dev->shared_data_lock, flags);
+ return -EINVAL;
+ }
+ vfe_dev->axi_data.stream_update[i] = regUpdateCnt;
+ }
+ }
+ if (src_mask) {
+ init_completion(&vfe_dev->stream_config_complete);
+ vfe_dev->axi_data.pipeline_update = camif_update;
+ }
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock, flags);
+ rc = wait_for_completion_timeout(
+ &vfe_dev->stream_config_complete,
+ msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+ if (rc == 0) {
+ for (i = 0; i < VFE_SRC_MAX; i++) {
+ if (src_mask & (1 << i)) {
+ spin_lock_irqsave(&vfe_dev->shared_data_lock,
+ flags);
+ vfe_dev->axi_data.stream_update[i] = 0;
+ spin_unlock_irqrestore(&vfe_dev->
+ shared_data_lock, flags);
+ }
+ }
+ pr_err("%s: wait timeout\n", __func__);
+ rc = -EBUSY;
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+static int msm_isp_init_stream_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info)
+{
+ int rc = 0;
+
+ /*Set address for both PING & PONG register */
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PING_FLAG, 0);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n",
+ __func__);
+ return rc;
+ }
+
+ if (stream_info->stream_type != BURST_STREAM ||
+ stream_info->runtime_num_burst_capture > 1)
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PONG_FLAG, 0);
+
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n",
+ __func__);
+ return rc;
+ }
+
+ return rc;
+}
+
+static void msm_isp_get_stream_wm_mask(
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t *wm_reload_mask)
+{
+ int i;
+ for (i = 0; i < stream_info->num_planes; i++)
+ *wm_reload_mask |= (1 << stream_info->wm[i]);
+}
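+
+/*
+ * Example of the reload mask built above, with assumed WM indices: a
+ * two-plane stream using wm[] = {2, 5} contributes
+ *   (1 << 2) | (1 << 5) = 0x24
+ * to *wm_reload_mask; masks from several streams are OR-ed together
+ * before being handed to axi_ops.reload_wm().
+ */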
+
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_halt_cmd *halt_cmd)
+{
+ int rc = 0;
+
+ if (atomic_read(&vfe_dev->error_info.overflow_state) ==
+ OVERFLOW_DETECTED) {
+ ISP_DBG("%s: VFE%d already halted, direct return\n",
+ __func__, vfe_dev->pdev->id);
+ return rc;
+ }
+
+ if (halt_cmd->overflow_detected) {
+ /*Store current IRQ mask*/
+ if (vfe_dev->error_info.overflow_recover_irq_mask0 == 0) {
+ vfe_dev->hw_info->vfe_ops.core_ops.get_irq_mask(vfe_dev,
+ &vfe_dev->error_info.overflow_recover_irq_mask0,
+ &vfe_dev->error_info.overflow_recover_irq_mask1);
+ }
+
+ atomic_cmpxchg(&vfe_dev->error_info.overflow_state,
+ NO_OVERFLOW, OVERFLOW_DETECTED);
+ pr_err("%s: VFE%d Bus overflow detected: start recovery!\n",
+ __func__, vfe_dev->pdev->id);
+ }
+
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev,
+ halt_cmd->blocking_halt);
+
+ if (halt_cmd->stop_camif) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+ }
+
+ return rc;
+}
+
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_reset_cmd *reset_cmd)
+{
+ int rc = 0, i, j;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_isp_bufq *bufq = NULL;
+ uint32_t bufq_handle = 0, bufq_id = 0;
+
+ if (!reset_cmd) {
+ pr_err("%s: NULL pointer reset cmd %p\n", __func__, reset_cmd);
+ rc = -1;
+ return rc;
+ }
+
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev,
+ 0, reset_cmd->blocking);
+
+ for (i = 0, j = 0; j < axi_data->num_active_stream &&
+ i < VFE_AXI_SRC_MAX; i++, j++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
+ rc = -1;
+ pr_err("%s invalid stream src = %d\n", __func__,
+ stream_info->stream_src);
+ break;
+ }
+ if (stream_info->state != ACTIVE) {
+ j--;
+ continue;
+ }
+
+ for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
+ bufq_handle = stream_info->bufq_handle[bufq_id];
+ if (!bufq_handle)
+ continue;
+
+ bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
+ bufq_handle);
+ if (!bufq) {
+ pr_err("%s: bufq null %p by handle %x\n",
+ __func__, bufq, bufq_handle);
+ continue;
+ }
+
+ vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr, bufq_handle,
+ MSM_ISP_BUFFER_FLUSH_ALL);
+ memset(&stream_info->buf, 0, sizeof(stream_info->buf));
+
+ axi_data->src_info[SRC_TO_INTF(stream_info->
+				stream_src)].frame_id = reset_cmd->frame_id;
+ msm_isp_reset_burst_count_and_frame_drop(vfe_dev,
+ stream_info);
+ }
+ }
+
+ if (rc < 0)
+		pr_err("%s: Error! reset hw timed out\n", __func__);
+
+ return rc;
+}
+
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_restart_cmd *restart_cmd)
+{
+ int rc = 0, i, j;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t wm_reload_mask = 0x0;
+ unsigned long flags;
+
+ vfe_dev->buf_mgr->frameId_mismatch_recovery = 0;
+ for (i = 0, j = 0; j < axi_data->num_active_stream &&
+ i < VFE_AXI_SRC_MAX; i++, j++) {
+ stream_info = &axi_data->stream_info[i];
+ if (stream_info->state != ACTIVE) {
+ j--;
+ continue;
+ }
+ msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base, wm_reload_mask);
+
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.restart(vfe_dev, 0,
+ restart_cmd->enable_camif);
+ if (rc < 0)
+ pr_err("%s Error restarting HW\n", __func__);
+
+ return rc;
+}
+
+static int msm_isp_axi_update_cgc_override(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ uint8_t cgc_override)
+{
+ int i = 0, j = 0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ for (j = 0; j < stream_info->num_planes; j++) {
+ if (vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_cgc_override)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ update_cgc_override(vfe_dev,
+ stream_info->wm[j], cgc_override);
+ }
+ }
+ return 0;
+}
+
+static int msm_isp_update_dual_HW_ms_info_at_start(
+ struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src stream_src)
+{
+ int rc = 0;
+ uint32_t j, k, max_sof = 0;
+ uint8_t slave_id;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_src_info *src_info = NULL;
+ uint32_t vfe_id = 0;
+ unsigned long flags;
+
+ if (stream_src >= VFE_SRC_MAX) {
+ pr_err("%s: Error! Invalid src %u\n", __func__, stream_src);
+ return -EINVAL;
+ }
+
+ src_info = &axi_data->src_info[stream_src];
+ if (src_info->dual_hw_type != DUAL_HW_MASTER_SLAVE)
+ return rc;
+
+ spin_lock_irqsave(&vfe_dev->common_data->common_dev_data_lock, flags);
+ if (src_info->dual_hw_ms_info.dual_hw_ms_type ==
+ MS_TYPE_MASTER) {
+ if (vfe_dev->common_data->ms_resource.master_active == 1) {
+ spin_unlock_irqrestore(&vfe_dev->common_data->
+ common_dev_data_lock, flags);
+ return rc;
+ }
+
+ vfe_dev->common_data->ms_resource.master_active = 1;
+
+ /*
+		 * If any slaves are active, find the max slave frame_id
+		 * and assign it to the master, so the master starts higher
+		 * and the slaves can then copy the master frame_id without
+		 * repeating.
+ */
+ if (!vfe_dev->common_data->ms_resource.slave_active_mask) {
+ spin_unlock_irqrestore(&vfe_dev->common_data->
+ common_dev_data_lock, flags);
+ return rc;
+ }
+
+ for (j = 0, k = 0; k < MS_NUM_SLAVE_MAX; k++) {
+ if (!(vfe_dev->common_data->ms_resource.
+ reserved_slave_mask & (1 << k)))
+ continue;
+
+ if (vfe_dev->common_data->ms_resource.slave_active_mask
+ & (1 << k) &&
+ (vfe_dev->common_data->ms_resource.
+ slave_sof_info[k].frame_id > max_sof)) {
+ max_sof = vfe_dev->common_data->ms_resource.
+ slave_sof_info[k].frame_id;
+ }
+ j++;
+ if (j == vfe_dev->common_data->ms_resource.num_slave)
+ break;
+ }
+ vfe_dev->axi_data.src_info[stream_src].frame_id =
+ max_sof + 1;
+ if (vfe_dev->is_split) {
+ vfe_id = vfe_dev->pdev->id;
+ vfe_id = (vfe_id == 0) ? 1 : 0;
+ vfe_dev->common_data->dual_vfe_res->axi_data[vfe_id]->
+ src_info[stream_src].frame_id = max_sof + 1;
+ }
+
+ ISP_DBG("%s: Setting Master frame_id to %u\n", __func__,
+ max_sof + 1);
+ } else {
+ if (src_info->dual_hw_ms_info.sof_info != NULL) {
+ slave_id = src_info->dual_hw_ms_info.slave_id;
+ vfe_dev->common_data->ms_resource.slave_active_mask |=
+ (1 << slave_id);
+ }
+ }
+ spin_unlock_irqrestore(&vfe_dev->common_data->common_dev_data_lock,
+ flags);
+
+ return rc;
+}
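+
+/*
+ * Worked example for the master start-up above, with assumed slave
+ * state: if two reserved slaves are active with frame_ids 10 and 12,
+ * max_sof becomes 12 and the master (and its dual-VFE peer when
+ * is_split is set) starts at frame_id 13, so the slaves can copy the
+ * master frame_id without ever repeating one of their own.
+ */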
+
+static int msm_isp_update_dual_HW_ms_info_at_stop(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int i, rc = 0;
+ uint8_t slave_id;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ enum msm_vfe_input_src stream_src = VFE_SRC_MAX;
+ struct msm_vfe_src_info *src_info = NULL;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
+ stream_cfg_cmd->num_streams == 0)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ stream_src = SRC_TO_INTF(stream_info->stream_src);
+
+ /* Remove PIX if DISABLE CAMIF */
+ if (stream_src == VFE_PIX_0 && !((camif_update == DISABLE_CAMIF)
+ || (camif_update == DISABLE_CAMIF_IMMEDIATELY)))
+ continue;
+
+ src_info = &axi_data->src_info[stream_src];
+ if (src_info->dual_hw_type != DUAL_HW_MASTER_SLAVE)
+ continue;
+
+ spin_lock(&vfe_dev->common_data->common_dev_data_lock);
+ if (src_info->dual_hw_ms_info.dual_hw_ms_type ==
+ MS_TYPE_MASTER) {
+ /*
+ * Once Master is inactive, slave will increment
+ * its own frame_id
+ */
+ vfe_dev->common_data->ms_resource.master_active = 0;
+ } else {
+ slave_id = src_info->dual_hw_ms_info.slave_id;
+ vfe_dev->common_data->ms_resource.reserved_slave_mask &=
+ ~(1 << slave_id);
+ vfe_dev->common_data->ms_resource.slave_active_mask &=
+ ~(1 << slave_id);
+ vfe_dev->common_data->ms_resource.num_slave--;
+ }
+ src_info->dual_hw_ms_info.sof_info = NULL;
+ spin_unlock(&vfe_dev->common_data->common_dev_data_lock);
+ vfe_dev->vfe_ub_policy = 0;
+ }
+
+ return rc;
+}
+
+static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int i, rc = 0;
+ uint8_t src_state, wait_for_complete = 0;
+ uint32_t wm_reload_mask = 0x0;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ uint32_t src_mask = 0;
+ unsigned long flags;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ if (camif_update == ENABLE_CAMIF) {
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id = 0;
+ }
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
+ src_state = axi_data->src_info[
+ SRC_TO_INTF(stream_info->stream_src)].active;
+ else {
+ ISP_DBG("%s: invalid src info index\n", __func__);
+ return -EINVAL;
+ }
+
+ msm_isp_calculate_bandwidth(axi_data, stream_info);
+ msm_isp_get_stream_wm_mask(stream_info, &wm_reload_mask);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+ rc = msm_isp_init_stream_ping_pong_reg(vfe_dev, stream_info);
+ if (rc < 0) {
+ pr_err("%s: No buffer for stream%d\n", __func__,
+ HANDLE_TO_IDX(
+ stream_cfg_cmd->stream_handle[i]));
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return rc;
+ }
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ stream_info->state = START_PENDING;
+ ISP_DBG("start axi Stream 0x%x src_state %d src type %d\n",
+ stream_info->stream_id, src_state,
+ stream_info->stream_type);
+ if (src_state) {
+ src_mask |= (1 << SRC_TO_INTF(stream_info->stream_src));
+ wait_for_complete = 1;
+ } else {
+ if (vfe_dev->dump_reg)
+ msm_camera_io_dump(vfe_dev->vfe_base,
+ 0x900, 1);
+
+ /*Configure AXI start bits to start immediately*/
+ msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info, 0);
+ stream_info->state = ACTIVE;
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
+ SRC_TO_INTF(stream_info->stream_src));
+
+ /*
+ * Active bit is set in enable_camif for PIX.
+ * For RDI, set it here
+ */
+ if (SRC_TO_INTF(stream_info->stream_src) >= VFE_RAW_0 &&
+ SRC_TO_INTF(stream_info->stream_src) <
+ VFE_SRC_MAX) {
+				/* In case PIX and RDI streams are part of the
+				 * same session, this ensures the RDI stream
+				 * has the same frame id as the PIX stream
+ */
+ if (stream_cfg_cmd->sync_frame_id_src)
+ vfe_dev->axi_data.src_info[SRC_TO_INTF(
+ stream_info->stream_src)].frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0]
+ .frame_id;
+ else
+ vfe_dev->axi_data.src_info[SRC_TO_INTF(
+ stream_info->stream_src)].frame_id = 0;
+ vfe_dev->axi_data.src_info[SRC_TO_INTF(
+ stream_info->stream_src)].active = 1;
+ }
+ }
+ }
+ msm_isp_update_stream_bandwidth(vfe_dev);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base, wm_reload_mask);
+ msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+
+ if (camif_update == ENABLE_CAMIF) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, camif_update);
+ vfe_dev->axi_data.camif_state = CAMIF_ENABLE;
+ vfe_dev->hw_info->vfe_ops.irq_ops.enable_camif_err(vfe_dev, 1);
+ }
+
+ if (wait_for_complete) {
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update,
+ src_mask, 2);
+ if (rc < 0)
+ pr_err("%s: wait for config done failed\n", __func__);
+ }
+
+ return rc;
+}
+
+static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd,
+ enum msm_isp_camif_update_state camif_update)
+{
+ int i, rc = 0;
+ uint8_t wait_for_complete_for_this_stream = 0;
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int ext_read =
+ (axi_data->src_info[VFE_PIX_0].input_mux == EXTERNAL_READ);
+ uint32_t src_mask = 0, intf, bufq_id = 0, bufq_handle = 0;
+ unsigned long flags;
+
+ if (stream_cfg_cmd->num_streams > MAX_NUM_STREAM ||
+ stream_cfg_cmd->num_streams == 0)
+ return -EINVAL;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ if (HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+
+ /* set ping pong address to scratch before stream stop */
+ spin_lock_irqsave(&stream_info->lock, flags);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PING_FLAG);
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info, VFE_PONG_FLAG);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ wait_for_complete_for_this_stream = 0;
+
+ stream_info->state = STOP_PENDING;
+ ISP_DBG("Stop axi Stream 0x%x,\n",
+ stream_info->stream_id);
+ if (stream_info->stream_src == CAMIF_RAW ||
+ stream_info->stream_src == IDEAL_RAW) {
+			/* We don't get a reg update IRQ for raw snapshot
+			 * so frame skip can't be configured
+ */
+ if ((camif_update != DISABLE_CAMIF_IMMEDIATELY) &&
+ (!ext_read))
+ wait_for_complete_for_this_stream = 1;
+
+ } else if (stream_info->stream_type == BURST_STREAM &&
+ stream_info->runtime_num_burst_capture == 0) {
+			/* Configure AXI write masters to stop immediately
+			 * since for the burst case, write masters already skip
+ * all frames.
+ */
+ if (stream_info->stream_src == RDI_INTF_0 ||
+ stream_info->stream_src == RDI_INTF_1 ||
+ stream_info->stream_src == RDI_INTF_2)
+ wait_for_complete_for_this_stream = 1;
+ } else {
+ if ((camif_update != DISABLE_CAMIF_IMMEDIATELY) &&
+ (!ext_read))
+ wait_for_complete_for_this_stream = 1;
+ }
+ ISP_DBG("%s: vfe_dev %d camif_update %d wait %d\n", __func__,
+ vfe_dev->pdev->id,
+ camif_update,
+ wait_for_complete_for_this_stream);
+ intf = SRC_TO_INTF(stream_info->stream_src);
+ if (!wait_for_complete_for_this_stream ||
+ stream_info->state == INACTIVE ||
+ !vfe_dev->axi_data.src_info[intf].active) {
+ msm_isp_axi_stream_enable_cfg(vfe_dev, stream_info, 0);
+ stream_info->state = INACTIVE;
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev,
+ SRC_TO_INTF(stream_info->stream_src));
+
+ /*
+			 * Active bit is reset in disable_camif for PIX.
+			 * For RDI, reset it here when not waiting for
+			 * completion. This assumes there is only 1 stream
+			 * mapped to each RDI.
+ */
+ if (intf >= VFE_RAW_0 &&
+ intf < VFE_SRC_MAX) {
+ vfe_dev->axi_data.src_info[intf].active = 0;
+ }
+ } else
+ src_mask |= (1 << intf);
+
+ }
+
+ if (src_mask) {
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev, camif_update,
+ src_mask, 2);
+ if (rc < 0) {
+ pr_err("%s: wait for config done failed, retry...\n",
+ __func__);
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(
+ stream_cfg_cmd->stream_handle[i])];
+ stream_info->state = STOPPING;
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, stream_info, 0);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(
+ vfe_dev,
+ SRC_TO_INTF(stream_info->stream_src));
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev,
+ camif_update, src_mask, 1);
+ if (rc < 0)
+ pr_err("%s: vfe%d cfg done failed\n",
+ __func__, vfe_dev->pdev->id);
+ else
+ pr_err("%s: vfe%d retry success! report err!\n",
+ __func__, vfe_dev->pdev->id);
+ rc = -EBUSY;
+ }
+ }
+
+ /*
+		 * Active bit is reset in disable_camif for PIX.
+		 * For RDI, reset it here after wait_for_complete.
+		 * This assumes there is only 1 stream mapped to each RDI.
+ */
+ for (i = VFE_RAW_0; i < VFE_SRC_MAX; i++) {
+ if (src_mask & (1 << i)) {
+ vfe_dev->axi_data.src_info[i].active = 0;
+ }
+ }
+ }
+
+ if (camif_update == DISABLE_CAMIF) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF);
+ vfe_dev->axi_data.camif_state = CAMIF_DISABLE;
+ } else if ((camif_update == DISABLE_CAMIF_IMMEDIATELY) ||
+ (ext_read)) {
+		/* During stop immediately, stop output then stop input */
+ vfe_dev->hw_info->vfe_ops.irq_ops.enable_camif_err(vfe_dev, 0);
+ vfe_dev->ignore_error = 1;
+ vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+ if (!ext_read)
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev,
+ DISABLE_CAMIF_IMMEDIATELY);
+ vfe_dev->axi_data.camif_state = CAMIF_STOPPED;
+ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 1);
+ vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+ vfe_dev->hw_info->vfe_ops.irq_ops.enable_camif_err(vfe_dev, 1);
+ vfe_dev->ignore_error = 0;
+ }
+ msm_isp_update_camif_output_count(vfe_dev, stream_cfg_cmd);
+ msm_isp_update_stream_bandwidth(vfe_dev);
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])];
+ for (bufq_id = 0; bufq_id < VFE_BUF_QUEUE_MAX; bufq_id++) {
+ bufq_handle = stream_info->bufq_handle[bufq_id];
+ if (!bufq_handle)
+ continue;
+
+ vfe_dev->buf_mgr->ops->flush_buf(
+ vfe_dev->buf_mgr, bufq_handle,
+ MSM_ISP_BUFFER_FLUSH_ALL);
+ }
+ memset(&stream_info->buf, 0, sizeof(stream_info->buf));
+ vfe_dev->reg_update_requested &=
+ ~(BIT(SRC_TO_INTF(stream_info->stream_src)));
+ }
+
+ return rc;
+}
+
+
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, ret;
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd = arg;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ enum msm_isp_camif_update_state camif_update;
+
+ rc = msm_isp_axi_check_stream_state(vfe_dev, stream_cfg_cmd);
+ if (rc < 0) {
+ pr_err("%s: Invalid stream state\n", __func__);
+ return rc;
+ }
+
+ if (axi_data->num_active_stream == 0) {
+ /*Configure UB*/
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_ub(vfe_dev);
+ /*when start reset overflow state*/
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ NO_OVERFLOW);
+ }
+ camif_update = msm_isp_get_camif_update_state(vfe_dev, stream_cfg_cmd);
+ if (camif_update == DISABLE_CAMIF)
+ vfe_dev->axi_data.camif_state = CAMIF_STOPPING;
+ if (stream_cfg_cmd->cmd == START_STREAM) {
+ msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 1);
+
+ rc = msm_isp_start_axi_stream(
+ vfe_dev, stream_cfg_cmd, camif_update);
+ } else {
+ rc = msm_isp_stop_axi_stream(
+ vfe_dev, stream_cfg_cmd, camif_update);
+
+ msm_isp_axi_update_cgc_override(vfe_dev, stream_cfg_cmd, 0);
+ if (axi_data->num_active_stream == 0) {
+ /* Reset hvx state */
+ vfe_dev->hvx_cmd = HVX_DISABLE;
+ }
+
+ /*
+ * Use different ret value to not overwrite the error from
+ * msm_isp_stop_axi_stream
+ */
+ ret = msm_isp_update_dual_HW_ms_info_at_stop(
+ vfe_dev, stream_cfg_cmd, camif_update);
+ if (ret < 0)
+ pr_warn("%s: Warning! Update dual_cam failed\n",
+ __func__);
+ }
+
+ if (rc < 0)
+ pr_err("%s: start/stop stream failed\n", __func__);
+ return rc;
+}
+
+static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
+ uint32_t frame_id, enum msm_vfe_input_src frame_src)
+{
+ int rc = -1;
+ struct msm_isp_buffer *buf = NULL;
+ uint32_t bufq_handle = 0, buf_cnt = 0;
+ uint32_t stream_idx;
+ struct msm_isp_event_data error_event;
+ struct msm_isp_timestamp timestamp;
+
+ if (!vfe_dev || !stream_info) {
+ pr_err("%s %d failed: vfe_dev %p stream_info %p\n", __func__,
+ __LINE__, vfe_dev, stream_info);
+ return -EINVAL;
+ }
+
+ stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
+ if (!stream_info->controllable_output)
+ return -EINVAL;
+
+ if (frame_src >= VFE_SRC_MAX) {
+ pr_err("%s: Invalid frame_src %d", __func__, frame_src);
+ return -EINVAL;
+ }
+
+ if (stream_idx >= VFE_AXI_SRC_MAX) {
+ pr_err("%s: Invalid stream_idx", __func__);
+ return rc;
+ }
+
+ if (user_stream_id == stream_info->stream_id)
+ bufq_handle = stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT];
+ else
+ bufq_handle = stream_info->bufq_handle[VFE_BUF_QUEUE_SHARED];
+
+
+ rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+ vfe_dev->pdev->id, bufq_handle, &buf, &buf_cnt);
+ if (rc < 0) {
+ vfe_dev->error_info.
+ stream_framedrop_count[bufq_handle & 0xFF]++;
+ return rc;
+ }
+
+ msm_isp_get_timestamp(&timestamp);
+ rc = vfe_dev->buf_mgr->ops->update_put_buf_cnt(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx, frame_id);
+ if (rc == 0) {
+ vfe_dev->buf_mgr->ops->buf_done(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx,
+ &timestamp.buf_time, frame_id,
+ stream_info->runtime_output_format);
+ }
+
+ error_event.frame_id = frame_id;
+ error_event.u.error_info.err_type = ISP_ERROR_RETURN_EMPTY_BUFFER;
+ error_event.u.error_info.session_id = stream_info->session_id;
+ error_event.u.error_info.stream_id_mask =
+ 1 << (bufq_handle & 0xFF);
+ msm_isp_send_event(vfe_dev, ISP_EVENT_ERROR, &error_event);
+
+ return 0;
+}
+
+static int msm_isp_request_frame(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t user_stream_id,
+ uint32_t frame_id)
+{
+ struct msm_vfe_axi_stream_request_cmd stream_cfg_cmd;
+ struct msm_vfe_frame_request_queue *queue_req;
+ uint32_t pingpong_status;
+ unsigned long flags;
+ int rc = 0;
+ enum msm_vfe_input_src frame_src = 0;
+ struct dual_vfe_resource *dual_vfe_res = NULL;
+ uint32_t vfe_id = 0;
+
+ if (!vfe_dev || !stream_info) {
+ pr_err("%s %d failed: vfe_dev %p stream_info %p\n", __func__,
+ __LINE__, vfe_dev, stream_info);
+ return -EINVAL;
+ }
+
+ if (!stream_info->controllable_output)
+ return 0;
+
+
+ if (stream_info->stream_src >= VFE_AXI_SRC_MAX) {
+ pr_err("%s:%d invalid stream src %d\n", __func__, __LINE__,
+ stream_info->stream_src);
+ return -EINVAL;
+ }
+
+ frame_src = SRC_TO_INTF(stream_info->stream_src);
+ /*
+	 * If the PIX stream is active, the RDI path uses the SOF frame ID
+	 * of PIX. In case of standalone RDI streaming, the SOF of the
+	 * individual intf is used.
+ */
+ if (((vfe_dev->axi_data.src_info[VFE_PIX_0].active) && (frame_id <=
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id)) ||
+ ((!vfe_dev->axi_data.src_info[VFE_PIX_0].active) && (frame_id <=
+ vfe_dev->axi_data.src_info[frame_src].frame_id)) ||
+ stream_info->undelivered_request_cnt >= MAX_BUFFERS_IN_HW) {
+ pr_debug("%s:%d invalid request_frame %d cur frame id %d pix %d\n",
+ __func__, __LINE__, frame_id,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].active);
+
+ rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
+ user_stream_id, frame_id, frame_src);
+ if (rc < 0)
+ pr_err("%s:%d failed: return_empty_buffer src %d\n",
+ __func__, __LINE__, frame_src);
+ return 0;
+ }
+ if ((frame_src == VFE_PIX_0) && !stream_info->undelivered_request_cnt &&
+ MSM_VFE_STREAM_STOP_PERIOD !=
+ stream_info->prev_framedrop_period) {
+ pr_debug("%s:%d vfe %d frame_id %d prev_pattern %x stream_id %x\n",
+ __func__, __LINE__, vfe_dev->pdev->id, frame_id,
+ stream_info->prev_framedrop_period,
+ stream_info->stream_id);
+
+ rc = msm_isp_return_empty_buffer(vfe_dev, stream_info,
+ user_stream_id, frame_id, frame_src);
+ if (rc < 0)
+ pr_err("%s:%d failed: return_empty_buffer src %d\n",
+ __func__, __LINE__, frame_src);
+ vfe_dev->hw_info->vfe_ops.axi_ops.cfg_framedrop(
+ vfe_dev->vfe_base, stream_info, 0, 0);
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ return 0;
+ }
+
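+	/*
+	 * Queue this request in the per-stream circular request queue; if the
+	 * next slot is still marked as used, the queue has overflowed.
+	 */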
+ spin_lock_irqsave(&stream_info->lock, flags);
+ queue_req = &stream_info->request_queue_cmd[stream_info->request_q_idx];
+ if (queue_req->cmd_used) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err_ratelimited("%s: Request queue overflow.\n", __func__);
+ return -EINVAL;
+ }
+
+ if (user_stream_id == stream_info->stream_id)
+ queue_req->buff_queue_id = VFE_BUF_QUEUE_DEFAULT;
+ else
+ queue_req->buff_queue_id = VFE_BUF_QUEUE_SHARED;
+
+ if (!stream_info->bufq_handle[queue_req->buff_queue_id]) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+		pr_err("%s:%d stream is stopped\n", __func__, __LINE__);
+ return 0;
+ }
+ queue_req->cmd_used = 1;
+
+ stream_info->request_q_idx =
+ (stream_info->request_q_idx + 1) % MSM_VFE_REQUESTQ_SIZE;
+ list_add_tail(&queue_req->list, &stream_info->request_q);
+ stream_info->request_q_cnt++;
+
+ stream_info->undelivered_request_cnt++;
+ stream_cfg_cmd.axi_stream_handle = stream_info->stream_handle;
+ stream_cfg_cmd.frame_skip_pattern = NO_SKIP;
+ stream_cfg_cmd.init_frame_drop = 0;
+ stream_cfg_cmd.burst_count = stream_info->request_q_cnt;
+
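+	/*
+	 * The first outstanding request primes the PING slot and reloads the
+	 * write masters; the second programs the slot indicated by the
+	 * current HW ping-pong status. More than two outstanding requests
+	 * are invalid.
+	 */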
+ if (stream_info->undelivered_request_cnt == 1) {
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+ VFE_PING_FLAG, 1);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ stream_info->undelivered_request_cnt--;
+			pr_err_ratelimited("%s:%d Failed to cfg HAL buffer\n",
+ __func__, __LINE__);
+ return rc;
+ }
+
+ dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
+ vfe_id = vfe_dev->pdev->id;
+ msm_isp_get_stream_wm_mask(stream_info,
+ &dual_vfe_res->wm_reload_mask[vfe_id]);
+ if (stream_info->stream_src < RDI_INTF_0 &&
+ vfe_dev->is_split &&
+ vfe_dev->pdev->id == ISP_VFE1) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ dual_vfe_res->vfe_base[ISP_VFE0],
+ dual_vfe_res->wm_reload_mask[ISP_VFE0]);
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base,
+ dual_vfe_res->wm_reload_mask[ISP_VFE1]);
+ dual_vfe_res->wm_reload_mask[ISP_VFE0] = 0;
+ dual_vfe_res->wm_reload_mask[ISP_VFE1] = 0;
+ } else if (!vfe_dev->is_split ||
+ (stream_info->stream_src >= RDI_INTF_0 &&
+ stream_info->stream_src <= RDI_INTF_2)){
+ vfe_dev->hw_info->vfe_ops.axi_ops.reload_wm(vfe_dev,
+ vfe_dev->vfe_base,
+ dual_vfe_res->wm_reload_mask[vfe_id]);
+ dual_vfe_res->wm_reload_mask[vfe_id] = 0;
+ }
+ stream_info->sw_ping_pong_bit = 0;
+ } else if (stream_info->undelivered_request_cnt == 2) {
+ pingpong_status =
+ vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(
+ vfe_dev);
+
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev, stream_info,
+ pingpong_status, 1);
+ if (rc) {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ stream_info->undelivered_request_cnt--;
+ pr_err_ratelimited("%s:%d Failed to cfg HAL buffer\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ } else {
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ stream_info->undelivered_request_cnt--;
+ pr_err_ratelimited("%s: Invalid undeliver frame count %d\n",
+ __func__, stream_info->undelivered_request_cnt);
+ return -EINVAL;
+ }
+
+ rc = msm_isp_calculate_framedrop(&vfe_dev->axi_data, &stream_cfg_cmd);
+ if (0 == rc)
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ return rc;
+}
+
+static int msm_isp_add_buf_queue(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t stream_id)
+{
+ int rc = 0;
+ uint32_t bufq_id = 0;
+
+ if (stream_id == stream_info->stream_id)
+ bufq_id = VFE_BUF_QUEUE_DEFAULT;
+ else
+ bufq_id = VFE_BUF_QUEUE_SHARED;
+
+ stream_info->bufq_handle[bufq_id] =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(vfe_dev->buf_mgr,
+ stream_info->session_id, stream_id);
+ if (stream_info->bufq_handle[bufq_id] == 0) {
+ pr_err("%s: failed: No valid buffer queue for stream: 0x%x\n",
+ __func__, stream_id);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void msm_isp_remove_buf_queue(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, uint32_t stream_id)
+{
+ uint32_t bufq_id = 0;
+ unsigned long flags;
+
+ if (stream_id == stream_info->stream_id)
+ bufq_id = VFE_BUF_QUEUE_DEFAULT;
+ else
+ bufq_id = VFE_BUF_QUEUE_SHARED;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+ stream_info->bufq_handle[bufq_id] = 0;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+}
+
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i, j;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+ struct msm_vfe_axi_stream_cfg_update_info *update_info;
+ struct msm_isp_sw_framskip *sw_skip_info = NULL;
+ unsigned long flags;
+
+	/* num_streams is u32; update_info[] is bounded by MAX_NUM_STREAM */
+ if (update_cmd->num_streams > MAX_NUM_STREAM)
+ return -EINVAL;
+
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info = &update_cmd->update_info[i];
+ /*check array reference bounds*/
+ if (HANDLE_TO_IDX(update_info->stream_handle) >=
+ VFE_AXI_SRC_MAX) {
+ return -EINVAL;
+ }
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(update_info->stream_handle)];
+ if (SRC_TO_INTF(stream_info->stream_src) >= VFE_SRC_MAX)
+ continue;
+ if (stream_info->state != ACTIVE &&
+ stream_info->state != INACTIVE &&
+ update_cmd->update_type !=
+ UPDATE_STREAM_REQUEST_FRAMES &&
+ update_cmd->update_type !=
+ UPDATE_STREAM_REMOVE_BUFQ &&
+ update_cmd->update_type !=
+ UPDATE_STREAM_SW_FRAME_DROP) {
+			pr_err("%s: Invalid stream state %d, stream id 0x%x\n",
+				__func__, stream_info->state,
+				stream_info->stream_id);
+ return -EINVAL;
+ }
+ if (update_cmd->update_type == UPDATE_STREAM_AXI_CONFIG &&
+ atomic_read(&axi_data->axi_cfg_update[
+ SRC_TO_INTF(stream_info->stream_src)])) {
+ pr_err("%s: AXI stream config updating\n", __func__);
+ return -EBUSY;
+ }
+ }
+
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info = &update_cmd->update_info[i];
+ stream_info = &axi_data->stream_info[
+ HANDLE_TO_IDX(update_info->stream_handle)];
+
+ switch (update_cmd->update_type) {
+ case ENABLE_STREAM_BUF_DIVERT:
+ stream_info->buf_divert = 1;
+ break;
+ case DISABLE_STREAM_BUF_DIVERT:
+ stream_info->buf_divert = 0;
+ vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+ stream_info->bufq_handle[VFE_BUF_QUEUE_DEFAULT],
+ MSM_ISP_BUFFER_FLUSH_DIVERTED);
+ break;
+ case UPDATE_STREAM_FRAMEDROP_PATTERN: {
+ uint32_t framedrop_period =
+ msm_isp_get_framedrop_period(
+ update_info->skip_pattern);
+ spin_lock_irqsave(&stream_info->lock, flags);
+			/* no change, break out early */
+ if (stream_info->current_framedrop_period ==
+ framedrop_period) {
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ break;
+ }
+ if (stream_info->controllable_output) {
+				pr_err("Controllable output streams do not support custom frame skip patterns\n");
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ return -EINVAL;
+ }
+ if (update_info->skip_pattern == SKIP_ALL)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ else
+ stream_info->current_framedrop_period =
+ framedrop_period;
+ if (stream_info->stream_type != BURST_STREAM)
+ msm_isp_cfg_framedrop_reg(vfe_dev, stream_info);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ break;
+ }
+ case UPDATE_STREAM_SW_FRAME_DROP: {
+ sw_skip_info = &update_info->sw_skip_info;
+ if (sw_skip_info->stream_src_mask != 0) {
+ /* SW image buffer drop */
+ pr_debug("%s:%x sw skip type %x mode %d min %d max %d\n",
+ __func__, stream_info->stream_id,
+ sw_skip_info->stats_type_mask,
+ sw_skip_info->skip_mode,
+ sw_skip_info->min_frame_id,
+ sw_skip_info->max_frame_id);
+ spin_lock_irqsave(&stream_info->lock, flags);
+ stream_info->sw_skip = *sw_skip_info;
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ }
+ break;
+ }
+ case UPDATE_STREAM_AXI_CONFIG: {
+ for (j = 0; j < stream_info->num_planes; j++) {
+ stream_info->plane_cfg[j] =
+ update_info->plane_cfg[j];
+ }
+ stream_info->output_format = update_info->output_format;
+ if ((stream_info->state == ACTIVE) &&
+ ((vfe_dev->hw_info->runtime_axi_update == 0) ||
+ (vfe_dev->dual_vfe_enable == 1))) {
+ spin_lock_irqsave(&stream_info->lock, flags);
+ stream_info->state = PAUSE_PENDING;
+ msm_isp_axi_stream_enable_cfg(
+ vfe_dev, stream_info, 1);
+ stream_info->state = PAUSING;
+ atomic_set(&axi_data->
+ axi_cfg_update[SRC_TO_INTF(
+ stream_info->stream_src)],
+ UPDATE_REQUESTED);
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ } else {
+ for (j = 0; j < stream_info->num_planes; j++) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_reg(vfe_dev, stream_info, j);
+ }
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+ if (stream_info->state != ACTIVE) {
+ stream_info->runtime_output_format =
+ stream_info->output_format;
+ } else {
+ stream_info->state = RESUMING;
+ atomic_set(&axi_data->
+ axi_cfg_update[SRC_TO_INTF(
+ stream_info->stream_src)],
+ APPLYING_UPDATE_RESUME);
+ }
+ spin_unlock_irqrestore(&stream_info->lock,
+ flags);
+ }
+ break;
+ }
+ case UPDATE_STREAM_REQUEST_FRAMES: {
+ rc = msm_isp_request_frame(vfe_dev, stream_info,
+ update_info->user_stream_id,
+ update_info->frame_id);
+ if (rc)
+ pr_err("%s failed to request frame!\n",
+ __func__);
+ break;
+ }
+ case UPDATE_STREAM_ADD_BUFQ: {
+ rc = msm_isp_add_buf_queue(vfe_dev, stream_info,
+ update_info->user_stream_id);
+ if (rc)
+ pr_err("%s failed to add bufq!\n", __func__);
+ break;
+ }
+ case UPDATE_STREAM_REMOVE_BUFQ: {
+ msm_isp_remove_buf_queue(vfe_dev, stream_info,
+ update_info->user_stream_id);
+ pr_debug("%s, Remove bufq for Stream 0x%x\n",
+ __func__, stream_info->stream_id);
+ if (stream_info->state == ACTIVE) {
+ stream_info->state = UPDATING;
+ rc = msm_isp_axi_wait_for_cfg_done(vfe_dev,
+ NO_UPDATE, (1 << SRC_TO_INTF(
+ stream_info->stream_src)), 2);
+ if (rc < 0)
+ pr_err("%s: wait for update failed\n",
+ __func__);
+ }
+
+ break;
+ }
+ default:
+ pr_err("%s: Invalid update type\n", __func__);
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t pingpong_status,
+ struct msm_isp_timestamp *ts)
+{
+ int rc;
+ struct msm_isp_buffer *done_buf = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+
+ if (stream_info->state == INACTIVE) {
+ msm_isp_cfg_stream_scratch(vfe_dev, stream_info,
+ pingpong_status);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ pr_err_ratelimited("%s: Warning! Stream already inactive. Drop irq handling\n",
+ __func__);
+ return;
+ }
+ rc = msm_isp_get_done_buf(vfe_dev, stream_info,
+ pingpong_status, &done_buf);
+
+ if (rc < 0) {
+ pr_err_ratelimited("%s:VFE%d get done buf fail\n",
+ __func__, vfe_dev->pdev->id);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_PING_PONG_MISMATCH);
+ return;
+ }
+ if (vfe_dev->buf_mgr->frameId_mismatch_recovery == 1) {
+ pr_err_ratelimited("%s: Mismatch Recovery in progress, drop frame!\n",
+ __func__);
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ return;
+ }
+
+ stream_info->frame_id++;
+
+ if (stream_info->stream_type == BURST_STREAM && done_buf &&
+ stream_info->runtime_num_burst_capture) {
+ ISP_DBG("%s: burst_frame_count: %d\n",
+ __func__,
+ stream_info->runtime_num_burst_capture);
+ BUG_ON(0 == stream_info->runtime_num_burst_capture);
+ stream_info->runtime_num_burst_capture--;
+ }
+
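+	/*
+	 * Re-arm the just-consumed ping/pong slot with a fresh buffer for
+	 * continuous streams, or for burst streams with frames still pending.
+	 */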
+ if (stream_info->stream_type == CONTINUOUS_STREAM ||
+ stream_info->runtime_num_burst_capture > 1)
+ rc = msm_isp_cfg_ping_pong_address(vfe_dev,
+ stream_info, pingpong_status, 1);
+
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+
+ if (done_buf)
+ msm_isp_process_done_buf(vfe_dev, stream_info,
+ done_buf, ts);
+}
+
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ int i, rc = 0;
+ uint32_t comp_mask = 0, wm_mask = 0;
+ uint32_t pingpong_status, stream_idx;
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_composite_info *comp_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int wm;
+
+ comp_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_comp_mask(irq_status0, irq_status1);
+ wm_mask = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_wm_mask(irq_status0, irq_status1);
+ if (!(comp_mask || wm_mask))
+ return;
+
+ ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
+ pingpong_status =
+ vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
+
+ for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
+ rc = 0;
+ comp_info = &axi_data->composite_info[i];
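+		/*
+		 * WMs belonging to this composite group are serviced through
+		 * the composite irq, so drop them from the individual WM mask.
+		 */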
+ wm_mask &= ~(comp_info->stream_composite_mask);
+ if (comp_mask & (1 << i)) {
+ stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
+ if ((!comp_info->stream_handle) ||
+ (stream_idx >= VFE_AXI_SRC_MAX)) {
+ pr_err_ratelimited("%s: Invalid handle for composite irq\n",
+ __func__);
+ for (wm = 0; wm < axi_data->hw_info->num_wm;
+ wm++)
+ if (comp_info->stream_composite_mask &
+ (1 << wm))
+ msm_isp_cfg_wm_scratch(vfe_dev,
+ wm, pingpong_status);
+ continue;
+ }
+ stream_idx = HANDLE_TO_IDX(comp_info->stream_handle);
+ stream_info = &axi_data->stream_info[stream_idx];
+
+ msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
+ pingpong_status, ts);
+
+ }
+ }
+
+ for (i = 0; i < axi_data->hw_info->num_wm; i++) {
+ if (wm_mask & (1 << i)) {
+ stream_idx = HANDLE_TO_IDX(axi_data->free_wm[i]);
+ if ((!axi_data->free_wm[i]) ||
+ (stream_idx >= VFE_AXI_SRC_MAX)) {
+ pr_err("%s: Invalid handle for wm irq\n",
+ __func__);
+ msm_isp_cfg_wm_scratch(vfe_dev, i,
+ pingpong_status);
+ continue;
+ }
+ stream_info = &axi_data->stream_info[stream_idx];
+
+ msm_isp_process_axi_irq_stream(vfe_dev, stream_info,
+ pingpong_status, ts);
+ }
+ }
+ return;
+}
+
+void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_axi_stream *stream_info;
+ struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
+ int i, j;
+
+ if (!vfe_dev || !axi_data) {
+ pr_err("%s: error %p %p\n", __func__, vfe_dev, axi_data);
+ return;
+ }
+
+ for (i = 0; i < VFE_AXI_SRC_MAX; i++) {
+ stream_info = &axi_data->stream_info[i];
+
+ if (stream_info->state != ACTIVE)
+ continue;
+
+ for (j = 0; j < stream_info->num_planes; j++)
+ vfe_dev->hw_info->vfe_ops.axi_ops.enable_wm(
+ vfe_dev->vfe_base,
+ stream_info->wm[j], 0);
+ }
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
new file mode 100644
index 000000000000..0498ee8af722
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -0,0 +1,114 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_AXI_UTIL_H__
+#define __MSM_ISP_AXI_UTIL_H__
+
+#include "msm_isp.h"
+
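+/*
+ * Map an AXI stream source to its input interface: PIX sources (and the
+ * invalid VFE_AXI_SRC_MAX value) map to VFE_PIX_0, RDI sources map to
+ * VFE_RAW_0..VFE_RAW_2.
+ */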
+#define SRC_TO_INTF(src) \
+ ((src < RDI_INTF_0 || src == VFE_AXI_SRC_MAX) ? VFE_PIX_0 : \
+ (VFE_RAW_0 + src - RDI_INTF_0))
+
+int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
+
+void msm_isp_axi_destroy_stream(
+ struct msm_vfe_axi_shared_data *axi_data, int stream_idx);
+
+int msm_isp_validate_axi_request(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
+
+void msm_isp_axi_reserve_wm(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
+
+void msm_isp_axi_reserve_comp_mask(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_axi_check_stream_state(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream_cfg_cmd *stream_cfg_cmd);
+
+int msm_isp_calculate_framedrop(
+ struct msm_vfe_axi_shared_data *axi_data,
+ struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd);
+void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_cfg_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_axi_cfg_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+int msm_isp_axi_halt(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_halt_cmd *halt_cmd);
+int msm_isp_axi_reset(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_reset_cmd *reset_cmd);
+int msm_isp_axi_restart(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_restart_cmd *restart_cmd);
+
+void msm_isp_axi_stream_update(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+
+void msm_isp_update_framedrop_reg(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+
+void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts);
+
+void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+
+void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev);
+
+int msm_isp_print_ping_pong_address(struct vfe_device *vfe_dev,
+ unsigned long fault_addr);
+
+void msm_isp_increment_frame_id(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src, struct msm_isp_timestamp *ts);
+
+int msm_isp_drop_frame(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info, struct msm_isp_timestamp *ts,
+ struct msm_isp_sof_info *sof_info);
+
+void msm_isp_halt_send_error(struct vfe_device *vfe_dev, uint32_t event);
+
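+/*
+ * Program the scratch buffer address into the given write master's ping/pong
+ * slot selected by pingpong_status, so the HW always has a valid address to
+ * write to.
+ */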
+static inline void msm_isp_cfg_wm_scratch(struct vfe_device *vfe_dev,
+ int wm,
+ uint32_t pingpong_status)
+{
+ vfe_dev->hw_info->vfe_ops.axi_ops.update_ping_pong_addr(
+ vfe_dev->vfe_base, wm,
+ pingpong_status, vfe_dev->buf_mgr->scratch_buf_addr, 0);
+}
+
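+/*
+ * Route all write masters of a stream to the scratch buffer and drop the
+ * software reference to the buffer held in the corresponding ping/pong slot.
+ */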
+static inline void msm_isp_cfg_stream_scratch(struct vfe_device *vfe_dev,
+ struct msm_vfe_axi_stream *stream_info,
+ uint32_t pingpong_status)
+{
+ int i;
+ uint32_t pingpong_bit;
+
+ for (i = 0; i < stream_info->num_planes; i++)
+ msm_isp_cfg_wm_scratch(vfe_dev, stream_info->wm[i],
+ pingpong_status);
+ pingpong_bit = (~(pingpong_status >> stream_info->wm[0]) & 0x1);
+ stream_info->buf[pingpong_bit] = NULL;
+}
+
+#endif /* __MSM_ISP_AXI_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
new file mode 100644
index 000000000000..a3eb7f5752db
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -0,0 +1,881 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_isp.h>
+#include "msm_isp_util.h"
+#include "msm_isp_stats_util.h"
+
+static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info, uint32_t pingpong_status,
+ struct msm_isp_buffer **done_buf)
+{
+ int rc = -1, vfe_id = 0;
+ struct msm_isp_buffer *buf;
+ uint32_t pingpong_bit = 0;
+ uint32_t buf_cnt = 0;
+ uint32_t bufq_handle = stream_info->bufq_handle;
+ uint32_t stats_pingpong_offset;
+ uint32_t stats_idx = STATS_IDX(stream_info->stream_handle);
+ struct dual_vfe_resource *dual_vfe_res = NULL;
+ struct msm_vfe_stats_stream *dual_vfe_stream_info = NULL;
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type ||
+ stats_idx >= MSM_ISP_STATS_MAX) {
+		pr_err("%s: Invalid stats index %d\n", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stats_pingpong_offset =
+ vfe_dev->hw_info->stats_hw_info->stats_ping_pong_offset[
+ stats_idx];
+
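+	/* The inverted status bit selects the ping/pong slot to reprogram */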
+ pingpong_bit = (~(pingpong_status >> stats_pingpong_offset) & 0x1);
+ rc = vfe_dev->buf_mgr->ops->get_buf(vfe_dev->buf_mgr,
+ vfe_dev->pdev->id, bufq_handle, &buf, &buf_cnt);
+ if (rc < 0) {
+ vfe_dev->error_info.stats_framedrop_count[stats_idx]++;
+ return rc;
+ }
+
+ if (buf->num_planes != 1) {
+ pr_err("%s: Invalid buffer\n", __func__);
+ rc = -EINVAL;
+ goto buf_error;
+ }
+ if (vfe_dev->is_split && buf_cnt == MAX_VFE) {
+ dual_vfe_res = vfe_dev->common_data->dual_vfe_res;
+ if (!dual_vfe_res->vfe_base[ISP_VFE0] ||
+ !dual_vfe_res->stats_data[ISP_VFE0] ||
+ !dual_vfe_res->vfe_base[ISP_VFE1] ||
+ !dual_vfe_res->stats_data[ISP_VFE1]) {
+ pr_err("%s:%d error vfe0 %p %p vfe1 %p %p\n", __func__,
+ __LINE__, dual_vfe_res->vfe_base[ISP_VFE0],
+ dual_vfe_res->stats_data[ISP_VFE0],
+ dual_vfe_res->vfe_base[ISP_VFE1],
+ dual_vfe_res->stats_data[ISP_VFE1]);
+ } else {
+ for (vfe_id = 0; vfe_id < MAX_VFE; vfe_id++) {
+ dual_vfe_stream_info = &dual_vfe_res->
+ stats_data[vfe_id]->
+ stream_info[stats_idx];
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ update_ping_pong_addr(
+ dual_vfe_res->vfe_base[vfe_id],
+ dual_vfe_stream_info, pingpong_status,
+ buf->mapped_info[0].paddr +
+ dual_vfe_stream_info->buffer_offset);
+ }
+ }
+ } else if (!vfe_dev->is_split) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.update_ping_pong_addr(
+ vfe_dev->vfe_base, stream_info,
+ pingpong_status, buf->mapped_info[0].paddr +
+ stream_info->buffer_offset);
+ }
+
+ if (stream_info->buf[pingpong_bit] && done_buf)
+ *done_buf = stream_info->buf[pingpong_bit];
+
+ stream_info->buf[pingpong_bit] = buf;
+ return 0;
+buf_error:
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ return rc;
+}
+
+static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
+ struct msm_isp_buffer *done_buf, struct msm_isp_timestamp *ts,
+ struct msm_isp_event_data *buf_event,
+ struct msm_vfe_stats_stream *stream_info,
+ uint32_t *comp_stats_type_mask)
+{
+ int32_t rc = 0, frame_id = 0, drop_buffer = 0;
+ struct msm_isp_stats_event *stats_event = NULL;
+ struct msm_isp_sw_framskip *sw_skip = NULL;
+
+ if (!vfe_dev || !done_buf || !ts || !buf_event || !stream_info ||
+ !comp_stats_type_mask) {
+ pr_err("%s:%d failed: invalid params %p %p %p %p %p %p\n",
+ __func__, __LINE__, vfe_dev, done_buf, ts, buf_event,
+ stream_info, comp_stats_type_mask);
+ return -EINVAL;
+ }
+ frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ sw_skip = &stream_info->sw_skip;
+ stats_event = &buf_event->u.stats;
+
+ if (sw_skip->stats_type_mask &
+ (1 << stream_info->stats_type)) {
+		/*
+		 * The HW stats output of this source is requested to be
+		 * dropped.
+		 */
+ if (sw_skip->skip_mode == SKIP_ALL) {
+ /* drop all buffers */
+ drop_buffer = 1;
+ } else if (sw_skip->skip_mode == SKIP_RANGE &&
+ (sw_skip->min_frame_id <= frame_id &&
+ sw_skip->max_frame_id >= frame_id)) {
+ drop_buffer = 1;
+ } else if (frame_id > sw_skip->max_frame_id) {
+ memset(sw_skip, 0, sizeof
+ (struct msm_isp_sw_framskip));
+ }
+ }
+
+ rc = vfe_dev->buf_mgr->ops->buf_divert(
+ vfe_dev->buf_mgr, done_buf->bufq_handle,
+ done_buf->buf_idx, &ts->buf_time,
+ frame_id);
+ if (rc != 0) {
+ ISP_DBG("%s: vfe_id %d buf_id %d bufq %x put_cnt 1\n", __func__,
+ vfe_dev->pdev->id, done_buf->buf_idx,
+ done_buf->bufq_handle);
+ *comp_stats_type_mask |=
+ 1 << stream_info->stats_type;
+ stats_event->stats_buf_idxs
+ [stream_info->stats_type] =
+ done_buf->buf_idx;
+ return rc;
+ }
+
+ if (drop_buffer) {
+ vfe_dev->buf_mgr->ops->put_buf(
+ vfe_dev->buf_mgr,
+ done_buf->bufq_handle,
+ done_buf->buf_idx);
+ return rc;
+ }
+ stats_event->stats_buf_idxs
+ [stream_info->stats_type] =
+ done_buf->buf_idx;
+ if (!stream_info->composite_flag) {
+ stats_event->stats_mask =
+ 1 << stream_info->stats_type;
+ ISP_DBG("%s: stats frameid: 0x%x %d bufq %x\n",
+ __func__, buf_event->frame_id,
+ stream_info->stats_type, done_buf->bufq_handle);
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_STATS_NOTIFY +
+ stream_info->stats_type,
+ buf_event);
+ } else {
+ *comp_stats_type_mask |=
+ 1 << stream_info->stats_type;
+ }
+
+ return rc;
+}
+
+static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
+ uint32_t stats_irq_mask, struct msm_isp_timestamp *ts,
+ bool is_composite)
+{
+ int i, rc = 0;
+ struct msm_isp_event_data buf_event;
+ struct msm_isp_stats_event *stats_event = &buf_event.u.stats;
+ struct msm_isp_buffer *done_buf;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ uint32_t pingpong_status;
+ uint32_t comp_stats_type_mask = 0;
+
+ memset(&buf_event, 0, sizeof(struct msm_isp_event_data));
+ buf_event.timestamp = ts->buf_time;
+ buf_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ pingpong_status = vfe_dev->hw_info->
+ vfe_ops.stats_ops.get_pingpong_status(vfe_dev);
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ if (!(stats_irq_mask & (1 << i)))
+ continue;
+ stream_info = &vfe_dev->stats_data.stream_info[i];
+
+ if (stream_info->state == STATS_INACTIVE) {
+ pr_debug("%s: Warning! Stream already inactive. Drop irq handling\n",
+ __func__);
+ continue;
+ }
+
+ done_buf = NULL;
+ msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, pingpong_status, &done_buf);
+ if (done_buf) {
+ rc = msm_isp_stats_buf_divert(vfe_dev, done_buf, ts,
+ &buf_event, stream_info, &comp_stats_type_mask);
+ if (rc < 0) {
+ pr_err("%s:%d failed: stats buf divert rc %d\n",
+ __func__, __LINE__, rc);
+ }
+ }
+ }
+ if (is_composite && !rc && comp_stats_type_mask) {
+ ISP_DBG("%s:vfe_id %d comp_stats frameid %x,comp_mask %x\n",
+ __func__, vfe_dev->pdev->id, buf_event.frame_id,
+ comp_stats_type_mask);
+ stats_event->stats_mask = comp_stats_type_mask;
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_COMP_STATS_NOTIFY, &buf_event);
+ comp_stats_type_mask = 0;
+ }
+ return rc;
+}
+
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts)
+{
+ int j, rc;
+ uint32_t atomic_stats_mask = 0;
+ uint32_t stats_comp_mask = 0, stats_irq_mask = 0;
+ bool comp_flag = false;
+ uint32_t num_stats_comp_mask =
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+
+ stats_comp_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_comp_mask(irq_status0, irq_status1);
+ stats_irq_mask = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_wm_mask(irq_status0, irq_status1);
+ if (!(stats_comp_mask || stats_irq_mask))
+ return;
+
+ ISP_DBG("%s: vfe %d status: 0x%x\n", __func__, vfe_dev->pdev->id,
+ irq_status0);
+
+ /* Clear composite mask irq bits, they will be restored by comp mask */
+ for (j = 0; j < num_stats_comp_mask; j++) {
+ stats_irq_mask &= ~atomic_read(
+ &vfe_dev->stats_data.stats_comp_mask[j]);
+ }
+
+ /* Process non-composite irq */
+ if (stats_irq_mask) {
+ rc = msm_isp_stats_configure(vfe_dev, stats_irq_mask, ts,
+ comp_flag);
+ if (rc < 0) {
+ pr_err("%s:%d failed individual stats rc %d\n",
+ __func__, __LINE__, rc);
+ }
+ }
+
+ /* Process composite irq */
+ if (stats_comp_mask) {
+ for (j = 0; j < num_stats_comp_mask; j++) {
+ if (!(stats_comp_mask & (1 << j)))
+ continue;
+
+ atomic_stats_mask = atomic_read(
+ &vfe_dev->stats_data.stats_comp_mask[j]);
+
+ rc = msm_isp_stats_configure(vfe_dev, atomic_stats_mask,
+ ts, !comp_flag);
+ if (rc < 0) {
+ pr_err("%s:%d failed comp stats %d rc %d\n",
+ __func__, __LINE__, j, rc);
+ }
+ }
+ }
+}
+
+int msm_isp_stats_create_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_request_cmd *stream_req_cmd)
+{
+ int rc = -1;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ uint32_t stats_idx;
+
+ if (!(vfe_dev->hw_info->stats_hw_info->stats_capability_mask &
+ (1 << stream_req_cmd->stats_type))) {
+ pr_err("%s: Stats type not supported\n", __func__);
+ return rc;
+ }
+
+ stats_idx = vfe_dev->hw_info->vfe_ops.stats_ops.
+ get_stats_idx(stream_req_cmd->stats_type);
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+		pr_err("%s: Invalid stats index %d\n", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
+ if (stream_info->state != STATS_AVALIABLE) {
+ pr_err("%s: Stats already requested\n", __func__);
+ return rc;
+ }
+
+ if (stream_req_cmd->framedrop_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid framedrop pattern\n", __func__);
+ return rc;
+ }
+
+ if (stream_req_cmd->irq_subsample_pattern >= MAX_SKIP) {
+ pr_err("%s: Invalid irq subsample pattern\n", __func__);
+ return rc;
+ }
+
+ stream_info->session_id = stream_req_cmd->session_id;
+ stream_info->stream_id = stream_req_cmd->stream_id;
+ stream_info->composite_flag = stream_req_cmd->composite_flag;
+ stream_info->stats_type = stream_req_cmd->stats_type;
+ stream_info->buffer_offset = stream_req_cmd->buffer_offset;
+ stream_info->framedrop_pattern = stream_req_cmd->framedrop_pattern;
+ stream_info->init_stats_frame_drop = stream_req_cmd->init_frame_drop;
+ stream_info->irq_subsample_pattern =
+ stream_req_cmd->irq_subsample_pattern;
+ stream_info->state = STATS_INACTIVE;
+
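+	/*
+	 * The stream handle encodes a rolling counter in the upper bits and
+	 * the stats index in the lower 8 bits; skip counter values that
+	 * would yield a zero handle.
+	 */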
+ if ((vfe_dev->stats_data.stream_handle_cnt << 8) == 0)
+ vfe_dev->stats_data.stream_handle_cnt++;
+
+ stream_req_cmd->stream_handle =
+ (++vfe_dev->stats_data.stream_handle_cnt) << 8 | stats_idx;
+
+ stream_info->stream_handle = stream_req_cmd->stream_handle;
+ return 0;
+}
+
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = -1;
+ struct msm_vfe_stats_stream_request_cmd *stream_req_cmd = arg;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ uint32_t framedrop_period;
+ uint32_t stats_idx;
+
+ rc = msm_isp_stats_create_stream(vfe_dev, stream_req_cmd);
+ if (rc < 0) {
+ pr_err("%s: create stream failed\n", __func__);
+ return rc;
+ }
+
+ stats_idx = STATS_IDX(stream_req_cmd->stream_handle);
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+		pr_err("%s: Invalid stats index %d\n", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
+
+ framedrop_period = msm_isp_get_framedrop_period(
+ stream_req_cmd->framedrop_pattern);
+
+ if (stream_req_cmd->framedrop_pattern == SKIP_ALL)
+ stream_info->framedrop_pattern = 0x0;
+ else
+ stream_info->framedrop_pattern = 0x1;
+ stream_info->framedrop_period = framedrop_period - 1;
+
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+
+ if (stream_info->init_stats_frame_drop == 0)
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
+ stream_info);
+
+ return rc;
+}
+
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = -1;
+ struct msm_vfe_stats_stream_cfg_cmd stream_cfg_cmd;
+ struct msm_vfe_stats_stream_release_cmd *stream_release_cmd = arg;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ int stats_idx = STATS_IDX(stream_release_cmd->stream_handle);
+ struct msm_vfe_stats_stream *stream_info = NULL;
+
+ if (stats_idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+		pr_err("%s: Invalid stats index %d\n", __func__, stats_idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[stats_idx];
+ if (stream_info->state == STATS_AVALIABLE) {
+		pr_err("%s: stream already released\n", __func__);
+ return rc;
+ } else if (stream_info->state != STATS_INACTIVE) {
+ stream_cfg_cmd.enable = 0;
+ stream_cfg_cmd.num_streams = 1;
+ stream_cfg_cmd.stream_handle[0] =
+ stream_release_cmd->stream_handle;
+ rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
+ }
+
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+
+ vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
+ memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream));
+ return 0;
+}
+
+static int msm_isp_init_stats_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int rc = 0;
+ stream_info->bufq_handle =
+ vfe_dev->buf_mgr->ops->get_bufq_handle(
+ vfe_dev->buf_mgr, stream_info->session_id,
+ stream_info->stream_id);
+ if (stream_info->bufq_handle == 0) {
+ pr_err("%s: no buf configured for stream: 0x%x\n",
+ __func__, stream_info->stream_handle);
+ return -EINVAL;
+ }
+
+ rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PING_FLAG, NULL);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for ping\n", __func__);
+ return rc;
+ }
+ rc = msm_isp_stats_cfg_ping_pong_address(vfe_dev,
+ stream_info, VFE_PONG_FLAG, NULL);
+ if (rc < 0) {
+ pr_err("%s: No free buffer for pong\n", __func__);
+ return rc;
+ }
+ return rc;
+}
+
+static void msm_isp_deinit_stats_ping_pong_reg(
+ struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream *stream_info)
+{
+ int i;
+ struct msm_isp_buffer *buf;
+ for (i = 0; i < 2; i++) {
+ buf = stream_info->buf[i];
+ if (buf)
+ vfe_dev->buf_mgr->ops->put_buf(vfe_dev->buf_mgr,
+ buf->bufq_handle, buf->buf_idx);
+ }
+}
+
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev)
+{
+ int i;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ stream_info = &stats_data->stream_info[i];
+ if (stream_info->state != STATS_ACTIVE)
+ continue;
+
+ if (stream_info->init_stats_frame_drop) {
+ stream_info->init_stats_frame_drop--;
+ if (stream_info->init_stats_frame_drop == 0) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+ vfe_dev, stream_info);
+ }
+ }
+ }
+}
+
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev)
+{
+ int i;
+ uint32_t enable = 0;
+ uint8_t comp_flag = 0;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_vfe_stats_ops *stats_ops =
+ &vfe_dev->hw_info->vfe_ops.stats_ops;
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
+ if (stats_data->stream_info[i].state == STATS_START_PENDING ||
+ stats_data->stream_info[i].state ==
+ STATS_STOP_PENDING) {
+ enable = stats_data->stream_info[i].state ==
+ STATS_START_PENDING ? 1 : 0;
+ stats_data->stream_info[i].state =
+ stats_data->stream_info[i].state ==
+ STATS_START_PENDING ?
+ STATS_STARTING : STATS_STOPPING;
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, BIT(i), enable);
+ comp_flag = stats_data->stream_info[i].composite_flag;
+ if (comp_flag)
+ stats_ops->cfg_comp_mask(vfe_dev, BIT(i),
+ (comp_flag - 1), enable);
+ } else if (stats_data->stream_info[i].state == STATS_STARTING ||
+ stats_data->stream_info[i].state == STATS_STOPPING) {
+ stats_data->stream_info[i].state =
+ stats_data->stream_info[i].state ==
+ STATS_STARTING ? STATS_ACTIVE : STATS_INACTIVE;
+ }
+ }
+ atomic_sub(1, &stats_data->stats_update);
+ if (!atomic_read(&stats_data->stats_update))
+ complete(&vfe_dev->stats_config_complete);
+}
+
+static int msm_isp_stats_wait_for_cfg_done(struct vfe_device *vfe_dev)
+{
+ int rc;
+ init_completion(&vfe_dev->stats_config_complete);
+ atomic_set(&vfe_dev->stats_data.stats_update, 2);
+ rc = wait_for_completion_timeout(
+ &vfe_dev->stats_config_complete,
+ msecs_to_jiffies(VFE_MAX_CFG_TIMEOUT));
+ if (rc == 0) {
+ pr_err("%s: wait timeout\n", __func__);
+ rc = -1;
+ } else {
+ rc = 0;
+ }
+ return rc;
+}
+
+static int msm_isp_stats_update_cgc_override(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i;
+ uint32_t stats_mask = 0, idx;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+ if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s: Invalid stats index %d\n", __func__, idx);
+ return -EINVAL;
+ }
+ stats_mask |= 1 << idx;
+ }
+
+ if (vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.update_cgc_override(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ }
+ return 0;
+}
+
+int msm_isp_stats_reset(struct vfe_device *vfe_dev)
+{
+ int i = 0;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_isp_bufq *bufq = NULL;
+
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = &stats_data->stream_info[i];
+ if (stream_info->state != STATS_ACTIVE)
+ continue;
+
+ bufq = vfe_dev->buf_mgr->ops->get_bufq(vfe_dev->buf_mgr,
+ stream_info->bufq_handle);
+ if (!bufq) {
+ pr_err("%s Error! bufq is NULL\n", __func__);
+ continue;
+ }
+
+ if (bufq->buf_type != ISP_SHARE_BUF)
+ msm_isp_deinit_stats_ping_pong_reg(vfe_dev,
+ stream_info);
+ else
+ vfe_dev->buf_mgr->ops->flush_buf(vfe_dev->buf_mgr,
+ stream_info->bufq_handle,
+ MSM_ISP_BUFFER_FLUSH_ALL);
+ }
+
+ return 0;
+}
+
+int msm_isp_stats_restart(struct vfe_device *vfe_dev)
+{
+ int i = 0;
+ struct msm_vfe_stats_stream *stream_info = NULL;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ stream_info = &stats_data->stream_info[i];
+ if (stream_info->state < STATS_ACTIVE)
+ continue;
+ msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+ }
+
+ return 0;
+}
+
+static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i, rc = 0;
+ uint32_t stats_mask = 0, idx;
+ uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+ uint32_t num_stats_comp_mask = 0;
+ struct msm_vfe_stats_stream *stream_info;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+
+ num_stats_comp_mask =
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+ rc = vfe_dev->hw_info->vfe_ops.stats_ops.check_streams(
+ stats_data->stream_info);
+ if (rc < 0)
+ return rc;
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+ if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s: Invalid stats index %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[idx];
+ if (stream_info->stream_handle !=
+ stream_cfg_cmd->stream_handle[i]) {
+ pr_err("%s: Invalid stream handle: 0x%x received\n",
+ __func__, stream_cfg_cmd->stream_handle[i]);
+ continue;
+ }
+
+ if (stream_info->composite_flag > num_stats_comp_mask) {
+ pr_err("%s: comp grp %d exceed max %d\n",
+ __func__, stream_info->composite_flag,
+ num_stats_comp_mask);
+ return -EINVAL;
+ }
+ rc = msm_isp_init_stats_ping_pong_reg(vfe_dev, stream_info);
+ if (rc < 0) {
+ pr_err("%s: No buffer for stream%d\n", __func__, idx);
+ return rc;
+ }
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+ stream_info->state = STATS_START_PENDING;
+ else
+ stream_info->state = STATS_ACTIVE;
+
+ stats_data->num_active_stream++;
+ stats_mask |= 1 << idx;
+
+ if (stream_info->composite_flag > 0)
+ comp_stats_mask[stream_info->composite_flag-1] |=
+ 1 << idx;
+
+ ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+ __func__, comp_stats_mask[0],
+ comp_stats_mask[1],
+ stats_data->num_active_stream);
+
+ }
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+ rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+ } else {
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ for (i = 0; i < num_stats_comp_mask; i++) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+ vfe_dev, comp_stats_mask[i], i, 1);
+ }
+ }
+ return rc;
+}
+
+static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd)
+{
+ int i, rc = 0;
+ uint32_t stats_mask = 0, idx;
+ uint32_t comp_stats_mask[MAX_NUM_STATS_COMP_MASK] = {0};
+ uint32_t num_stats_comp_mask = 0;
+ struct msm_vfe_stats_stream *stream_info;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ num_stats_comp_mask =
+ vfe_dev->hw_info->stats_hw_info->num_stats_comp_mask;
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+ if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s: Invalid stats index %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[idx];
+ if (stream_info->stream_handle !=
+ stream_cfg_cmd->stream_handle[i]) {
+ pr_err("%s: Invalid stream handle: 0x%x received\n",
+ __func__, stream_cfg_cmd->stream_handle[i]);
+ continue;
+ }
+
+ if (stream_info->composite_flag > num_stats_comp_mask) {
+ pr_err("%s: comp grp %d exceed max %d\n",
+ __func__, stream_info->composite_flag,
+ num_stats_comp_mask);
+ return -EINVAL;
+ }
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
+ stream_info->state = STATS_STOP_PENDING;
+ else
+ stream_info->state = STATS_INACTIVE;
+
+ stats_data->num_active_stream--;
+ stats_mask |= 1 << idx;
+
+ if (stream_info->composite_flag > 0)
+ comp_stats_mask[stream_info->composite_flag-1] |=
+ 1 << idx;
+
+ ISP_DBG("%s: stats_mask %x %x active streams %d\n",
+ __func__, comp_stats_mask[0],
+ comp_stats_mask[1],
+ stats_data->num_active_stream);
+ }
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+ rc = msm_isp_stats_wait_for_cfg_done(vfe_dev);
+ } else {
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(
+ vfe_dev, stats_mask, stream_cfg_cmd->enable);
+ for (i = 0; i < num_stats_comp_mask; i++) {
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_comp_mask(
+ vfe_dev, comp_stats_mask[i], i, 0);
+ }
+ }
+
+ for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
+ idx = STATS_IDX(stream_cfg_cmd->stream_handle[i]);
+
+ if (idx >= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s: Invalid stats index %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ stream_info = &stats_data->stream_info[idx];
+ msm_isp_deinit_stats_ping_pong_reg(vfe_dev, stream_info);
+ }
+ return rc;
+}
+
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ struct msm_vfe_stats_stream_cfg_cmd *stream_cfg_cmd = arg;
+ if (vfe_dev->stats_data.num_active_stream == 0)
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_ub(vfe_dev);
+
+ if (stream_cfg_cmd->enable) {
+ msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+
+ rc = msm_isp_start_stats_stream(vfe_dev, stream_cfg_cmd);
+ } else {
+ rc = msm_isp_stop_stats_stream(vfe_dev, stream_cfg_cmd);
+
+ msm_isp_stats_update_cgc_override(vfe_dev, stream_cfg_cmd);
+ }
+
+ return rc;
+}
+
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ struct msm_vfe_stats_stream *stream_info;
+ struct msm_vfe_stats_shared_data *stats_data = &vfe_dev->stats_data;
+ struct msm_vfe_axi_stream_update_cmd *update_cmd = arg;
+ struct msm_vfe_axi_stream_cfg_update_info *update_info = NULL;
+ struct msm_isp_sw_framskip *sw_skip_info = NULL;
+
+ /*validate request*/
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info = &update_cmd->update_info[i];
+ /*check array reference bounds*/
+		if (STATS_IDX(update_info->stream_handle)
+			>= vfe_dev->hw_info->stats_hw_info->num_stats_type) {
+			pr_err("%s: stats idx %d out of bounds\n", __func__,
+				STATS_IDX(update_info->stream_handle));
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < update_cmd->num_streams; i++) {
+ update_info = &update_cmd->update_info[i];
+ stream_info = &stats_data->stream_info[
+ STATS_IDX(update_info->stream_handle)];
+ if (stream_info->stream_handle !=
+ update_info->stream_handle) {
+ pr_err("%s: stats stream handle %x %x mismatch!\n",
+ __func__, stream_info->stream_handle,
+ update_info->stream_handle);
+ continue;
+ }
+
+ switch (update_cmd->update_type) {
+ case UPDATE_STREAM_STATS_FRAMEDROP_PATTERN: {
+ uint32_t framedrop_period =
+ msm_isp_get_framedrop_period(
+ update_info->skip_pattern);
+ if (update_info->skip_pattern == SKIP_ALL)
+ stream_info->framedrop_pattern = 0x0;
+ else
+ stream_info->framedrop_pattern = 0x1;
+ stream_info->framedrop_period = framedrop_period - 1;
+ if (stream_info->init_stats_frame_drop == 0)
+ vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(
+ vfe_dev, stream_info);
+ break;
+ }
+ case UPDATE_STREAM_SW_FRAME_DROP: {
+ sw_skip_info = &update_info->sw_skip_info;
+ if (!stream_info->sw_skip.stream_src_mask)
+ stream_info->sw_skip = *sw_skip_info;
+
+ if (sw_skip_info->stats_type_mask != 0) {
+ /* No image buffer skip, only stats skip */
+ pr_debug("%s:%x skip type %x mode %d min %d max %d\n",
+ __func__, stream_info->stream_id,
+ sw_skip_info->stats_type_mask,
+ sw_skip_info->skip_mode,
+ sw_skip_info->min_frame_id,
+ sw_skip_info->max_frame_id);
+ stream_info->sw_skip.stats_type_mask =
+ sw_skip_info->stats_type_mask;
+ }
+ break;
+ }
+
+ default:
+ pr_err("%s: Invalid update type\n", __func__);
+ return -EINVAL;
+ }
+ }
+ return rc;
+}
+
+void msm_isp_stats_disable(struct vfe_device *vfe_dev)
+{
+ int i;
+ unsigned int mask = 0;
+
+ if (!vfe_dev) {
+ pr_err("%s: error NULL ptr\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++)
+ mask |= 1 << i;
+
+ vfe_dev->hw_info->vfe_ops.stats_ops.enable_module(vfe_dev, mask, 0);
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
new file mode 100644
index 000000000000..01120b65be92
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_STATS_UTIL_H__
+#define __MSM_ISP_STATS_UTIL_H__
+
+#include "msm_isp.h"
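+/* The lower 8 bits of a stats stream handle encode the stats type index */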
+#define STATS_IDX(idx) (idx & 0xFF)
+
+void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1,
+ struct msm_isp_timestamp *ts);
+void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
+int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_update_stats_framedrop_reg(struct vfe_device *vfe_dev);
+void msm_isp_stats_disable(struct vfe_device *vfe_dev);
+int msm_isp_stats_reset(struct vfe_device *vfe_dev);
+int msm_isp_stats_restart(struct vfe_device *vfe_dev);
+#endif /* __MSM_ISP_STATS_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
new file mode 100644
index 000000000000..58b6b0b7b6bb
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -0,0 +1,2293 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <linux/ratelimit.h>
+
+#include "msm.h"
+#include "msm_isp_util.h"
+#include "msm_isp_axi_util.h"
+#include "msm_isp_stats_util.h"
+#include "msm_camera_io_util.h"
+#include "cam_smmu_api.h"
+
+#define MAX_ISP_V4l2_EVENTS 100
+static DEFINE_MUTEX(bandwidth_mgr_mutex);
+static struct msm_isp_bandwidth_mgr isp_bandwidth_mgr;
+
+static uint64_t msm_isp_cpp_clk_rate;
+
+#define VFE40_8974V2_VERSION 0x1001001A
+static struct msm_bus_vectors msm_isp_init_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+/*
+ * During open node, request the minimum ab/ib bus bandwidth needed to
+ * successfully enable the bus clocks.
+ */
+static struct msm_bus_vectors msm_isp_ping_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = MSM_ISP_MIN_AB,
+ .ib = MSM_ISP_MIN_IB,
+ },
+};
+
+static struct msm_bus_vectors msm_isp_pong_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_VFE,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_paths msm_isp_bus_client_config[] = {
+ {
+ ARRAY_SIZE(msm_isp_init_vectors),
+ msm_isp_init_vectors,
+ },
+ {
+ ARRAY_SIZE(msm_isp_ping_vectors),
+ msm_isp_ping_vectors,
+ },
+ {
+ ARRAY_SIZE(msm_isp_pong_vectors),
+ msm_isp_pong_vectors,
+ },
+};
+
+static struct msm_bus_scale_pdata msm_isp_bus_client_pdata = {
+ msm_isp_bus_client_config,
+ ARRAY_SIZE(msm_isp_bus_client_config),
+ .name = "msm_camera_isp",
+};
+
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format)
+{
+ int i;
+ char text[5];
+ text[4] = '\0';
+ for (i = 0; i < 4; i++) {
+ text[i] = (char)(((fourcc_format) >> (i * 8)) & 0xFF);
+ if ((text[i] < '0') || (text[i] > 'z')) {
+ pr_err("%s: Invalid output format %d (unprintable)\n",
+ origin, fourcc_format);
+ return;
+ }
+ }
+ pr_err("%s: Invalid output format %s\n",
+ origin, text);
+ return;
+}
+
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+ int rc = 0;
+ mutex_lock(&bandwidth_mgr_mutex);
+ isp_bandwidth_mgr.client_info[client].active = 1;
+ if (isp_bandwidth_mgr.use_count++) {
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return rc;
+ }
+ isp_bandwidth_mgr.bus_client =
+ msm_bus_scale_register_client(&msm_isp_bus_client_pdata);
+ if (!isp_bandwidth_mgr.bus_client) {
+ pr_err("%s: client register failed\n", __func__);
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return -EINVAL;
+ }
+
+ isp_bandwidth_mgr.bus_vector_active_idx = 1;
+ msm_bus_scale_client_update_request(
+ isp_bandwidth_mgr.bus_client,
+ isp_bandwidth_mgr.bus_vector_active_idx);
+
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return 0;
+}
+
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+ uint64_t ab, uint64_t ib)
+{
+ int i;
+ struct msm_bus_paths *path;
+ mutex_lock(&bandwidth_mgr_mutex);
+ if (!isp_bandwidth_mgr.use_count ||
+ !isp_bandwidth_mgr.bus_client) {
+ pr_err("%s:error bandwidth manager inactive use_cnt:%d bus_clnt:%d\n",
+ __func__, isp_bandwidth_mgr.use_count,
+ isp_bandwidth_mgr.bus_client);
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return -EINVAL;
+ }
+
+ isp_bandwidth_mgr.client_info[client].ab = ab;
+ isp_bandwidth_mgr.client_info[client].ib = ib;
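+	/*
+	 * Switch to the inactive ping/pong bus vector, rebuild it from the
+	 * sum of all active clients and make it the new active request.
+	 */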
+ ALT_VECTOR_IDX(isp_bandwidth_mgr.bus_vector_active_idx);
+ path =
+ &(msm_isp_bus_client_pdata.usecase[
+ isp_bandwidth_mgr.bus_vector_active_idx]);
+ path->vectors[0].ab = 0;
+ path->vectors[0].ib = 0;
+ for (i = 0; i < MAX_ISP_CLIENT; i++) {
+ if (isp_bandwidth_mgr.client_info[i].active) {
+ path->vectors[0].ab +=
+ isp_bandwidth_mgr.client_info[i].ab;
+ path->vectors[0].ib +=
+ isp_bandwidth_mgr.client_info[i].ib;
+ }
+ }
+ msm_bus_scale_client_update_request(isp_bandwidth_mgr.bus_client,
+ isp_bandwidth_mgr.bus_vector_active_idx);
+ /* Insert into circular buffer */
+ msm_isp_update_req_history(isp_bandwidth_mgr.bus_client,
+ path->vectors[0].ab,
+ path->vectors[0].ib,
+ isp_bandwidth_mgr.client_info,
+ sched_clock());
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return 0;
+}
+
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client)
+{
+ if (client >= MAX_ISP_CLIENT) {
+		pr_err("invalid client id %d\n", client);
+ return;
+ }
+ mutex_lock(&bandwidth_mgr_mutex);
+ memset(&isp_bandwidth_mgr.client_info[client], 0,
+ sizeof(struct msm_isp_bandwidth_info));
+ if (--isp_bandwidth_mgr.use_count) {
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return;
+ }
+
+ if (!isp_bandwidth_mgr.bus_client) {
+ pr_err("%s:%d error: bus client invalid\n", __func__, __LINE__);
+ mutex_unlock(&bandwidth_mgr_mutex);
+ return;
+ }
+
+ msm_bus_scale_client_update_request(
+ isp_bandwidth_mgr.bus_client, 0);
+ msm_bus_scale_unregister_client(isp_bandwidth_mgr.bus_client);
+ isp_bandwidth_mgr.bus_client = 0;
+ mutex_unlock(&bandwidth_mgr_mutex);
+}
+
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+ struct msm_isp_statistics *stats)
+{
+ stats->isp_vfe0_active = isp_bandwidth_mgr.client_info[ISP_VFE0].active;
+ stats->isp_vfe0_ab = isp_bandwidth_mgr.client_info[ISP_VFE0].ab;
+ stats->isp_vfe0_ib = isp_bandwidth_mgr.client_info[ISP_VFE0].ib;
+
+ stats->isp_vfe1_active = isp_bandwidth_mgr.client_info[ISP_VFE1].active;
+ stats->isp_vfe1_ab = isp_bandwidth_mgr.client_info[ISP_VFE1].ab;
+ stats->isp_vfe1_ib = isp_bandwidth_mgr.client_info[ISP_VFE1].ib;
+
+ stats->isp_cpp_active = isp_bandwidth_mgr.client_info[ISP_CPP].active;
+ stats->isp_cpp_ab = isp_bandwidth_mgr.client_info[ISP_CPP].ab;
+ stats->isp_cpp_ib = isp_bandwidth_mgr.client_info[ISP_CPP].ib;
+ stats->last_overflow_ab = vfe_dev->msm_isp_last_overflow_ab;
+ stats->last_overflow_ib = vfe_dev->msm_isp_last_overflow_ib;
+ stats->vfe_clk_rate = vfe_dev->msm_isp_vfe_clk_rate;
+ stats->cpp_clk_rate = msm_isp_cpp_clk_rate;
+}
+
+void msm_isp_util_update_last_overflow_ab_ib(struct vfe_device *vfe_dev)
+{
+ struct msm_bus_paths *path;
+ path = &(msm_isp_bus_client_pdata.usecase[
+ isp_bandwidth_mgr.bus_vector_active_idx]);
+ vfe_dev->msm_isp_last_overflow_ab = path->vectors[0].ab;
+ vfe_dev->msm_isp_last_overflow_ib = path->vectors[0].ib;
+}
+
+void msm_isp_util_update_clk_rate(long clock_rate)
+{
+ msm_isp_cpp_clk_rate = clock_rate;
+}
+
+uint32_t msm_isp_get_framedrop_period(
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern)
+{
+ switch (frame_skip_pattern) {
+ case NO_SKIP:
+ case EVERY_2FRAME:
+ case EVERY_3FRAME:
+ case EVERY_4FRAME:
+ case EVERY_5FRAME:
+ case EVERY_6FRAME:
+ case EVERY_7FRAME:
+ case EVERY_8FRAME:
+ return frame_skip_pattern + 1;
+	case EVERY_16FRAME:
+		return 16;
+	case EVERY_32FRAME:
+		return 32;
+ case SKIP_ALL:
+ return 1;
+ default:
+ return 1;
+ }
+ return 1;
+}
+
+int msm_isp_get_clk_info(struct vfe_device *vfe_dev,
+ struct platform_device *pdev, struct msm_cam_clk_info *vfe_clk_info)
+{
+ uint32_t count;
+ int i, rc;
+ uint32_t rates[VFE_CLK_INFO_MAX];
+
+ struct device_node *of_node;
+ of_node = pdev->dev.of_node;
+
+ count = of_property_count_strings(of_node, "clock-names");
+
+ ISP_DBG("count = %d\n", count);
+ if (count == 0) {
+		pr_err("no clocks found in device tree, count=%d\n", count);
+ return 0;
+ }
+
+ if (count > VFE_CLK_INFO_MAX) {
+ pr_err("invalid count=%d, max is %d\n", count,
+ VFE_CLK_INFO_MAX);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node, "clock-names",
+ i, &(vfe_clk_info[i].clk_name));
+ ISP_DBG("clock-names[%d] = %s\n", i, vfe_clk_info[i].clk_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+ if (0 == strcmp(vfe_clk_info[i].clk_name, "vfe_clk_src")) {
+ ISP_DBG("%s: find vfe src clk index %d\n", __func__, i);
+ vfe_dev->hw_info->vfe_clk_idx = i;
+ }
+ }
+ rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+ rates, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+ for (i = 0; i < count; i++) {
+ vfe_clk_info[i].clk_rate =
+ (rates[i] == 0) ? (long)-1 : rates[i];
+ ISP_DBG("clk_rate[%d] = %ld\n", i, vfe_clk_info[i].clk_rate);
+ }
+ vfe_dev->num_clk = count;
+ return 0;
+}
+
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp)
+{
+ struct timespec ts;
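+	/* buf_time uses monotonic boottime; event_time uses wall-clock time */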
+ get_monotonic_boottime(&ts);
+ time_stamp->buf_time.tv_sec = ts.tv_sec;
+ time_stamp->buf_time.tv_usec = ts.tv_nsec/1000;
+ do_gettimeofday(&(time_stamp->event_time));
+}
+
+static inline u32 msm_isp_evt_mask_to_isp_event(u32 evt_mask)
+{
+ u32 evt_id = ISP_EVENT_SUBS_MASK_NONE;
+
+ switch (evt_mask) {
+ case ISP_EVENT_MASK_INDEX_STATS_NOTIFY:
+ evt_id = ISP_EVENT_STATS_NOTIFY;
+ break;
+ case ISP_EVENT_MASK_INDEX_ERROR:
+ evt_id = ISP_EVENT_ERROR;
+ break;
+ case ISP_EVENT_MASK_INDEX_IOMMU_P_FAULT:
+ evt_id = ISP_EVENT_IOMMU_P_FAULT;
+ break;
+ case ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE:
+ evt_id = ISP_EVENT_STREAM_UPDATE_DONE;
+ break;
+ case ISP_EVENT_MASK_INDEX_REG_UPDATE:
+ evt_id = ISP_EVENT_REG_UPDATE;
+ break;
+ case ISP_EVENT_MASK_INDEX_SOF:
+ evt_id = ISP_EVENT_SOF;
+ break;
+ case ISP_EVENT_MASK_INDEX_BUF_DIVERT:
+ evt_id = ISP_EVENT_BUF_DIVERT;
+ break;
+ case ISP_EVENT_MASK_INDEX_BUF_DONE:
+ evt_id = ISP_EVENT_BUF_DONE;
+ break;
+ case ISP_EVENT_MASK_INDEX_COMP_STATS_NOTIFY:
+ evt_id = ISP_EVENT_COMP_STATS_NOTIFY;
+ break;
+ case ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE:
+ evt_id = ISP_EVENT_FE_READ_DONE;
+ break;
+ default:
+ evt_id = ISP_EVENT_SUBS_MASK_NONE;
+ break;
+ }
+
+ return evt_id;
+}
+
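+/*
+ * (Un)subscribe the events selected by one mask bit. Stats notifications
+ * expand to one subscription per stats type; SOF, REG_UPDATE and
+ * STREAM_UPDATE_DONE expand to one subscription per input interface;
+ * everything else is a single event id.
+ */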
+static inline int msm_isp_subscribe_event_mask(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub, int evt_mask_index,
+ u32 evt_id, bool subscribe_flag)
+{
+ int rc = 0, i, interface;
+
+ if (ISP_EVENT_MASK_INDEX_STATS_NOTIFY == evt_mask_index) {
+ for (i = 0; i < MSM_ISP_STATS_MAX; i++) {
+ sub->type = evt_id + i;
+ if (subscribe_flag)
+ rc = v4l2_event_subscribe(fh, sub,
+ MAX_ISP_V4l2_EVENTS, NULL);
+ else
+ rc = v4l2_event_unsubscribe(fh, sub);
+ if (rc != 0) {
+ pr_err("%s: Subs event_type =0x%x failed\n",
+ __func__, sub->type);
+ return rc;
+ }
+ }
+ } else if (ISP_EVENT_MASK_INDEX_SOF == evt_mask_index ||
+ ISP_EVENT_MASK_INDEX_REG_UPDATE == evt_mask_index ||
+ ISP_EVENT_MASK_INDEX_STREAM_UPDATE_DONE == evt_mask_index) {
+ for (interface = 0; interface < VFE_SRC_MAX; interface++) {
+ sub->type = evt_id | interface;
+ if (subscribe_flag)
+ rc = v4l2_event_subscribe(fh, sub,
+ MAX_ISP_V4l2_EVENTS, NULL);
+ else
+ rc = v4l2_event_unsubscribe(fh, sub);
+ if (rc != 0) {
+ pr_err("%s: Subs event_type =0x%x failed\n",
+ __func__, sub->type);
+ return rc;
+ }
+ }
+ } else {
+ sub->type = evt_id;
+ if (subscribe_flag)
+ rc = v4l2_event_subscribe(fh, sub,
+ MAX_ISP_V4l2_EVENTS, NULL);
+ else
+ rc = v4l2_event_unsubscribe(fh, sub);
+ if (rc != 0) {
+ pr_err("%s: Subs event_type =0x%x failed\n",
+ __func__, sub->type);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+static inline int msm_isp_process_event_subscription(struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub, bool subscribe_flag)
+{
+ int rc = 0, evt_mask_index = 0;
+ u32 evt_mask = sub->type;
+ u32 evt_id = 0;
+
+ if (ISP_EVENT_SUBS_MASK_NONE == evt_mask) {
+ pr_err("%s: Subs event_type is None=0x%x\n",
+ __func__, evt_mask);
+ return 0;
+ }
+
+ for (evt_mask_index = ISP_EVENT_MASK_INDEX_STATS_NOTIFY;
+ evt_mask_index <= ISP_EVENT_MASK_INDEX_MASK_FE_READ_DONE;
+ evt_mask_index++) {
+ if (evt_mask & (1<<evt_mask_index)) {
+ evt_id = msm_isp_evt_mask_to_isp_event(evt_mask_index);
+ rc = msm_isp_subscribe_event_mask(fh, sub,
+ evt_mask_index, evt_id, subscribe_flag);
+ if (rc != 0) {
+ pr_err("%s: Subs event index:%d failed\n",
+ __func__, evt_mask_index);
+ return rc;
+ }
+ }
+ }
+ return rc;
+}
+
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return msm_isp_process_event_subscription(fh, sub, true);
+}
+
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return msm_isp_process_event_subscription(fh, sub, false);
+}
+
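+/*
+ * Report the highest rate vfe_clk_src can run at, obtained by asking the
+ * clock framework to round an all-ones rate request.
+ */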
+static int msm_isp_get_max_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+ int clk_idx = 0;
+ unsigned long max_value = ~0;
+ long round_rate = 0;
+
+ if (!vfe_dev || !rate) {
+ pr_err("%s:%d failed: vfe_dev %p rate %p\n", __func__, __LINE__,
+ vfe_dev, rate);
+ return -EINVAL;
+ }
+
+ *rate = 0;
+ if (!vfe_dev->hw_info) {
+ pr_err("%s:%d failed: vfe_dev->hw_info %p\n", __func__,
+ __LINE__, vfe_dev->hw_info);
+ return -EINVAL;
+ }
+
+ clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+ if (clk_idx >= vfe_dev->num_clk) {
+ pr_err("%s:%d failed: clk_idx %d max array size %d\n",
+ __func__, __LINE__, clk_idx,
+ vfe_dev->num_clk);
+ return -EINVAL;
+ }
+
+ round_rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], max_value);
+ if (round_rate < 0) {
+ pr_err("%s: Invalid vfe clock rate\n", __func__);
+ return -EINVAL;
+ }
+
+ *rate = round_rate;
+ return 0;
+}
+
+static int msm_isp_get_clk_rates(struct vfe_device *vfe_dev,
+ struct msm_isp_clk_rates *rates)
+{
+ struct device_node *of_node;
+ int32_t rc = 0;
+ uint32_t svs = 0, nominal = 0, turbo = 0;
+ if (!vfe_dev || !rates) {
+ pr_err("%s:%d failed: vfe_dev %p rates %p\n", __func__,
+ __LINE__, vfe_dev, rates);
+ return -EINVAL;
+ }
+
+ if (!vfe_dev->pdev) {
+ pr_err("%s:%d failed: vfe_dev->pdev %p\n", __func__,
+ __LINE__, vfe_dev->pdev);
+ return -EINVAL;
+ }
+
+ of_node = vfe_dev->pdev->dev.of_node;
+
+ if (!of_node) {
+ pr_err("%s %d failed: of_node = %p\n", __func__,
+ __LINE__, of_node);
+ return -EINVAL;
+ }
+
+ /*
+ * Many older targets don't define an svs rate;
+ * report svs = 0 for those targets.
+ */
+ rc = of_property_read_u32(of_node, "max-clk-svs",
+ &svs);
+ if (rc < 0)
+ svs = 0;
+
+ rc = of_property_read_u32(of_node, "max-clk-nominal",
+ &nominal);
+ if (rc < 0 || !nominal) {
+ pr_err("%s: nominal rate error\n", __func__);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(of_node, "max-clk-turbo",
+ &turbo);
+ if (rc < 0 || !turbo) {
+ pr_err("%s: turbo rate error\n", __func__);
+ return -EINVAL;
+ }
+ rates->svs_rate = svs;
+ rates->nominal_rate = nominal;
+ rates->high_rate = turbo;
+ return 0;
+}
+
+static int msm_isp_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
+{
+ int rc = 0;
+ int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+ long round_rate =
+ clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
+ if (round_rate < 0) {
+ pr_err("%s: Invalid vfe clock rate\n", __func__);
+ return round_rate;
+ }
+
+ rc = clk_set_rate(vfe_dev->vfe_clk[clk_idx], round_rate);
+ if (rc < 0) {
+ pr_err("%s: Vfe set rate error\n", __func__);
+ return rc;
+ }
+ *rate = round_rate;
+ vfe_dev->msm_isp_vfe_clk_rate = round_rate;
+ return 0;
+}
+
+
+static int msm_isp_start_fetch_engine(struct vfe_device *vfe_dev,
+ void *arg)
+{
+ struct msm_vfe_fetch_eng_start *fe_cfg = arg;
+ /*
+ * For offline VFE, the HAL expects the offline output to carry the
+ * same frame id that it requested in do_reprocess.
+ */
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id =
+ fe_cfg->frame_id;
+ return vfe_dev->hw_info->vfe_ops.core_ops.
+ start_fetch_eng(vfe_dev, arg);
+}
+
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+ struct msm_vfe_fetch_engine_info *fetch_engine_info)
+{
+ struct msm_isp_event_data fe_rd_done_event;
+ if (!fetch_engine_info->is_busy)
+ return;
+
+ memset(&fe_rd_done_event, 0, sizeof(struct msm_isp_event_data));
+ fe_rd_done_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ fe_rd_done_event.u.fetch_done.session_id =
+ fetch_engine_info->session_id;
+ fe_rd_done_event.u.fetch_done.stream_id = fetch_engine_info->stream_id;
+ fe_rd_done_event.u.fetch_done.handle = fetch_engine_info->bufq_handle;
+ fe_rd_done_event.u.fetch_done.buf_idx = fetch_engine_info->buf_idx;
+ fe_rd_done_event.u.fetch_done.fd = fetch_engine_info->fd;
+ fe_rd_done_event.u.fetch_done.offline_mode =
+ fetch_engine_info->offline_mode;
+
+ ISP_DBG("%s:VFE%d ISP_EVENT_FE_READ_DONE buf_idx %d\n",
+ __func__, vfe_dev->pdev->id, fetch_engine_info->buf_idx);
+ fetch_engine_info->is_busy = 0;
+ msm_isp_send_event(vfe_dev, ISP_EVENT_FE_READ_DONE, &fe_rd_done_event);
+}
+
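+/*
+ * Configure the pixel interface (VFE_PIX_0): record clock, mux, input
+ * format and line width, pick up the SOF counter step for CAMIF/TESTGEN
+ * subsampling, then program the input mux and issue a reg update.
+ */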
+static int msm_isp_cfg_pix(struct vfe_device *vfe_dev,
+ struct msm_vfe_input_cfg *input_cfg)
+{
+ int rc = 0;
+ struct msm_vfe_pix_cfg *pix_cfg = NULL;
+
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].active) {
+ pr_err("%s: pixel path is active\n", __func__);
+ return -EINVAL;
+ }
+
+ pix_cfg = &input_cfg->d.pix_cfg;
+ if ((pix_cfg->hvx_cmd > HVX_DISABLE) &&
+ (pix_cfg->hvx_cmd <= HVX_ROUND_TRIP))
+ vfe_dev->hvx_cmd = pix_cfg->hvx_cmd;
+ vfe_dev->is_split = input_cfg->d.pix_cfg.is_split;
+
+ vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock =
+ input_cfg->input_pix_clk;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux =
+ input_cfg->d.pix_cfg.input_mux;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].input_format =
+ input_cfg->d.pix_cfg.input_format;
+ vfe_dev->axi_data.src_info[VFE_PIX_0].sof_counter_step = 1;
+
+ /*
+ * Fill pixel_clock into input_pix_clk so that user space
+ * can use rounded clk rate
+ */
+ input_cfg->input_pix_clk =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].pixel_clock;
+
+ ISP_DBG("%s: input mux is %d CAMIF %d io_format 0x%x\n", __func__,
+ input_cfg->d.pix_cfg.input_mux, CAMIF,
+ input_cfg->d.pix_cfg.input_format);
+
+ if (input_cfg->d.pix_cfg.input_mux == CAMIF ||
+ input_cfg->d.pix_cfg.input_mux == TESTGEN) {
+ vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+ input_cfg->d.pix_cfg.camif_cfg.pixels_per_line;
+ if (input_cfg->d.pix_cfg.camif_cfg.subsample_cfg.
+ sof_counter_step > 0) {
+ vfe_dev->axi_data.src_info[VFE_PIX_0].
+ sof_counter_step = input_cfg->d.pix_cfg.
+ camif_cfg.subsample_cfg.sof_counter_step;
+ }
+ } else if (input_cfg->d.pix_cfg.input_mux == EXTERNAL_READ) {
+ vfe_dev->axi_data.src_info[VFE_PIX_0].width =
+ input_cfg->d.pix_cfg.fetch_engine_cfg.buf_stride;
+ }
+ vfe_dev->hw_info->vfe_ops.core_ops.cfg_input_mux(
+ vfe_dev, &input_cfg->d.pix_cfg);
+ vfe_dev->hw_info->vfe_ops.core_ops.reg_update(vfe_dev, VFE_PIX_0);
+ return rc;
+}
+
+static int msm_isp_cfg_rdi(struct vfe_device *vfe_dev,
+ struct msm_vfe_input_cfg *input_cfg)
+{
+ int rc = 0;
+ if (vfe_dev->axi_data.src_info[input_cfg->input_src].active) {
+ pr_err("%s: RAW%d path is active\n", __func__,
+ input_cfg->input_src - VFE_RAW_0);
+ return -EINVAL;
+ }
+
+ vfe_dev->axi_data.src_info[input_cfg->input_src].pixel_clock =
+ input_cfg->input_pix_clk;
+ vfe_dev->hw_info->vfe_ops.core_ops.cfg_rdi_reg(
+ vfe_dev, &input_cfg->d.rdi_cfg, input_cfg->input_src);
+ return rc;
+}
+
+int msm_isp_cfg_input(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ struct msm_vfe_input_cfg *input_cfg = arg;
+ long pixel_clock = 0;
+
+ switch (input_cfg->input_src) {
+ case VFE_PIX_0:
+ rc = msm_isp_cfg_pix(vfe_dev, input_cfg);
+ break;
+ case VFE_RAW_0:
+ case VFE_RAW_1:
+ case VFE_RAW_2:
+ rc = msm_isp_cfg_rdi(vfe_dev, input_cfg);
+ break;
+ default:
+ pr_err("%s: Invalid input source\n", __func__);
+ rc = -EINVAL;
+ }
+
+ if (rc < 0)
+ return rc;
+
+ pixel_clock = input_cfg->input_pix_clk;
+ rc = msm_isp_set_clk_rate(vfe_dev,
+ &pixel_clock);
+ if (rc < 0) {
+ pr_err("%s: clock set rate failed\n", __func__);
+ return rc;
+ }
+ return rc;
+}
+
+static int msm_isp_set_dual_HW_master_slave_mode(
+ struct vfe_device *vfe_dev, void *arg)
+{
+ /*
+ * This function assumes no two processes access it simultaneously.
+ * Currently this is guaranteed by the mutex lock in the ioctl path.
+ * If that changes, this needs to be revisited.
+ */
+ int rc = 0, i, j;
+ struct msm_isp_set_dual_hw_ms_cmd *dual_hw_ms_cmd = NULL;
+ struct msm_vfe_src_info *src_info = NULL;
+
+ if (!vfe_dev || !arg) {
+ pr_err("%s: Error! Invalid input vfe_dev %p arg %p\n",
+ __func__, vfe_dev, arg);
+ return -EINVAL;
+ }
+
+ dual_hw_ms_cmd = (struct msm_isp_set_dual_hw_ms_cmd *)arg;
+ vfe_dev->common_data->ms_resource.dual_hw_type = DUAL_HW_MASTER_SLAVE;
+ vfe_dev->vfe_ub_policy = MSM_WM_UB_EQUAL_SLICING;
+ if (dual_hw_ms_cmd->primary_intf < VFE_SRC_MAX) {
+ src_info = &vfe_dev->axi_data.
+ src_info[dual_hw_ms_cmd->primary_intf];
+ src_info->dual_hw_ms_info.dual_hw_ms_type =
+ dual_hw_ms_cmd->dual_hw_ms_type;
+ }
+
+ /* No lock needed here since ioctl lock protects 2 session from race */
+ if (src_info != NULL &&
+ dual_hw_ms_cmd->dual_hw_ms_type == MS_TYPE_MASTER) {
+ src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
+ ISP_DBG("%s: Master\n", __func__);
+
+ src_info->dual_hw_ms_info.sof_info =
+ &vfe_dev->common_data->ms_resource.master_sof_info;
+ vfe_dev->common_data->ms_resource.sof_delta_threshold =
+ dual_hw_ms_cmd->sof_delta_threshold;
+ } else if (src_info != NULL) {
+ spin_lock(&vfe_dev->common_data->common_dev_data_lock);
+ src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
+ ISP_DBG("%s: Slave\n", __func__);
+
+ for (j = 0; j < MS_NUM_SLAVE_MAX; j++) {
+ if (vfe_dev->common_data->ms_resource.
+ reserved_slave_mask & (1 << j))
+ continue;
+
+ vfe_dev->common_data->ms_resource.reserved_slave_mask |=
+ (1 << j);
+ vfe_dev->common_data->ms_resource.num_slave++;
+ src_info->dual_hw_ms_info.sof_info =
+ &vfe_dev->common_data->ms_resource.
+ slave_sof_info[j];
+ src_info->dual_hw_ms_info.slave_id = j;
+ ISP_DBG("%s: Slave id %d\n", __func__, j);
+ break;
+ }
+ spin_unlock(&vfe_dev->common_data->common_dev_data_lock);
+
+ if (j == MS_NUM_SLAVE_MAX) {
+ pr_err("%s: Error! Cannot find free aux resource\n",
+ __func__);
+ return -EBUSY;
+ }
+ }
+ ISP_DBG("%s: num_src %d\n", __func__, dual_hw_ms_cmd->num_src);
+ /*
+ * This loop marks the non-primary interfaces as Master/Slave so that
+ * their frame ids stay in sync. Their timestamps are not saved, so no
+ * sof_info resource is allocated for them.
+ */
+ for (i = 0; i < dual_hw_ms_cmd->num_src; i++) {
+ if (dual_hw_ms_cmd->input_src[i] >= VFE_SRC_MAX) {
+ pr_err("%s: Error! Invalid SRC param %d\n", __func__,
+ dual_hw_ms_cmd->input_src[i]);
+ return -EINVAL;
+ }
+ ISP_DBG("%s: src %d\n", __func__, dual_hw_ms_cmd->input_src[i]);
+ src_info = &vfe_dev->axi_data.
+ src_info[dual_hw_ms_cmd->input_src[i]];
+ src_info->dual_hw_type = DUAL_HW_MASTER_SLAVE;
+ src_info->dual_hw_ms_info.dual_hw_ms_type =
+ dual_hw_ms_cmd->dual_hw_ms_type;
+ }
+
+ return rc;
+}
+
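+/*
+ * Process a chained list of register config batches from user space: the
+ * head node arrives in @arg, every following node is copied in with
+ * copy_from_user() and handed to msm_isp_proc_cmd() in turn.
+ */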
+static int msm_isp_proc_cmd_list_unlocked(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ struct msm_vfe_cfg_cmd_list *proc_cmd =
+ (struct msm_vfe_cfg_cmd_list *)arg;
+ struct msm_vfe_cfg_cmd_list cmd, cmd_next;
+
+ if (!vfe_dev || !arg) {
+ pr_err("%s:%d failed: vfe_dev %p arg %p", __func__, __LINE__,
+ vfe_dev, arg);
+ return -EINVAL;
+ }
+
+ rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd->cfg_cmd);
+ if (rc < 0)
+ pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+ cmd = *proc_cmd;
+
+ while (cmd.next) {
+ if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list)) {
+ pr_err("%s:%d failed: next size %u != expected %zu\n",
+ __func__, __LINE__, cmd.next_size,
+ sizeof(struct msm_vfe_cfg_cmd_list));
+ break;
+ }
+ if (copy_from_user(&cmd_next, (void __user *)cmd.next,
+ sizeof(struct msm_vfe_cfg_cmd_list))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ rc = msm_isp_proc_cmd(vfe_dev, &cmd_next.cfg_cmd);
+ if (rc < 0)
+ pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+ cmd = cmd_next;
+ }
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+struct msm_vfe_cfg_cmd2_32 {
+ uint16_t num_cfg;
+ uint16_t cmd_len;
+ compat_caddr_t cfg_data;
+ compat_caddr_t cfg_cmd;
+};
+
+struct msm_vfe_cfg_cmd_list_32 {
+ struct msm_vfe_cfg_cmd2_32 cfg_cmd;
+ compat_caddr_t next;
+ uint32_t next_size;
+};
+
+#define VIDIOC_MSM_VFE_REG_CFG_COMPAT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE, struct msm_vfe_cfg_cmd2_32)
+#define VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE+14, struct msm_vfe_cfg_cmd_list_32)
+
+static void msm_isp_compat_to_proc_cmd(struct msm_vfe_cfg_cmd2 *proc_cmd,
+ struct msm_vfe_cfg_cmd2_32 *proc_cmd_ptr32)
+{
+ proc_cmd->num_cfg = proc_cmd_ptr32->num_cfg;
+ proc_cmd->cmd_len = proc_cmd_ptr32->cmd_len;
+ proc_cmd->cfg_data = compat_ptr(proc_cmd_ptr32->cfg_data);
+ proc_cmd->cfg_cmd = compat_ptr(proc_cmd_ptr32->cfg_cmd);
+}
+
+static int msm_isp_proc_cmd_list_compat(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0;
+ struct msm_vfe_cfg_cmd_list_32 *proc_cmd =
+ (struct msm_vfe_cfg_cmd_list_32 *)arg;
+ struct msm_vfe_cfg_cmd_list_32 cmd, cmd_next;
+ struct msm_vfe_cfg_cmd2 current_cmd;
+
+ if (!vfe_dev || !arg) {
+ pr_err("%s:%d failed: vfe_dev %p arg %p", __func__, __LINE__,
+ vfe_dev, arg);
+ return -EINVAL;
+ }
+ msm_isp_compat_to_proc_cmd(&current_cmd, &proc_cmd->cfg_cmd);
+ rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+ if (rc < 0)
+ pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+ cmd = *proc_cmd;
+
+ while (NULL != compat_ptr(cmd.next)) {
+ if (cmd.next_size != sizeof(struct msm_vfe_cfg_cmd_list_32)) {
+ pr_err("%s:%d failed: next size %u != expected %zu\n",
+ __func__, __LINE__, cmd.next_size,
+ sizeof(struct msm_vfe_cfg_cmd_list_32));
+ break;
+ }
+ if (copy_from_user(&cmd_next, compat_ptr(cmd.next),
+ sizeof(struct msm_vfe_cfg_cmd_list_32))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ msm_isp_compat_to_proc_cmd(&current_cmd, &cmd_next.cfg_cmd);
+ rc = msm_isp_proc_cmd(vfe_dev, &current_cmd);
+ if (rc < 0)
+ pr_err("%s:%d failed: rc %d", __func__, __LINE__, rc);
+
+ cmd = cmd_next;
+ }
+ return rc;
+}
+
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+ if (is_compat_task())
+ return msm_isp_proc_cmd_list_compat(vfe_dev, arg);
+ else
+ return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#else /* CONFIG_COMPAT */
+static int msm_isp_proc_cmd_list(struct vfe_device *vfe_dev, void *arg)
+{
+ return msm_isp_proc_cmd_list_unlocked(vfe_dev, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+
+static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ long rc = 0;
+ struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+
+ if (!vfe_dev || !vfe_dev->vfe_base) {
+ pr_err("%s:%d failed: invalid params %p\n",
+ __func__, __LINE__, vfe_dev);
+ if (vfe_dev)
+ pr_err("%s:%d failed %p\n", __func__,
+ __LINE__, vfe_dev->vfe_base);
+ return -EINVAL;
+ }
+
+ /*
+ * Use the realtime mutex for hard real-time ioctls such as buffer
+ * operations and register updates. Use the core mutex for ioctls
+ * that can take longer to complete, such as starting/stopping ISP
+ * streams, which block until the hardware starts/stops streaming.
+ */
+ ISP_DBG("%s: cmd: %d\n", __func__, _IOC_TYPE(cmd));
+ switch (cmd) {
+ case VIDIOC_MSM_VFE_REG_CFG: {
+ mutex_lock(&vfe_dev->realtime_mutex);
+ rc = msm_isp_proc_cmd(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ case VIDIOC_MSM_VFE_REG_LIST_CFG: {
+ mutex_lock(&vfe_dev->realtime_mutex);
+ rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ case VIDIOC_MSM_ISP_REQUEST_BUF:
+ /* fallthrough */
+ case VIDIOC_MSM_ISP_ENQUEUE_BUF:
+ /* fallthrough */
+ case VIDIOC_MSM_ISP_DEQUEUE_BUF:
+ /* fallthrough */
+ case VIDIOC_MSM_ISP_UNMAP_BUF: {
+ mutex_lock(&vfe_dev->buf_mgr->lock);
+ rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
+ break;
+ }
+ case VIDIOC_MSM_ISP_RELEASE_BUF: {
+ if (vfe_dev->buf_mgr == NULL) {
+ pr_err("%s: buf mgr NULL! rc = -1\n", __func__);
+ rc = -EINVAL;
+ return rc;
+ }
+ mutex_lock(&vfe_dev->buf_mgr->lock);
+ rc = msm_isp_proc_buf_cmd(vfe_dev->buf_mgr, cmd, arg);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
+ break;
+ }
+ case VIDIOC_MSM_ISP_REQUEST_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_request_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_RELEASE_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_release_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_CFG_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_cfg_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_HALT:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_axi_halt(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_RESET:
+ mutex_lock(&vfe_dev->core_mutex);
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ != HALT_ENFORCED) {
+ rc = msm_isp_stats_reset(vfe_dev);
+ rc |= msm_isp_axi_reset(vfe_dev, arg);
+ } else {
+ pr_err_ratelimited("%s: no HW reset, halt enforced.\n",
+ __func__);
+ }
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_AXI_RESTART:
+ mutex_lock(&vfe_dev->core_mutex);
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ != HALT_ENFORCED) {
+ rc = msm_isp_stats_restart(vfe_dev);
+ rc |= msm_isp_axi_restart(vfe_dev, arg);
+ } else {
+ pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
+ __func__);
+ }
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_INPUT_CFG:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_cfg_input(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_SET_DUAL_HW_MASTER_SLAVE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_set_dual_HW_master_slave_mode(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_FETCH_ENG_START:
+ case VIDIOC_MSM_ISP_MAP_BUF_START_FE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_start_fetch_engine(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_REG_UPDATE_CMD:
+ if (arg) {
+ enum msm_vfe_input_src frame_src =
+ *((enum msm_vfe_input_src *)arg);
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ reg_update(vfe_dev, frame_src);
+ }
+ break;
+ case VIDIOC_MSM_ISP_SET_SRC_STATE:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_set_src_state(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_REQUEST_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_request_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_RELEASE_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_release_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_CFG_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_cfg_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_UPDATE_STATS_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_update_stats_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_UPDATE_STREAM:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_update_axi_stream(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case VIDIOC_MSM_ISP_SMMU_ATTACH:
+ mutex_lock(&vfe_dev->core_mutex);
+ rc = msm_isp_smmu_attach(vfe_dev->buf_mgr, arg);
+ mutex_unlock(&vfe_dev->core_mutex);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ vfe_dev->isp_sof_debug = 0;
+ vfe_dev->isp_raw0_debug = 0;
+ vfe_dev->isp_raw1_debug = 0;
+ vfe_dev->isp_raw2_debug = 0;
+ break;
+ case MSM_SD_SHUTDOWN:
+ while (vfe_dev->vfe_open_cnt != 0)
+ msm_isp_close_node(sd, NULL);
+ break;
+
+ default:
+ pr_err_ratelimited("%s: Invalid ISP command %d\n", __func__,
+ cmd);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+
+#ifdef CONFIG_COMPAT
+static long msm_isp_ioctl_compat(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+ long rc = 0;
+
+ if (!vfe_dev || !vfe_dev->vfe_base) {
+ pr_err("%s:%d failed: invalid params %p\n",
+ __func__, __LINE__, vfe_dev);
+ if (vfe_dev)
+ pr_err("%s:%d failed %p\n", __func__,
+ __LINE__, vfe_dev->vfe_base);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case VIDIOC_MSM_VFE_REG_CFG_COMPAT: {
+ struct msm_vfe_cfg_cmd2 proc_cmd;
+ mutex_lock(&vfe_dev->realtime_mutex);
+ msm_isp_compat_to_proc_cmd(&proc_cmd,
+ (struct msm_vfe_cfg_cmd2_32 *) arg);
+ rc = msm_isp_proc_cmd(vfe_dev, &proc_cmd);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ case VIDIOC_MSM_VFE_REG_LIST_CFG_COMPAT: {
+ mutex_lock(&vfe_dev->realtime_mutex);
+ rc = msm_isp_proc_cmd_list(vfe_dev, arg);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ break;
+ }
+ default:
+ return msm_isp_ioctl_unlocked(sd, cmd, arg);
+ }
+
+ return rc;
+}
+
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ return msm_isp_ioctl_compat(sd, cmd, arg);
+}
+#else /* CONFIG_COMPAT */
+long msm_isp_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ return msm_isp_ioctl_unlocked(sd, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
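+/*
+ * Execute a single register config command. Offsets and lengths are
+ * validated against the VFE register window and the user-supplied data
+ * buffer before the requested register/DMI read, write or masked update
+ * is performed.
+ */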
+static int msm_isp_send_hw_cmd(struct vfe_device *vfe_dev,
+ struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd,
+ uint32_t *cfg_data, uint32_t cmd_len)
+{
+ if (!vfe_dev || !reg_cfg_cmd) {
+ pr_err("%s:%d failed: vfe_dev %p reg_cfg_cmd %p\n", __func__,
+ __LINE__, vfe_dev, reg_cfg_cmd);
+ return -EINVAL;
+ }
+ if ((reg_cfg_cmd->cmd_type != VFE_CFG_MASK) &&
+ (!cfg_data || !cmd_len)) {
+ pr_err("%s:%d failed: cmd type %d cfg_data %p cmd_len %d\n",
+ __func__, __LINE__, reg_cfg_cmd->cmd_type, cfg_data,
+ cmd_len);
+ return -EINVAL;
+ }
+
+ /* Validate input parameters */
+ switch (reg_cfg_cmd->cmd_type) {
+ case VFE_WRITE:
+ case VFE_READ:
+ case VFE_WRITE_MB: {
+ if ((reg_cfg_cmd->u.rw_info.reg_offset >
+ (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+ ((reg_cfg_cmd->u.rw_info.reg_offset +
+ reg_cfg_cmd->u.rw_info.len) >
+ resource_size(vfe_dev->vfe_mem))) {
+ pr_err("%s:%d reg_offset %d len %d res %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.rw_info.reg_offset,
+ reg_cfg_cmd->u.rw_info.len,
+ (uint32_t)resource_size(vfe_dev->vfe_mem));
+ return -EINVAL;
+ }
+
+ if ((reg_cfg_cmd->u.rw_info.cmd_data_offset >
+ (UINT_MAX - reg_cfg_cmd->u.rw_info.len)) ||
+ ((reg_cfg_cmd->u.rw_info.cmd_data_offset +
+ reg_cfg_cmd->u.rw_info.len) > cmd_len)) {
+ pr_err("%s:%d cmd_data_offset %d len %d cmd_len %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.rw_info.cmd_data_offset,
+ reg_cfg_cmd->u.rw_info.len, cmd_len);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ case VFE_WRITE_DMI_16BIT:
+ case VFE_WRITE_DMI_32BIT:
+ case VFE_WRITE_DMI_64BIT:
+ case VFE_READ_DMI_16BIT:
+ case VFE_READ_DMI_32BIT:
+ case VFE_READ_DMI_64BIT: {
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
+ if ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset <=
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset) ||
+ (reg_cfg_cmd->u.dmi_info.hi_tbl_offset -
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset !=
+ (sizeof(uint32_t)))) {
+ pr_err("%s:%d hi %d lo %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset);
+ return -EINVAL;
+ }
+ if (reg_cfg_cmd->u.dmi_info.len <= sizeof(uint32_t)) {
+ pr_err("%s:%d len %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.dmi_info.len);
+ return -EINVAL;
+ }
+ if (((UINT_MAX -
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset) <
+ (reg_cfg_cmd->u.dmi_info.len -
+ sizeof(uint32_t))) ||
+ ((reg_cfg_cmd->u.dmi_info.hi_tbl_offset +
+ reg_cfg_cmd->u.dmi_info.len -
+ sizeof(uint32_t)) > cmd_len)) {
+ pr_err("%s:%d hi_tbl_offset %d len %d cmd %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset,
+ reg_cfg_cmd->u.dmi_info.len, cmd_len);
+ return -EINVAL;
+ }
+ }
+ if ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset >
+ (UINT_MAX - reg_cfg_cmd->u.dmi_info.len)) ||
+ ((reg_cfg_cmd->u.dmi_info.lo_tbl_offset +
+ reg_cfg_cmd->u.dmi_info.len) > cmd_len)) {
+ pr_err("%s:%d lo_tbl_offset %d len %d cmd_len %d\n",
+ __func__, __LINE__,
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset,
+ reg_cfg_cmd->u.dmi_info.len, cmd_len);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ switch (reg_cfg_cmd->cmd_type) {
+ case VFE_WRITE: {
+ msm_camera_io_memcpy(vfe_dev->vfe_base +
+ reg_cfg_cmd->u.rw_info.reg_offset,
+ cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+ reg_cfg_cmd->u.rw_info.len);
+ break;
+ }
+ case VFE_WRITE_MB: {
+ msm_camera_io_memcpy_mb(vfe_dev->vfe_base +
+ reg_cfg_cmd->u.rw_info.reg_offset,
+ cfg_data + reg_cfg_cmd->u.rw_info.cmd_data_offset/4,
+ reg_cfg_cmd->u.rw_info.len);
+ break;
+ }
+ case VFE_CFG_MASK: {
+ uint32_t temp;
+ bool grab_lock;
+ unsigned long flags;
+ if ((UINT_MAX - sizeof(temp) <
+ reg_cfg_cmd->u.mask_info.reg_offset) ||
+ (resource_size(vfe_dev->vfe_mem) <
+ reg_cfg_cmd->u.mask_info.reg_offset +
+ sizeof(temp))) {
+ pr_err("%s: VFE_CFG_MASK: Invalid length\n", __func__);
+ return -EINVAL;
+ }
+ grab_lock = vfe_dev->hw_info->vfe_ops.core_ops.
+ is_module_cfg_lock_needed(reg_cfg_cmd->
+ u.mask_info.reg_offset);
+ if (grab_lock)
+ spin_lock_irqsave(&vfe_dev->shared_data_lock, flags);
+ temp = msm_camera_io_r(vfe_dev->vfe_base +
+ reg_cfg_cmd->u.mask_info.reg_offset);
+
+ temp &= ~reg_cfg_cmd->u.mask_info.mask;
+ temp |= reg_cfg_cmd->u.mask_info.val;
+ msm_camera_io_w(temp, vfe_dev->vfe_base +
+ reg_cfg_cmd->u.mask_info.reg_offset);
+ if (grab_lock)
+ spin_unlock_irqrestore(&vfe_dev->shared_data_lock,
+ flags);
+ break;
+ }
+ case VFE_WRITE_DMI_16BIT:
+ case VFE_WRITE_DMI_32BIT:
+ case VFE_WRITE_DMI_64BIT: {
+ int i;
+ uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+ uint32_t hi_val, lo_val, lo_val1;
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT) {
+ hi_tbl_ptr = cfg_data +
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+ }
+ lo_tbl_ptr = cfg_data +
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_64BIT)
+ reg_cfg_cmd->u.dmi_info.len =
+ reg_cfg_cmd->u.dmi_info.len / 2;
+ for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+ lo_val = *lo_tbl_ptr++;
+ if (reg_cfg_cmd->cmd_type == VFE_WRITE_DMI_16BIT) {
+ lo_val1 = lo_val & 0x0000FFFF;
+ lo_val = (lo_val & 0xFFFF0000)>>16;
+ msm_camera_io_w(lo_val1, vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset + 0x4);
+ } else if (reg_cfg_cmd->cmd_type ==
+ VFE_WRITE_DMI_64BIT) {
+ lo_tbl_ptr++;
+ hi_val = *hi_tbl_ptr;
+ hi_tbl_ptr = hi_tbl_ptr + 2;
+ msm_camera_io_w(hi_val, vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset);
+ }
+ msm_camera_io_w(lo_val, vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset + 0x4);
+ }
+ break;
+ }
+ case VFE_READ_DMI_16BIT:
+ case VFE_READ_DMI_32BIT:
+ case VFE_READ_DMI_64BIT: {
+ int i;
+ uint32_t *hi_tbl_ptr = NULL, *lo_tbl_ptr = NULL;
+ uint32_t hi_val, lo_val, lo_val1;
+ if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+ hi_tbl_ptr = cfg_data +
+ reg_cfg_cmd->u.dmi_info.hi_tbl_offset/4;
+ }
+
+ lo_tbl_ptr = cfg_data +
+ reg_cfg_cmd->u.dmi_info.lo_tbl_offset/4;
+
+ if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT)
+ reg_cfg_cmd->u.dmi_info.len =
+ reg_cfg_cmd->u.dmi_info.len / 2;
+
+ for (i = 0; i < reg_cfg_cmd->u.dmi_info.len/4; i++) {
+ lo_val = msm_camera_io_r(vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset + 0x4);
+
+ if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_16BIT) {
+ lo_val1 = msm_camera_io_r(vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset + 0x4);
+ lo_val |= lo_val1 << 16;
+ }
+ *lo_tbl_ptr++ = lo_val;
+ if (reg_cfg_cmd->cmd_type == VFE_READ_DMI_64BIT) {
+ hi_val = msm_camera_io_r(vfe_dev->vfe_base +
+ vfe_dev->hw_info->dmi_reg_offset);
+ *hi_tbl_ptr = hi_val;
+ hi_tbl_ptr += 2;
+ lo_tbl_ptr++;
+ }
+ }
+ break;
+ }
+ case VFE_HW_UPDATE_LOCK: {
+ uint32_t update_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id;
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id != *cfg_data
+ || update_id == *cfg_data) {
+ pr_err("%s hw update lock failed acq %d, cur id %u, last id %u\n",
+ __func__,
+ *cfg_data,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id,
+ update_id);
+ return -EINVAL;
+ }
+ break;
+ }
+ case VFE_HW_UPDATE_UNLOCK: {
+ if (vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id
+ != *cfg_data) {
+ pr_err("hw update across frame boundary,begin id %u, end id %d\n",
+ *cfg_data,
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id);
+ }
+ vfe_dev->axi_data.src_info[VFE_PIX_0].last_updt_frm_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ break;
+ }
+ case VFE_READ: {
+ int i;
+ uint32_t *data_ptr = cfg_data +
+ reg_cfg_cmd->u.rw_info.cmd_data_offset/4;
+ for (i = 0; i < reg_cfg_cmd->u.rw_info.len/4; i++) {
+ if ((data_ptr < cfg_data) ||
+ (UINT_MAX / sizeof(*data_ptr) <
+ (data_ptr - cfg_data)) ||
+ (sizeof(*data_ptr) * (data_ptr - cfg_data) >=
+ cmd_len))
+ return -EINVAL;
+ *data_ptr++ = msm_camera_io_r(vfe_dev->vfe_base +
+ reg_cfg_cmd->u.rw_info.reg_offset);
+ reg_cfg_cmd->u.rw_info.reg_offset += 4;
+ }
+ break;
+ }
+ case GET_MAX_CLK_RATE: {
+ int rc = 0;
+ long rate;
+
+ if (cmd_len != sizeof(__u32)) {
+ pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+ __func__, __LINE__, cmd_len,
+ sizeof(__u32));
+ return -EINVAL;
+ }
+ rc = msm_isp_get_max_clk_rate(vfe_dev, &rate);
+ if (rc < 0) {
+ pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
+ return -EINVAL;
+ }
+
+ *(__u32 *)cfg_data = (__u32)rate;
+
+ break;
+ }
+ case GET_CLK_RATES: {
+ int rc = 0;
+ struct msm_isp_clk_rates rates;
+ struct msm_isp_clk_rates *user_data =
+ (struct msm_isp_clk_rates *)cfg_data;
+ if (cmd_len != sizeof(struct msm_isp_clk_rates)) {
+ pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+ __func__, __LINE__, cmd_len,
+ sizeof(struct msm_isp_clk_rates));
+ return -EINVAL;
+ }
+ rc = msm_isp_get_clk_rates(vfe_dev, &rates);
+ if (rc < 0) {
+ pr_err("%s:%d failed: rc %d\n", __func__, __LINE__, rc);
+ return -EINVAL;
+ }
+ user_data->svs_rate = rates.svs_rate;
+ user_data->nominal_rate = rates.nominal_rate;
+ user_data->high_rate = rates.high_rate;
+ break;
+ }
+ case GET_ISP_ID: {
+ uint32_t *isp_id = NULL;
+
+ if (cmd_len < sizeof(uint32_t)) {
+ pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+ __func__, __LINE__, cmd_len,
+ sizeof(uint32_t));
+ return -EINVAL;
+ }
+
+ isp_id = (uint32_t *)cfg_data;
+ *isp_id = vfe_dev->pdev->id;
+ break;
+ }
+ case SET_WM_UB_SIZE:
+ break;
+ case SET_UB_POLICY: {
+
+ if (cmd_len < sizeof(vfe_dev->vfe_ub_policy)) {
+ pr_err("%s:%d failed: invalid cmd len %u exp %zu\n",
+ __func__, __LINE__, cmd_len,
+ sizeof(vfe_dev->vfe_ub_policy));
+ return -EINVAL;
+ }
+ vfe_dev->vfe_ub_policy = *cfg_data;
+ break;
+ }
+ }
+ return 0;
+}
+
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg)
+{
+ int rc = 0, i;
+ struct msm_vfe_cfg_cmd2 *proc_cmd = arg;
+ struct msm_vfe_reg_cfg_cmd *reg_cfg_cmd;
+ uint32_t *cfg_data = NULL;
+
+ if (!proc_cmd->num_cfg) {
+ pr_err("%s: Passed num_cfg as 0\n", __func__);
+ return -EINVAL;
+ }
+
+ reg_cfg_cmd = kzalloc(sizeof(struct msm_vfe_reg_cfg_cmd)*
+ proc_cmd->num_cfg, GFP_KERNEL);
+ if (!reg_cfg_cmd) {
+ pr_err("%s: reg_cfg alloc failed\n", __func__);
+ rc = -ENOMEM;
+ goto reg_cfg_failed;
+ }
+
+ if (copy_from_user(reg_cfg_cmd,
+ (void __user *)(proc_cmd->cfg_cmd),
+ sizeof(struct msm_vfe_reg_cfg_cmd) * proc_cmd->num_cfg)) {
+ rc = -EFAULT;
+ goto copy_cmd_failed;
+ }
+
+ if (proc_cmd->cmd_len > 0) {
+ cfg_data = kzalloc(proc_cmd->cmd_len, GFP_KERNEL);
+ if (!cfg_data) {
+ pr_err("%s: cfg_data alloc failed\n", __func__);
+ rc = -ENOMEM;
+ goto cfg_data_failed;
+ }
+
+ if (copy_from_user(cfg_data,
+ (void __user *)(proc_cmd->cfg_data),
+ proc_cmd->cmd_len)) {
+ rc = -EFAULT;
+ goto copy_cmd_failed;
+ }
+ }
+
+ for (i = 0; i < proc_cmd->num_cfg; i++)
+ rc = msm_isp_send_hw_cmd(vfe_dev, &reg_cfg_cmd[i],
+ cfg_data, proc_cmd->cmd_len);
+
+ if (copy_to_user(proc_cmd->cfg_data,
+ cfg_data, proc_cmd->cmd_len)) {
+ rc = -EFAULT;
+ goto copy_cmd_failed;
+ }
+
+copy_cmd_failed:
+ kfree(cfg_data);
+cfg_data_failed:
+ kfree(reg_cfg_cmd);
+reg_cfg_failed:
+ return rc;
+}
+
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+ uint32_t event_type,
+ struct msm_isp_event_data *event_data)
+{
+ struct v4l2_event isp_event;
+ memset(&isp_event, 0, sizeof(struct v4l2_event));
+ isp_event.id = 0;
+ isp_event.type = event_type;
+ memcpy(&isp_event.u.data[0], event_data,
+ sizeof(struct msm_isp_event_data));
+ v4l2_event_queue(vfe_dev->subdev.sd.devnode, &isp_event);
+ return 0;
+}
+
+#define CAL_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
+
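+/*
+ * CAL_WORD(width, M, N) is ceil(width * M / N), i.e. the number of
+ * output words per line at M/N words per pixel. The ratios below match
+ * the packed bytes per pixel divided by what appears to be an 8-byte bus
+ * word; e.g. 10-bit MIPI raw packs 4 pixels into 5 bytes, so a line of
+ * 4208 pixels needs CAL_WORD(4208, 5, 32) = 658 words.
+ */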
+int msm_isp_cal_word_per_line(uint32_t output_format,
+ uint32_t pixel_per_line)
+{
+ int val = -1;
+ switch (output_format) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ case V4L2_PIX_FMT_GREY:
+ val = CAL_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ val = CAL_WORD(pixel_per_line, 5, 32);
+ break;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ val = CAL_WORD(pixel_per_line, 3, 16);
+ break;
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ val = CAL_WORD(pixel_per_line, 7, 32);
+ break;
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ val = CAL_WORD(pixel_per_line, 1, 6);
+ break;
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ val = CAL_WORD(pixel_per_line, 1, 5);
+ break;
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ val = CAL_WORD(pixel_per_line, 1, 4);
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ val = CAL_WORD(pixel_per_line, 1, 8);
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ val = CAL_WORD(pixel_per_line, 2, 8);
+ break;
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ val = CAL_WORD(pixel_per_line, 1, 4);
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ val = CAL_WORD(pixel_per_line, 1, 8);
+ break;
+ /* TODO: Add more image formats */
+ default:
+ msm_isp_print_fourcc_error(__func__, output_format);
+ break;
+ }
+ return val;
+}
+
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format)
+{
+ switch (output_format) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ return MIPI;
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ return QCOM;
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ return PLAIN16;
+ default:
+ msm_isp_print_fourcc_error(__func__, output_format);
+ break;
+ }
+ return -EINVAL;
+}
+
+int msm_isp_get_bit_per_pixel(uint32_t output_format)
+{
+ switch (output_format) {
+ case V4L2_PIX_FMT_Y4:
+ return 4;
+ case V4L2_PIX_FMT_Y6:
+ return 6;
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_QBGGR8:
+ case V4L2_PIX_FMT_QGBRG8:
+ case V4L2_PIX_FMT_QGRBG8:
+ case V4L2_PIX_FMT_QRGGB8:
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_META:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV14:
+ case V4L2_PIX_FMT_NV41:
+ case V4L2_PIX_FMT_YVU410:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YYUV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUV411P:
+ case V4L2_PIX_FMT_Y41P:
+ case V4L2_PIX_FMT_YUV444:
+ case V4L2_PIX_FMT_YUV555:
+ case V4L2_PIX_FMT_YUV565:
+ case V4L2_PIX_FMT_YUV32:
+ case V4L2_PIX_FMT_YUV410:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_GREY:
+ case V4L2_PIX_FMT_PAL8:
+ case V4L2_PIX_FMT_UV8:
+ case MSM_V4L2_PIX_FMT_META:
+ return 8;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ case V4L2_PIX_FMT_QBGGR10:
+ case V4L2_PIX_FMT_QGBRG10:
+ case V4L2_PIX_FMT_QGRBG10:
+ case V4L2_PIX_FMT_QRGGB10:
+ case V4L2_PIX_FMT_Y10:
+ case V4L2_PIX_FMT_Y10BPACK:
+ case V4L2_PIX_FMT_P16BGGR10:
+ case V4L2_PIX_FMT_P16GBRG10:
+ case V4L2_PIX_FMT_P16GRBG10:
+ case V4L2_PIX_FMT_P16RGGB10:
+ return 10;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_QBGGR12:
+ case V4L2_PIX_FMT_QGBRG12:
+ case V4L2_PIX_FMT_QGRBG12:
+ case V4L2_PIX_FMT_QRGGB12:
+ case V4L2_PIX_FMT_Y12:
+ return 12;
+ case V4L2_PIX_FMT_SBGGR14:
+ case V4L2_PIX_FMT_SGBRG14:
+ case V4L2_PIX_FMT_SGRBG14:
+ case V4L2_PIX_FMT_SRGGB14:
+ case V4L2_PIX_FMT_QBGGR14:
+ case V4L2_PIX_FMT_QGBRG14:
+ case V4L2_PIX_FMT_QGRBG14:
+ case V4L2_PIX_FMT_QRGGB14:
+ return 14;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_Y16:
+ return 16;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ return 24;
+ /* TODO: Add more image formats */
+ default:
+ msm_isp_print_fourcc_error(__func__, output_format);
+ pr_err("%s: Invalid output format %x\n",
+ __func__, output_format);
+ return -EINVAL;
+ }
+}
+
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+ error_info->info_dump_frame_count++;
+ if (error_info->info_dump_frame_count == 0)
+ error_info->info_dump_frame_count++;
+}
+
+
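+/*
+ * Handle an SMMU page fault from tasklet context: report the fatal error
+ * to user space, then dump the buffer manager state and the write master
+ * ping/pong addresses. The dump runs only once; the debug flag is
+ * re-armed on the next open.
+ */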
+void ms_isp_process_iommu_page_fault(struct vfe_device *vfe_dev)
+{
+ pr_err("%s:%d] VFE%d Handle Page fault! vfe_dev %p\n", __func__,
+ __LINE__, vfe_dev->pdev->id, vfe_dev);
+
+ msm_isp_halt_send_error(vfe_dev, ISP_EVENT_IOMMU_P_FAULT);
+
+ if (vfe_dev->buf_mgr->pagefault_debug_disable == 0) {
+ vfe_dev->buf_mgr->pagefault_debug_disable = 1;
+ vfe_dev->buf_mgr->ops->buf_mgr_debug(vfe_dev->buf_mgr,
+ vfe_dev->page_fault_addr);
+ msm_isp_print_ping_pong_address(vfe_dev,
+ vfe_dev->page_fault_addr);
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ read_wm_ping_pong_addr(vfe_dev);
+ }
+}
+
+void msm_isp_process_error_info(struct vfe_device *vfe_dev)
+{
+ struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
+
+ if (error_info->error_count == 1 ||
+ !(error_info->info_dump_frame_count % 100)) {
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ process_error_status(vfe_dev);
+ error_info->error_mask0 = 0;
+ error_info->error_mask1 = 0;
+ error_info->camif_status = 0;
+ error_info->violation_status = 0;
+ }
+}
+
+static inline void msm_isp_update_error_info(struct vfe_device *vfe_dev,
+ uint32_t error_mask0, uint32_t error_mask1)
+{
+ vfe_dev->error_info.error_mask0 |= error_mask0;
+ vfe_dev->error_info.error_mask1 |= error_mask1;
+ vfe_dev->error_info.error_count++;
+}
+
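+/*
+ * Bus overflow handling: once recovery has started, every IRQ bit except
+ * the halt/restart mask is dropped. On the first overflow a non-blocking
+ * AXI halt is issued (stopping CAMIF) and ISP_EVENT_ERROR with
+ * ISP_ERROR_BUS_OVERFLOW is sent to user space.
+ */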
+static void msm_isp_process_overflow_irq(
+ struct vfe_device *vfe_dev,
+ uint32_t *irq_status0, uint32_t *irq_status1)
+{
+ uint32_t overflow_mask;
+
+ /* if there are no active streams - do not start recovery */
+ if (!vfe_dev->axi_data.num_active_stream)
+ return;
+
+ /* Mask out all other irqs if recovery is started */
+ if (atomic_read(&vfe_dev->error_info.overflow_state) != NO_OVERFLOW) {
+ uint32_t halt_restart_mask0, halt_restart_mask1;
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ get_halt_restart_mask(&halt_restart_mask0,
+ &halt_restart_mask1);
+ *irq_status0 &= halt_restart_mask0;
+ *irq_status1 &= halt_restart_mask1;
+
+ return;
+ }
+
+ /* Check if any overflow bit is set */
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ get_overflow_mask(&overflow_mask);
+ overflow_mask &= *irq_status1;
+
+ if (overflow_mask) {
+ struct msm_isp_event_data error_event;
+ struct msm_vfe_axi_halt_cmd halt_cmd;
+
+ if (vfe_dev->reset_pending == 1) {
+ pr_err("%s:%d failed: overflow %x during reset\n",
+ __func__, __LINE__, overflow_mask);
+ /* Clear overflow bits since reset is pending */
+ *irq_status1 &= ~overflow_mask;
+ return;
+ }
+
+ halt_cmd.overflow_detected = 1;
+ halt_cmd.stop_camif = 1;
+ halt_cmd.blocking_halt = 0;
+
+ msm_isp_axi_halt(vfe_dev, &halt_cmd);
+
+ /* Update overflow state */
+ *irq_status0 = 0;
+ *irq_status1 = 0;
+
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ != HALT_ENFORCED) {
+ error_event.frame_id =
+ vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
+ error_event.u.error_info.err_type =
+ ISP_ERROR_BUS_OVERFLOW;
+ msm_isp_send_event(vfe_dev,
+ ISP_EVENT_ERROR, &error_event);
+ }
+ }
+}
+
+void msm_isp_reset_burst_count_and_frame_drop(
+ struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info)
+{
+ if (stream_info->state != ACTIVE ||
+ stream_info->stream_type != BURST_STREAM)
+ return;
+
+ if (stream_info->num_burst_capture != 0)
+ msm_isp_reset_framedrop(vfe_dev, stream_info);
+}
+
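+/*
+ * Queue an IRQ status pair (plus timestamp) for the tasklet. The queue
+ * is a fixed ring of MSM_VFE_TASKLETQ_SIZE entries; on overflow the
+ * entry at the current ring index is dropped and reused.
+ */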
+static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
+ uint32_t irq_status0, uint32_t irq_status1, uint32_t iommu_page_fault)
+{
+ unsigned long flags;
+ struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;
+
+ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+ queue_cmd = &vfe_dev->tasklet_queue_cmd[vfe_dev->taskletq_idx];
+ if (queue_cmd->cmd_used) {
+ ISP_DBG("%s: Tasklet queue overflow: %d\n",
+ __func__, vfe_dev->pdev->id);
+ list_del(&queue_cmd->list);
+ } else {
+ atomic_add(1, &vfe_dev->irq_cnt);
+ }
+ queue_cmd->vfeInterruptStatus0 = irq_status0;
+ queue_cmd->vfeInterruptStatus1 = irq_status1;
+ queue_cmd->iommu_page_fault = iommu_page_fault;
+ msm_isp_get_timestamp(&queue_cmd->ts);
+ queue_cmd->cmd_used = 1;
+ vfe_dev->taskletq_idx = (vfe_dev->taskletq_idx + 1) %
+ MSM_VFE_TASKLETQ_SIZE;
+ list_add_tail(&queue_cmd->list, &vfe_dev->tasklet_q);
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ tasklet_schedule(&vfe_dev->vfe_tasklet);
+}
+
+irqreturn_t msm_isp_process_irq(int irq_num, void *data)
+{
+ struct vfe_device *vfe_dev = (struct vfe_device *) data;
+ uint32_t irq_status0, irq_status1;
+ uint32_t error_mask0, error_mask1;
+
+ vfe_dev->hw_info->vfe_ops.irq_ops.
+ read_irq_status(vfe_dev, &irq_status0, &irq_status1);
+
+ if ((irq_status0 == 0) && (irq_status1 == 0)) {
+ ISP_DBG("%s:VFE%d irq_status0 & 1 are both 0\n",
+ __func__, vfe_dev->pdev->id);
+ return IRQ_HANDLED;
+ }
+
+ msm_isp_process_overflow_irq(vfe_dev,
+ &irq_status0, &irq_status1);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ get_error_mask(&error_mask0, &error_mask1);
+ error_mask0 &= irq_status0;
+ error_mask1 &= irq_status1;
+ irq_status0 &= ~error_mask0;
+ irq_status1 &= ~error_mask1;
+ if (!vfe_dev->ignore_error &&
+ ((error_mask0 != 0) || (error_mask1 != 0)))
+ msm_isp_update_error_info(vfe_dev, error_mask0, error_mask1);
+
+ if ((irq_status0 == 0) && (irq_status1 == 0) &&
+ (!(((error_mask0 != 0) || (error_mask1 != 0)) &&
+ vfe_dev->error_info.error_count == 1))) {
+ ISP_DBG("%s: error_mask0/1 & error_count are set!\n", __func__);
+ return IRQ_HANDLED;
+ }
+
+ msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1, 0);
+
+ return IRQ_HANDLED;
+}
+
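+/*
+ * Tasklet bottom half: drain the queued IRQ statuses. Page faults are
+ * handled first, then reset and halt IRQs; while overflow recovery is in
+ * progress the remaining bits are ignored, otherwise stats, AXI, CAMIF,
+ * reg update and epoch IRQs are processed in order.
+ */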
+void msm_isp_do_tasklet(unsigned long data)
+{
+ unsigned long flags;
+ struct vfe_device *vfe_dev = (struct vfe_device *) data;
+ struct msm_vfe_irq_ops *irq_ops = &vfe_dev->hw_info->vfe_ops.irq_ops;
+ struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+ struct msm_isp_timestamp ts;
+ uint32_t irq_status0, irq_status1, iommu_page_fault;
+
+ if (vfe_dev->vfe_base == NULL || vfe_dev->vfe_open_cnt == 0) {
+ ISP_DBG("%s: VFE%d open cnt = %d, device closed(base = %p)\n",
+ __func__, vfe_dev->pdev->id, vfe_dev->vfe_open_cnt,
+ vfe_dev->vfe_base);
+ return;
+ }
+
+ while (atomic_read(&vfe_dev->irq_cnt)) {
+ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+ queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
+ struct msm_vfe_tasklet_queue_cmd, list);
+ if (!queue_cmd) {
+ atomic_set(&vfe_dev->irq_cnt, 0);
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ return;
+ }
+ atomic_sub(1, &vfe_dev->irq_cnt);
+ list_del(&queue_cmd->list);
+ queue_cmd->cmd_used = 0;
+ irq_status0 = queue_cmd->vfeInterruptStatus0;
+ irq_status1 = queue_cmd->vfeInterruptStatus1;
+ ts = queue_cmd->ts;
+ iommu_page_fault = queue_cmd->iommu_page_fault;
+ queue_cmd->iommu_page_fault = 0;
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+ ISP_DBG("%s: vfe_id %d status0: 0x%x status1: 0x%x\n",
+ __func__, vfe_dev->pdev->id, irq_status0, irq_status1);
+ if (iommu_page_fault > 0) {
+ ms_isp_process_iommu_page_fault(vfe_dev);
+ continue;
+ }
+ irq_ops->process_reset_irq(vfe_dev,
+ irq_status0, irq_status1);
+ irq_ops->process_halt_irq(vfe_dev,
+ irq_status0, irq_status1);
+ if (atomic_read(&vfe_dev->error_info.overflow_state)
+ != NO_OVERFLOW) {
+ ISP_DBG("%s: Recovery in processing, Ignore IRQs!!!\n",
+ __func__);
+ continue;
+ }
+ msm_isp_process_error_info(vfe_dev);
+ irq_ops->process_stats_irq(vfe_dev,
+ irq_status0, irq_status1, &ts);
+ irq_ops->process_axi_irq(vfe_dev,
+ irq_status0, irq_status1, &ts);
+ irq_ops->process_camif_irq(vfe_dev,
+ irq_status0, irq_status1, &ts);
+ irq_ops->process_reg_update(vfe_dev,
+ irq_status0, irq_status1, &ts);
+ irq_ops->process_epoch_irq(vfe_dev,
+ irq_status0, irq_status1, &ts);
+ }
+}
+
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg)
+{
+ struct msm_vfe_axi_src_state *src_state = arg;
+ if (src_state->input_src >= VFE_SRC_MAX)
+ return -EINVAL;
+ vfe_dev->axi_data.src_info[src_state->input_src].active =
+ src_state->src_active;
+ vfe_dev->axi_data.src_info[src_state->input_src].frame_id =
+ src_state->src_frame_id;
+ return 0;
+}
+
+static int msm_vfe_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ struct vfe_device *vfe_dev = NULL;
+ int rc = -ENOSYS;
+
+ if (token) {
+ vfe_dev = (struct vfe_device *)token;
+ vfe_dev->page_fault_addr = iova;
+ if (!vfe_dev->buf_mgr || !vfe_dev->buf_mgr->ops ||
+ !vfe_dev->axi_data.num_active_stream) {
+ pr_err("%s:%d buf_mgr %p active strms %d\n", __func__,
+ __LINE__, vfe_dev->buf_mgr,
+ vfe_dev->axi_data.num_active_stream);
+ goto end;
+ }
+
+ mutex_lock(&vfe_dev->core_mutex);
+ if (vfe_dev->vfe_open_cnt > 0) {
+ atomic_set(&vfe_dev->error_info.overflow_state,
+ HALT_ENFORCED);
+ msm_isp_enqueue_tasklet_cmd(vfe_dev, 0, 0, 1);
+ } else {
+ pr_err("%s: no handling, vfe open cnt = %d\n",
+ __func__, vfe_dev->vfe_open_cnt);
+ }
+ mutex_unlock(&vfe_dev->core_mutex);
+ } else {
+ ISP_DBG("%s:%d] no token received: %p\n",
+ __func__, __LINE__, token);
+ goto end;
+ }
+end:
+ return rc;
+}
+
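+/*
+ * First open powers up and resets the VFE, clears the shared state,
+ * initializes the buffer manager and registers the SMMU page fault
+ * handler; subsequent opens only increment the open count.
+ */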
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+ long rc = 0;
+
+ ISP_DBG("%s open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
+
+ if (vfe_dev->common_data == NULL) {
+ pr_err("%s: Error in probe. No common_data\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vfe_dev->realtime_mutex);
+ mutex_lock(&vfe_dev->core_mutex);
+
+ if (vfe_dev->vfe_open_cnt++ && vfe_dev->vfe_base) {
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return 0;
+ }
+
+ if (vfe_dev->vfe_base) {
+ pr_err("%s:%d invalid params cnt %d base %p\n", __func__,
+ __LINE__, vfe_dev->vfe_open_cnt, vfe_dev->vfe_base);
+ vfe_dev->vfe_base = NULL;
+ }
+
+ vfe_dev->reset_pending = 0;
+ vfe_dev->isp_sof_debug = 0;
+ vfe_dev->isp_raw0_debug = 0;
+ vfe_dev->isp_raw1_debug = 0;
+ vfe_dev->isp_raw2_debug = 0;
+
+ if (vfe_dev->hw_info->vfe_ops.core_ops.init_hw(vfe_dev) < 0) {
+ pr_err("%s: init hardware failed\n", __func__);
+ vfe_dev->vfe_open_cnt--;
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return -EBUSY;
+ }
+
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ atomic_set(&vfe_dev->error_info.overflow_state, NO_OVERFLOW);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.clear_status_reg(vfe_dev);
+
+ rc = vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 1, 1);
+ if (rc <= 0) {
+ pr_err("%s: reset timeout\n", __func__);
+ vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+ vfe_dev->vfe_open_cnt--;
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return -EINVAL;
+ }
+ vfe_dev->vfe_hw_version = msm_camera_io_r(vfe_dev->vfe_base);
+ ISP_DBG("%s: HW Version: 0x%x\n", __func__, vfe_dev->vfe_hw_version);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.init_hw_reg(vfe_dev);
+
+ vfe_dev->buf_mgr->ops->buf_mgr_init(vfe_dev->buf_mgr,
+ "msm_isp");
+
+ memset(&vfe_dev->axi_data, 0, sizeof(struct msm_vfe_axi_shared_data));
+ memset(&vfe_dev->stats_data, 0,
+ sizeof(struct msm_vfe_stats_shared_data));
+ memset(&vfe_dev->error_info, 0, sizeof(vfe_dev->error_info));
+ memset(&vfe_dev->fetch_engine_info, 0,
+ sizeof(vfe_dev->fetch_engine_info));
+ vfe_dev->axi_data.hw_info = vfe_dev->hw_info->axi_hw_info;
+ vfe_dev->axi_data.enable_frameid_recovery = 0;
+ vfe_dev->taskletq_idx = 0;
+ vfe_dev->vt_enable = 0;
+ vfe_dev->reg_update_requested = 0;
+ /* Register page fault handler */
+ vfe_dev->buf_mgr->pagefault_debug_disable = 0;
+ cam_smmu_reg_client_page_fault_handler(
+ vfe_dev->buf_mgr->iommu_hdl,
+ msm_vfe_iommu_fault_handler, vfe_dev);
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return 0;
+}
+
+#ifdef CONFIG_MSM_AVTIMER
+void msm_isp_end_avtimer(void)
+{
+ avcs_core_disable_power_collapse(0);
+}
+#else
+void msm_isp_end_avtimer(void)
+{
+ pr_err("AV Timer is not supported\n");
+}
+#endif
+
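+/*
+ * Last close tears the VFE down: unregister the page fault handler, halt
+ * AXI, disable CAMIF, reset the hardware, park every write master on the
+ * scratch buffer and release the hardware resources.
+ */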
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ long rc = 0;
+ int wm;
+ struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
+ ISP_DBG("%s E open_cnt %u\n", __func__, vfe_dev->vfe_open_cnt);
+ mutex_lock(&vfe_dev->realtime_mutex);
+ mutex_lock(&vfe_dev->core_mutex);
+
+ if (!vfe_dev->vfe_open_cnt) {
+ pr_err("%s invalid state open cnt %d\n", __func__,
+ vfe_dev->vfe_open_cnt);
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return -EINVAL;
+ }
+
+ if (vfe_dev->vfe_open_cnt > 1) {
+ vfe_dev->vfe_open_cnt--;
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return 0;
+ }
+ /* Unregister page fault handler */
+ cam_smmu_reg_client_page_fault_handler(
+ vfe_dev->buf_mgr->iommu_hdl,
+ NULL, vfe_dev);
+
+ rc = vfe_dev->hw_info->vfe_ops.axi_ops.halt(vfe_dev, 1);
+ if (rc <= 0)
+ pr_err("%s: halt timeout rc=%ld\n", __func__, rc);
+
+ vfe_dev->hw_info->vfe_ops.core_ops.
+ update_camif_state(vfe_dev, DISABLE_CAMIF_IMMEDIATELY);
+ vfe_dev->hw_info->vfe_ops.core_ops.reset_hw(vfe_dev, 0, 0);
+
+ /* after regular hw stop, reduce open cnt */
+ vfe_dev->vfe_open_cnt--;
+
+ /* put scratch buf in all the wm */
+ for (wm = 0; wm < vfe_dev->axi_data.hw_info->num_wm; wm++) {
+ msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PING_FLAG);
+ msm_isp_cfg_wm_scratch(vfe_dev, wm, VFE_PONG_FLAG);
+ }
+ vfe_dev->hw_info->vfe_ops.core_ops.release_hw(vfe_dev);
+ vfe_dev->buf_mgr->ops->buf_mgr_deinit(vfe_dev->buf_mgr);
+ if (vfe_dev->vt_enable) {
+ msm_isp_end_avtimer();
+ vfe_dev->vt_enable = 0;
+ }
+ vfe_dev->is_split = 0;
+
+ mutex_unlock(&vfe_dev->core_mutex);
+ mutex_unlock(&vfe_dev->realtime_mutex);
+ return 0;
+}
+
+void msm_isp_flush_tasklet(struct vfe_device *vfe_dev)
+{
+ unsigned long flags;
+ struct msm_vfe_tasklet_queue_cmd *queue_cmd;
+
+ spin_lock_irqsave(&vfe_dev->tasklet_lock, flags);
+ while (atomic_read(&vfe_dev->irq_cnt)) {
+ queue_cmd = list_first_entry(&vfe_dev->tasklet_q,
+ struct msm_vfe_tasklet_queue_cmd, list);
+ if (!queue_cmd) {
+ atomic_set(&vfe_dev->irq_cnt, 0);
+ break;
+ }
+ atomic_sub(1, &vfe_dev->irq_cnt);
+ list_del(&queue_cmd->list);
+ queue_cmd->cmd_used = 0;
+ }
+ spin_unlock_irqrestore(&vfe_dev->tasklet_lock, flags);
+}
+
+void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src)
+{
+ struct msm_vfe_axi_stream *stream_info = NULL;
+ uint32_t j = 0;
+ unsigned long flags;
+
+ for (j = 0; j < VFE_AXI_SRC_MAX; j++) {
+ stream_info = &vfe_dev->axi_data.stream_info[j];
+ if (stream_info->state != ACTIVE)
+ continue;
+ if (frame_src != SRC_TO_INTF(stream_info->stream_src))
+ continue;
+
+ spin_lock_irqsave(&stream_info->lock, flags);
+ stream_info->prev_framedrop_period &= ~0x80000000;
+ spin_unlock_irqrestore(&stream_info->lock, flags);
+ }
+}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
new file mode 100644
index 000000000000..bde32b33817d
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_ISP_UTIL_H__
+#define __MSM_ISP_UTIL_H__
+
+#include "msm_isp.h"
+#include <soc/qcom/camera2.h>
+#include "msm_camera_io_util.h"
+
+/* #define CONFIG_MSM_ISP_DBG 1 */
+
+#ifdef CONFIG_MSM_ISP_DBG
+#define ISP_DBG(fmt, args...) printk(fmt, ##args)
+#else
+#define ISP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
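+/* Alternate the active bus vector index between entries 1 and 2 (x = 3 - x) */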
+#define ALT_VECTOR_IDX(x) {x = 3 - x; }
+
+struct msm_isp_bandwidth_mgr {
+ uint32_t bus_client;
+ uint32_t bus_vector_active_idx;
+ uint32_t use_count;
+ struct msm_isp_bandwidth_info client_info[MAX_ISP_CLIENT];
+};
+
+uint32_t msm_isp_get_framedrop_period(
+ enum msm_vfe_frame_skip_pattern frame_skip_pattern);
+void msm_isp_reset_burst_count_and_frame_drop(
+ struct vfe_device *vfe_dev, struct msm_vfe_axi_stream *stream_info);
+
+int msm_isp_init_bandwidth_mgr(enum msm_isp_hw_client client);
+int msm_isp_update_bandwidth(enum msm_isp_hw_client client,
+ uint64_t ab, uint64_t ib);
+void msm_isp_util_get_bandwidth_stats(struct vfe_device *vfe_dev,
+ struct msm_isp_statistics *stats);
+void msm_isp_util_update_last_overflow_ab_ib(struct vfe_device *vfe_dev);
+void msm_isp_util_update_clk_rate(long clock_rate);
+void msm_isp_update_req_history(uint32_t client, uint64_t ab,
+ uint64_t ib,
+ struct msm_isp_bandwidth_info *client_info,
+ unsigned long long ts);
+void msm_isp_deinit_bandwidth_mgr(enum msm_isp_hw_client client);
+
+int msm_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
+int msm_isp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub);
+
+int msm_isp_proc_cmd(struct vfe_device *vfe_dev, void *arg);
+int msm_isp_send_event(struct vfe_device *vfe_dev,
+ uint32_t type, struct msm_isp_event_data *event_data);
+int msm_isp_cal_word_per_line(uint32_t output_format,
+ uint32_t pixel_per_line);
+int msm_isp_get_bit_per_pixel(uint32_t output_format);
+enum msm_isp_pack_fmt msm_isp_get_pack_format(uint32_t output_format);
+irqreturn_t msm_isp_process_irq(int irq_num, void *data);
+int msm_isp_set_src_state(struct vfe_device *vfe_dev, void *arg);
+void msm_isp_do_tasklet(unsigned long data);
+void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev);
+void msm_isp_process_error_info(struct vfe_device *vfe_dev);
+int msm_isp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int msm_isp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+long msm_isp_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
+int msm_isp_get_clk_info(struct vfe_device *vfe_dev,
+ struct platform_device *pdev, struct msm_cam_clk_info *vfe_clk_info);
+void msm_isp_fetch_engine_done_notify(struct vfe_device *vfe_dev,
+ struct msm_vfe_fetch_engine_info *fetch_engine_info);
+void msm_isp_print_fourcc_error(const char *origin, uint32_t fourcc_format);
+void msm_isp_flush_tasklet(struct vfe_device *vfe_dev);
+void msm_isp_save_framedrop_values(struct vfe_device *vfe_dev,
+ enum msm_vfe_input_src frame_src);
+void msm_isp_get_timestamp(struct msm_isp_timestamp *time_stamp);
+
+#endif /* __MSM_ISP_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/ispif/Makefile b/drivers/media/platform/msm/camera_v2/ispif/Makefile
new file mode 100644
index 000000000000..236ec7340c56
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSM_CSID) += msm_ispif.o
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
new file mode 100644
index 000000000000..9d0f2b930696
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -0,0 +1,1734 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/of.h>
+#include <linux/videodev2.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/iopoll.h>
+#include <linux/compat.h>
+#include <media/msmb_isp.h>
+#include <linux/ratelimit.h>
+
+#include "msm_ispif.h"
+#include "msm.h"
+#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+#include "cam_hw_ops.h"
+
+#ifdef CONFIG_MSM_ISPIF_V1
+#include "msm_ispif_hwreg_v1.h"
+#elif defined CONFIG_MSM_ISPIF_V2
+#include "msm_ispif_hwreg_v2.h"
+#else
+#include "msm_ispif_hwreg_v3.h"
+#endif
+
+#define V4L2_IDENT_ISPIF 50001
+#define MSM_ISPIF_DRV_NAME "msm_ispif"
+
+#define ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY 0x00
+#define ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY 0x01
+#define ISPIF_INTF_CMD_DISABLE_IMMEDIATELY 0x02
+
+#define ISPIF_TIMEOUT_SLEEP_US 1000
+#define ISPIF_TIMEOUT_ALL_US 1000000
+#define ISPIF_SOF_DEBUG_COUNT 0
+
+#undef CDBG
+#ifdef CONFIG_MSMB_CAMERA_DEBUG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define CDBG(fmt, args...) do { } while (0)
+#endif
+
+static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable);
+static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+int msm_ispif_get_clk_info(struct ispif_device *ispif_dev,
+ struct platform_device *pdev,
+ struct msm_cam_clk_info *ahb_clk_info,
+ struct msm_cam_clk_info *clk_info);
+
+static void msm_ispif_io_dump_reg(struct ispif_device *ispif)
+{
+ if (!ispif->enb_dump_reg)
+ return;
+ msm_camera_io_dump(ispif->base, 0x250, 0);
+}
+
+
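+/* CSID versions up to V2.2 can feed VFE0 only; newer versions accept any interface below VFE_MAX */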
+static inline int msm_ispif_is_intf_valid(uint32_t csid_version,
+ uint8_t intf_type)
+{
+ return ((csid_version <= CSID_VERSION_V22 && intf_type != VFE0) ||
+ (intf_type >= VFE_MAX)) ? false : true;
+}
+
+static struct msm_cam_clk_info ispif_8626_reset_clk_info[] = {
+ {"ispif_ahb_clk", NO_SET_RATE},
+ {"camss_top_ahb_clk", NO_SET_RATE},
+ {"csi0_ahb_clk", NO_SET_RATE},
+ {"csi0_src_clk", NO_SET_RATE},
+ {"csi0_phy_clk", NO_SET_RATE},
+ {"csi0_clk", NO_SET_RATE},
+ {"csi0_pix_clk", NO_SET_RATE},
+ {"csi0_rdi_clk", NO_SET_RATE},
+ {"csi1_ahb_clk", NO_SET_RATE},
+ {"csi1_src_clk", NO_SET_RATE},
+ {"csi1_phy_clk", NO_SET_RATE},
+ {"csi1_clk", NO_SET_RATE},
+ {"csi1_pix_clk", NO_SET_RATE},
+ {"csi1_rdi_clk", NO_SET_RATE},
+ {"camss_vfe_vfe_clk", NO_SET_RATE},
+ {"camss_csi_vfe_clk", NO_SET_RATE},
+};
+
+static struct msm_cam_clk_info ispif_ahb_clk_info[ISPIF_CLK_INFO_MAX];
+static struct msm_cam_clk_info ispif_clk_info[ISPIF_CLK_INFO_MAX];
+
+static void msm_ispif_put_regulator(struct ispif_device *ispif_dev)
+{
+ int i;
+
+ for (i = 0; i < ispif_dev->ispif_vdd_count; i++) {
+ regulator_put(ispif_dev->ispif_vdd[i]);
+ ispif_dev->ispif_vdd[i] = NULL;
+ }
+ for (i = 0; i < ispif_dev->vfe_vdd_count; i++) {
+ regulator_put(ispif_dev->vfe_vdd[i]);
+ ispif_dev->vfe_vdd[i] = NULL;
+ }
+}
+
+static inline int __get_vdd(struct platform_device *pdev,
+ struct regulator **reg, const char *vdd)
+{
+ int rc = 0;
+ *reg = regulator_get(&pdev->dev, vdd);
+ if (IS_ERR_OR_NULL(*reg)) {
+ rc = PTR_ERR(*reg);
+ rc = rc ? rc : -EINVAL;
+ pr_err("%s: Regulator %s get failed %d\n", __func__, vdd, rc);
+ *reg = NULL;
+ }
+ return rc;
+}
+
+static int msm_ispif_get_regulator_info(struct ispif_device *ispif_dev,
+ struct platform_device *pdev)
+{
+ int rc;
+ const char *vdd_name;
+ struct device_node *of_node;
+ int i;
+ int count;
+
+ of_node = pdev->dev.of_node;
+
+ count = of_property_count_strings(of_node,
+ "qcom,vdd-names");
+ if (count <= 0) {
+ pr_err("%s: no regulators found\n", __func__);
+ return -EINVAL;
+ }
+
+ BUG_ON(count > (ISPIF_VDD_INFO_MAX + ISPIF_VFE_VDD_INFO_MAX));
+ ispif_dev->vfe_vdd_count = 0;
+ ispif_dev->ispif_vdd_count = 0;
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(
+ of_node, "qcom,vdd-names",
+ i, &vdd_name);
+ if (rc < 0) {
+ pr_err("%s: read property qcom,vdd-names at index %d failed\n",
+ __func__, i);
+ goto err;
+ }
+ if (strnstr(vdd_name, "vfe", strlen(vdd_name))) {
+ BUG_ON(ispif_dev->vfe_vdd_count >=
+ ISPIF_VFE_VDD_INFO_MAX);
+ rc = __get_vdd(pdev,
+ &ispif_dev->vfe_vdd[ispif_dev->vfe_vdd_count],
+ vdd_name);
+ if (0 == rc)
+ ispif_dev->vfe_vdd_count++;
+ } else {
+ BUG_ON(ispif_dev->ispif_vdd_count >=
+ ISPIF_VDD_INFO_MAX);
+ rc = __get_vdd(pdev,
+ &ispif_dev->ispif_vdd
+ [ispif_dev->ispif_vdd_count],
+ vdd_name);
+ if (0 == rc)
+ ispif_dev->ispif_vdd_count++;
+ }
+ if (rc)
+ goto err;
+ }
+ return 0;
+err:
+ for (i = 0; i < ispif_dev->vfe_vdd_count; i++) {
+ regulator_put(ispif_dev->vfe_vdd[i]);
+ ispif_dev->vfe_vdd[i] = NULL;
+ }
+ for (i = 0; i < ispif_dev->ispif_vdd_count; i++) {
+ regulator_put(ispif_dev->ispif_vdd[i]);
+ ispif_dev->ispif_vdd[i] = NULL;
+ }
+ ispif_dev->ispif_vdd_count = 0;
+ ispif_dev->vfe_vdd_count = 0;
+ return rc;
+}
+
+static int msm_ispif_set_regulators(struct regulator **regs, int count,
+ uint8_t enable)
+{
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if (enable) {
+ rc = regulator_enable(regs[i]);
+ if (rc)
+ goto err;
+ } else {
+ rc |= regulator_disable(regs[i]);
+ }
+ }
+ if (rc)
+ pr_err("%s: Regulator disable failed\n", __func__);
+ return rc;
+err:
+ pr_err("%s: Regulator enable failed\n", __func__);
+ for (i--; i >= 0; i--)
+ regulator_disable(regs[i]);
+ return rc;
+}
+
+static int msm_ispif_reset_hw(struct ispif_device *ispif)
+{
+ int rc = 0;
+ long timeout = 0;
+ struct clk *reset_clk1[ARRAY_SIZE(ispif_8626_reset_clk_info)];
+ ispif->clk_idx = 0;
+
+ /* Turn ON VFE regulators before enabling the vfe clocks */
+ rc = msm_ispif_set_regulators(ispif->vfe_vdd, ispif->vfe_vdd_count, 1);
+ if (rc < 0)
+ return rc;
+
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_clk_info, ispif->clk,
+ ispif->num_clk, 1);
+ if (rc < 0) {
+ pr_err("%s: cannot enable clock, error = %d\n",
+ __func__, rc);
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_8626_reset_clk_info, reset_clk1,
+ ARRAY_SIZE(ispif_8626_reset_clk_info), 1);
+ if (rc < 0) {
+ pr_err("%s: cannot enable clock, error = %d\n",
+ __func__, rc);
+ goto reg_disable;
+ } else {
+ /* This is set when device is 8x26 */
+ ispif->clk_idx = 2;
+ }
+ } else {
+ /* This is set when device is 8974 */
+ ispif->clk_idx = 1;
+ }
+
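+ /* reset_trig is consumed by the IRQ handler, which completes reset_complete on RESET_DONE_IRQ */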
+ atomic_set(&ispif->reset_trig[VFE0], 1);
+ /* initiate reset of ISPIF */
+ msm_camera_io_w(ISPIF_RST_CMD_MASK,
+ ispif->base + ISPIF_RST_CMD_ADDR);
+
+ timeout = wait_for_completion_timeout(
+ &ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+ CDBG("%s: VFE0 done\n", __func__);
+
+ if (timeout <= 0) {
+ rc = -ETIMEDOUT;
+ pr_err("%s: VFE0 reset wait timeout\n", __func__);
+ goto clk_disable;
+ }
+
+ if (ispif->hw_num_isps > 1) {
+ atomic_set(&ispif->reset_trig[VFE1], 1);
+ msm_camera_io_w(ISPIF_RST_CMD_1_MASK,
+ ispif->base + ISPIF_RST_CMD_1_ADDR);
+ timeout = wait_for_completion_timeout(
+ &ispif->reset_complete[VFE1],
+ msecs_to_jiffies(500));
+ CDBG("%s: VFE1 done\n", __func__);
+ if (timeout <= 0) {
+ pr_err("%s: VFE1 reset wait timeout\n", __func__);
+ rc = -ETIMEDOUT;
+ }
+ }
+
+clk_disable:
+ if (ispif->clk_idx == 1) {
+ rc = rc ? rc : msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_clk_info, ispif->clk,
+ ispif->num_clk, 0);
+ }
+
+ if (ispif->clk_idx == 2) {
+ rc = rc ? rc : msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_8626_reset_clk_info, reset_clk1,
+ ARRAY_SIZE(ispif_8626_reset_clk_info), 0);
+ }
+reg_disable:
+ rc = rc ? rc : msm_ispif_set_regulators(ispif->vfe_vdd,
+ ispif->vfe_vdd_count, 0);
+
+ return rc;
+}
+
+int msm_ispif_get_clk_info(struct ispif_device *ispif_dev,
+ struct platform_device *pdev,
+ struct msm_cam_clk_info *ahb_clk_info,
+ struct msm_cam_clk_info *clk_info)
+{
+ uint32_t count, num_ahb_clk = 0, non_ahb_clk = 0;
+ int i, rc;
+ uint32_t rates[ISPIF_CLK_INFO_MAX];
+ const char *clk_ctl = NULL;
+ const char *clk_name = NULL;
+ struct msm_cam_clk_info *clk_temp;
+
+ struct device_node *of_node;
+ of_node = pdev->dev.of_node;
+
+ count = of_property_count_strings(of_node, "clock-names");
+
+ CDBG("count = %d\n", count);
+ if (count == 0) {
+ pr_err("no clocks found in device tree, count=%d\n", count);
+ return 0;
+ }
+
+ if (count > ISPIF_CLK_INFO_MAX) {
+ pr_err("invalid count=%d, max is %d\n", count,
+ ISPIF_CLK_INFO_MAX);
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+ rates, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node, "clock-names",
+ i, &clk_name);
+ if (rc < 0) {
+ pr_err("%s reading clock-name failed index %d\n",
+ __func__, i);
+ return rc;
+ }
+
+ rc = of_property_read_string_index(of_node,
+ "qcom,clock-control", i, &clk_ctl);
+ if (rc < 0) {
+ pr_err("%s reading clock-control failed index %d\n",
+ __func__, i);
+ return rc;
+ }
+
+ if (strnstr(clk_name, "ahb", strlen(clk_name))) {
+ clk_temp = &ahb_clk_info[num_ahb_clk];
+ num_ahb_clk++;
+ } else {
+ clk_temp = &clk_info[non_ahb_clk];
+ non_ahb_clk++;
+ }
+
+ clk_temp->clk_name = clk_name;
+ if (!strcmp(clk_ctl, "NO_SET_RATE"))
+ clk_temp->clk_rate = NO_SET_RATE;
+ else if (!strcmp(clk_ctl, "INIT_RATE"))
+ clk_temp->clk_rate = INIT_RATE;
+ else if (!strcmp(clk_ctl, "SET_RATE"))
+ clk_temp->clk_rate = rates[i];
+ else {
+ pr_err("%s: error: clock control has invalid value\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ CDBG("%s: clock-name= %s, clk_rate = %ld clock-control = %s\n",
+ __func__, clk_temp->clk_name, clk_temp->clk_rate,
+ clk_ctl);
+ }
+ ispif_dev->num_ahb_clk = num_ahb_clk;
+ ispif_dev->num_clk = non_ahb_clk;
+ return 0;
+}
+
+static int msm_ispif_clk_ahb_enable(struct ispif_device *ispif, int enable)
+{
+ int rc = 0;
+
+ if (ispif->csid_version < CSID_VERSION_V30) {
+ /* Older ISPIF versions don't need the AHB clock */
+ return 0;
+ }
+
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_ahb_clk_info, ispif->ahb_clk,
+ ispif->num_ahb_clk, enable);
+ if (rc < 0) {
+ pr_err("%s: cannot enable clock, error = %d\n",
+ __func__, rc);
+ }
+
+ return rc;
+}
+
+static int msm_ispif_reset(struct ispif_device *ispif)
+{
+ int rc = 0;
+ int i;
+
+ BUG_ON(!ispif);
+
+ memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+ for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
+
+ msm_camera_io_w(1 << PIX0_LINE_BUF_EN_BIT,
+ ispif->base + ISPIF_VFE_m_CTRL_0(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(i));
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(i));
+ msm_camera_io_w(0xFFFFFFFF, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_0(i));
+ msm_camera_io_w(0xFFFFFFFF, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_1(i));
+ msm_camera_io_w(0xFFFFFFFF, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_2(i));
+
+ msm_camera_io_w(0, ispif->base + ISPIF_VFE_m_INPUT_SEL(i));
+
+ msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
+ msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
+ pr_debug("%s: base %lx", __func__, (unsigned long)ispif->base);
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 0));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CID_MASK(i, 1));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 0));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 1));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_CID_MASK(i, 2));
+
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CROP(i, 0));
+ msm_camera_io_w(0, ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_CROP(i, 1));
+ }
+
+ msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+ ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ return rc;
+}
+
+static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+ uint32_t data;
+
+ BUG_ON(!ispif);
+
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return;
+ }
+
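+ /*
+ * Each interface has a two-bit CSID select field in VFE_m_INPUT_SEL:
+ * PIX0 at bit 0, RDI0 at bit 4, PIX1 at bit 8, RDI1 at bit 12 and
+ * RDI2 at bit 20.
+ */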
+ data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+ switch (intftype) {
+ case PIX0:
+ data &= ~(BIT(1) | BIT(0));
+ data |= csid;
+ break;
+ case RDI0:
+ data &= ~(BIT(5) | BIT(4));
+ data |= (csid << 4);
+ break;
+ case PIX1:
+ data &= ~(BIT(9) | BIT(8));
+ data |= (csid << 8);
+ break;
+ case RDI1:
+ data &= ~(BIT(13) | BIT(12));
+ data |= (csid << 12);
+ break;
+ case RDI2:
+ data &= ~(BIT(21) | BIT(20));
+ data |= (csid << 20);
+ break;
+ }
+
+ msm_camera_io_w_mb(data, ispif->base +
+ ISPIF_VFE_m_INPUT_SEL(vfe_intf));
+}
+
+static void msm_ispif_enable_crop(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf, uint16_t start_pixel,
+ uint16_t end_pixel)
+{
+ uint32_t data;
+ BUG_ON(!ispif);
+
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return;
+ }
+
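+ /* The crop enable bit for this interface is bit (intftype + 7) of VFE_m_CTRL_0 */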
+ data = msm_camera_io_r(ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+ data |= (1 << (intftype + 7));
+ if (intftype == PIX0)
+ data |= 1 << PIX0_LINE_BUF_EN_BIT;
+ msm_camera_io_w(data,
+ ispif->base + ISPIF_VFE_m_CTRL_0(vfe_intf));
+
+ if (intftype == PIX0)
+ msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+ ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 0));
+ else if (intftype == PIX1)
+ msm_camera_io_w_mb(start_pixel | (end_pixel << 16),
+ ispif->base + ISPIF_VFE_m_PIX_INTF_n_CROP(vfe_intf, 1));
+ else {
+ pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+ BUG_ON(1);
+ return;
+ }
+}
+
+static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
+ uint8_t intftype, uint16_t cid_mask, uint8_t vfe_intf, uint8_t enable)
+{
+ uint32_t intf_addr, data;
+
+ BUG_ON(!ispif);
+
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return;
+ }
+
+ switch (intftype) {
+ case PIX0:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 0);
+ break;
+ case RDI0:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 0);
+ break;
+ case PIX1:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe_intf, 1);
+ break;
+ case RDI1:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 1);
+ break;
+ case RDI2:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe_intf, 2);
+ break;
+ default:
+ pr_err("%s: invalid intftype=%d\n", __func__, intftype);
+ BUG_ON(1);
+ return;
+ }
+
+ data = msm_camera_io_r(ispif->base + intf_addr);
+ if (enable)
+ data |= cid_mask;
+ else
+ data &= ~cid_mask;
+ msm_camera_io_w_mb(data, ispif->base + intf_addr);
+}
+
+static int msm_ispif_validate_intf_status(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t vfe_intf)
+{
+ int rc = 0;
+ uint32_t data = 0;
+
+ BUG_ON(!ispif);
+
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (intftype) {
+ case PIX0:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0));
+ break;
+ case RDI0:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0));
+ break;
+ case PIX1:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1));
+ break;
+ case RDI1:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1));
+ break;
+ case RDI2:
+ data = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2));
+ break;
+ }
+ if ((data & 0xf) != 0xf)
+ rc = -EBUSY;
+ return rc;
+}
+
+static void msm_ispif_select_clk_mux(struct ispif_device *ispif,
+ uint8_t intftype, uint8_t csid, uint8_t vfe_intf)
+{
+ uint32_t data = 0;
+
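+ /*
+ * PIX clock mux selects share the base mux register, 8 bits per VFE
+ * (4 bits PIX0, 4 bits PIX1); RDI selects live at
+ * ISPIF_RDI_CLK_MUX_SEL_ADDR, 12 bits per VFE (4 bits each for
+ * RDI0/RDI1/RDI2).
+ */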
+ switch (intftype) {
+ case PIX0:
+ data = msm_camera_io_r(ispif->clk_mux_base);
+ data &= ~(0xf << (vfe_intf * 8));
+ data |= (csid << (vfe_intf * 8));
+ msm_camera_io_w(data, ispif->clk_mux_base);
+ break;
+
+ case RDI0:
+ data = msm_camera_io_r(ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ data &= ~(0xf << (vfe_intf * 12));
+ data |= (csid << (vfe_intf * 12));
+ msm_camera_io_w(data, ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ break;
+
+ case PIX1:
+ data = msm_camera_io_r(ispif->clk_mux_base);
+ data &= ~(0xf0 << (vfe_intf * 8));
+ data |= (csid << (4 + (vfe_intf * 8)));
+ msm_camera_io_w(data, ispif->clk_mux_base);
+ break;
+
+ case RDI1:
+ data = msm_camera_io_r(ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ data &= ~(0xf << (4 + (vfe_intf * 12)));
+ data |= (csid << (4 + (vfe_intf * 12)));
+ msm_camera_io_w(data, ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ break;
+
+ case RDI2:
+ data = msm_camera_io_r(ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ data &= ~(0xf << (8 + (vfe_intf * 12)));
+ data |= (csid << (8 + (vfe_intf * 12)));
+ msm_camera_io_w(data, ispif->clk_mux_base +
+ ISPIF_RDI_CLK_MUX_SEL_ADDR);
+ break;
+ }
+ CDBG("%s intftype %d data %x\n", __func__, intftype, data);
+ /* make sure the clk mux selection is written out before returning */
+ mb();
+}
+
+static uint16_t msm_ispif_get_cids_mask_from_cfg(
+ struct msm_ispif_params_entry *entry)
+{
+ int i;
+ uint16_t cids_mask = 0;
+
+ BUG_ON(!entry);
+
+ for (i = 0; i < entry->num_cids; i++)
+ cids_mask |= (1 << entry->cids[i]);
+
+ return cids_mask;
+}
+
+static int msm_ispif_config(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int rc = 0, i = 0;
+ uint16_t cid_mask;
+ enum msm_ispif_intftype intftype;
+ enum msm_ispif_vfe_intf vfe_intf;
+
+ BUG_ON(!ispif);
+ BUG_ON(!params);
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ for (i = 0; i < params->num; i++) {
+ vfe_intf = params->entries[i].vfe_intf;
+ if (!msm_ispif_is_intf_valid(ispif->csid_version,
+ vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return -EINVAL;
+ }
+ msm_camera_io_w(0x0, ispif->base +
+ ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+ msm_camera_io_w(0x0, ispif->base +
+ ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+ msm_camera_io_w_mb(0x0, ispif->base +
+ ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
+ }
+
+ for (i = 0; i < params->num; i++) {
+ intftype = params->entries[i].intftype;
+
+ vfe_intf = params->entries[i].vfe_intf;
+
+ CDBG("%s intftype %x, vfe_intf %d, csid %d\n", __func__,
+ intftype, vfe_intf, params->entries[i].csid);
+
+ if ((intftype >= INTF_MAX) ||
+ (vfe_intf >= ispif->vfe_info.num_vfe) ||
+ (ispif->csid_version <= CSID_VERSION_V22 &&
+ (vfe_intf > VFE0))) {
+ pr_err("%s: VFEID %d and CSID version %d mismatch\n",
+ __func__, vfe_intf, ispif->csid_version);
+ return -EINVAL;
+ }
+
+ if (ispif->csid_version >= CSID_VERSION_V30)
+ msm_ispif_select_clk_mux(ispif, intftype,
+ params->entries[i].csid, vfe_intf);
+
+ rc = msm_ispif_validate_intf_status(ispif, intftype, vfe_intf);
+ if (rc) {
+ pr_err("%s:validate_intf_status failed, rc = %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ msm_ispif_sel_csid_core(ispif, intftype,
+ params->entries[i].csid, vfe_intf);
+ cid_mask = msm_ispif_get_cids_mask_from_cfg(
+ &params->entries[i]);
+ msm_ispif_enable_intf_cids(ispif, intftype,
+ cid_mask, vfe_intf, 1);
+ if (params->entries[i].crop_enable)
+ msm_ispif_enable_crop(ispif, intftype, vfe_intf,
+ params->entries[i].crop_start_pixel,
+ params->entries[i].crop_end_pixel);
+ }
+
+ for (vfe_intf = 0; vfe_intf < 2; vfe_intf++) {
+ msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_MASK_0(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_0(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_MASK_1(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_1_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_1(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_MASK_2(vfe_intf));
+
+ msm_camera_io_w(ISPIF_IRQ_STATUS_2_MASK, ispif->base +
+ ISPIF_VFE_m_IRQ_CLEAR_2(vfe_intf));
+ }
+
+ msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+ ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ return rc;
+}
+
+static void msm_ispif_intf_cmd(struct ispif_device *ispif, uint32_t cmd_bits,
+ struct msm_ispif_param_data *params)
+{
+ uint8_t vc;
+ int i, k;
+ enum msm_ispif_intftype intf_type;
+ enum msm_ispif_cid cid;
+ enum msm_ispif_vfe_intf vfe_intf;
+
+ BUG_ON(!ispif);
+ BUG_ON(!params);
+
+ for (i = 0; i < params->num; i++) {
+ vfe_intf = params->entries[i].vfe_intf;
+ if (!msm_ispif_is_intf_valid(ispif->csid_version, vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ return;
+ }
+ if (params->entries[i].num_cids > MAX_CID_CH) {
+ pr_err("%s: out of range of cid_num %d\n",
+ __func__, params->entries[i].num_cids);
+ return;
+ }
+ }
+
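+ /*
+ * Two command bits per virtual channel: PIX0/RDI0/PIX1/RDI1 are packed
+ * into INTF_CMD_0 (8 bits per interface type), RDI2 into INTF_CMD_1
+ * starting at bit 8.
+ */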
+ for (i = 0; i < params->num; i++) {
+ intf_type = params->entries[i].intftype;
+ vfe_intf = params->entries[i].vfe_intf;
+ for (k = 0; k < params->entries[i].num_cids; k++) {
+ cid = params->entries[i].cids[k];
+ vc = cid / 4;
+ if (intf_type == RDI2) {
+ /* zero out two bits */
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd1 &=
+ ~(0x3 << (vc * 2 + 8));
+ /* set cmd bits */
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd1 |=
+ (cmd_bits << (vc * 2 + 8));
+ } else {
+ /* zero 2 bits */
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd &=
+ ~(0x3 << (vc * 2 + intf_type * 8));
+ /* set cmd bits */
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd |=
+ (cmd_bits << (vc * 2 + intf_type * 8));
+ }
+ }
+ /* cmd for PIX0, PIX1, RDI0, RDI1 */
+ if (ispif->applied_intf_cmd[vfe_intf].intf_cmd != 0xFFFFFFFF)
+ msm_camera_io_w_mb(
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe_intf));
+
+ /* cmd for RDI2 */
+ if (ispif->applied_intf_cmd[vfe_intf].intf_cmd1 != 0xFFFFFFFF)
+ msm_camera_io_w_mb(
+ ispif->applied_intf_cmd[vfe_intf].intf_cmd1,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe_intf));
+ }
+}
+
+static int msm_ispif_stop_immediately(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int i, rc = 0;
+ uint16_t cid_mask = 0;
+ BUG_ON(!ispif);
+ BUG_ON(!params);
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+ msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_DISABLE_IMMEDIATELY, params);
+
+ /* after stop the interface we need to unmask the CID enable bits */
+ for (i = 0; i < params->num; i++) {
+ cid_mask = msm_ispif_get_cids_mask_from_cfg(
+ &params->entries[i]);
+ msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
+ cid_mask, params->entries[i].vfe_intf, 0);
+ }
+
+ return rc;
+}
+
+static int msm_ispif_start_frame_boundary(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int rc = 0;
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+ msm_ispif_intf_cmd(ispif, ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+
+ return rc;
+}
+
+static int msm_ispif_restart_frame_boundary(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int rc = 0, i;
+ long timeout = 0;
+ uint16_t cid_mask;
+ enum msm_ispif_intftype intftype;
+ enum msm_ispif_vfe_intf vfe_intf;
+ uint32_t vfe_mask = 0;
+ uint32_t intf_addr;
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ for (i = 0; i < params->num; i++) {
+ vfe_intf = params->entries[i].vfe_intf;
+ if (vfe_intf >= VFE_MAX) {
+ pr_err("%s: %d invalid i %d vfe_intf %d\n", __func__,
+ __LINE__, i, vfe_intf);
+ return -EINVAL;
+ }
+ vfe_mask |= (1 << vfe_intf);
+ }
+
+ /* Turn ON regulators before enabling the clocks*/
+ rc = msm_ispif_set_regulators(ispif->vfe_vdd,
+ ispif->vfe_vdd_count, 1);
+ if (rc < 0)
+ return -EFAULT;
+
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_clk_info, ispif->clk,
+ ispif->num_clk, 1);
+ if (rc < 0)
+ goto disable_regulator;
+
+ if (vfe_mask & (1 << VFE0)) {
+ atomic_set(&ispif->reset_trig[VFE0], 1);
+ /* initiate reset of ISPIF */
+ msm_camera_io_w(ISPIF_RST_CMD_MASK_RESTART,
+ ispif->base + ISPIF_RST_CMD_ADDR);
+ timeout = wait_for_completion_timeout(
+ &ispif->reset_complete[VFE0], msecs_to_jiffies(500));
+ if (timeout <= 0) {
+ pr_err("%s: VFE0 reset wait timeout\n", __func__);
+ rc = -ETIMEDOUT;
+ goto disable_clk;
+ }
+ }
+
+ if (ispif->hw_num_isps > 1 && (vfe_mask & (1 << VFE1))) {
+ atomic_set(&ispif->reset_trig[VFE1], 1);
+ msm_camera_io_w(ISPIF_RST_CMD_1_MASK_RESTART,
+ ispif->base + ISPIF_RST_CMD_1_ADDR);
+ timeout = wait_for_completion_timeout(
+ &ispif->reset_complete[VFE1],
+ msecs_to_jiffies(500));
+ if (timeout <= 0) {
+ pr_err("%s: VFE1 reset wait timeout\n", __func__);
+ rc = -ETIMEDOUT;
+ goto disable_clk;
+ }
+ }
+
+ pr_info("%s: ISPIF reset hw done, restarting\n", __func__);
+ rc = msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_clk_info, ispif->clk,
+ ispif->num_clk, 0);
+ if (rc < 0)
+ goto disable_regulator;
+
+ /* Turn OFF regulators after disabling clocks */
+ rc = msm_ispif_set_regulators(ispif->vfe_vdd, ispif->vfe_vdd_count, 0);
+ if (rc < 0)
+ goto end;
+
+ for (i = 0; i < params->num; i++) {
+ intftype = params->entries[i].intftype;
+ vfe_intf = params->entries[i].vfe_intf;
+
+ switch (intftype) {
+ case PIX0:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
+ break;
+ case RDI0:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
+ break;
+ case PIX1:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
+ break;
+ case RDI1:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
+ break;
+ case RDI2:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
+ break;
+ default:
+ pr_err("%s: invalid intftype=%d\n", __func__,
+ params->entries[i].intftype);
+ rc = -EPERM;
+ goto end;
+ }
+
+ msm_ispif_intf_cmd(ispif,
+ ISPIF_INTF_CMD_ENABLE_FRAME_BOUNDARY, params);
+ }
+
+ for (i = 0; i < params->num; i++) {
+ intftype = params->entries[i].intftype;
+
+ vfe_intf = params->entries[i].vfe_intf;
+
+
+ cid_mask = msm_ispif_get_cids_mask_from_cfg(
+ &params->entries[i]);
+
+ msm_ispif_enable_intf_cids(ispif, intftype,
+ cid_mask, vfe_intf, 1);
+ }
+ return rc;
+
+disable_clk:
+ msm_cam_clk_enable(&ispif->pdev->dev,
+ ispif_clk_info, ispif->clk,
+ ispif->num_clk, 0);
+disable_regulator:
+ /* Turn OFF regulators */
+ msm_ispif_set_regulators(ispif->vfe_vdd, ispif->vfe_vdd_count, 0);
+end:
+ return rc;
+}
+
+static int msm_ispif_stop_frame_boundary(struct ispif_device *ispif,
+ struct msm_ispif_param_data *params)
+{
+ int i, rc = 0;
+ uint16_t cid_mask = 0;
+ uint32_t intf_addr;
+ enum msm_ispif_vfe_intf vfe_intf;
+ uint32_t stop_flag = 0;
+
+ BUG_ON(!ispif);
+ BUG_ON(!params);
+
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+
+ if (params->num > MAX_PARAM_ENTRIES) {
+ pr_err("%s: invalid param entries %d\n", __func__,
+ params->num);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ for (i = 0; i < params->num; i++) {
+ if (!msm_ispif_is_intf_valid(ispif->csid_version,
+ params->entries[i].vfe_intf)) {
+ pr_err("%s: invalid interface type\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+ msm_ispif_intf_cmd(ispif,
+ ISPIF_INTF_CMD_DISABLE_FRAME_BOUNDARY, params);
+
+ for (i = 0; i < params->num; i++) {
+ cid_mask =
+ msm_ispif_get_cids_mask_from_cfg(&params->entries[i]);
+ vfe_intf = params->entries[i].vfe_intf;
+
+ switch (params->entries[i].intftype) {
+ case PIX0:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 0);
+ break;
+ case RDI0:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 0);
+ break;
+ case PIX1:
+ intf_addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe_intf, 1);
+ break;
+ case RDI1:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 1);
+ break;
+ case RDI2:
+ intf_addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe_intf, 2);
+ break;
+ default:
+ pr_err("%s: invalid intftype=%d\n", __func__,
+ params->entries[i].intftype);
+ rc = -EPERM;
+ goto end;
+ }
+
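+ /* Wait for the interface status to read 0xF (idle) before clearing its CIDs */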
+ rc = readl_poll_timeout(ispif->base + intf_addr, stop_flag,
+ (stop_flag & 0xF) == 0xF,
+ ISPIF_TIMEOUT_SLEEP_US,
+ ISPIF_TIMEOUT_ALL_US);
+ if (rc < 0)
+ goto end;
+
+ /* disable CIDs in CID_MASK register */
+ msm_ispif_enable_intf_cids(ispif, params->entries[i].intftype,
+ cid_mask, vfe_intf, 0);
+ }
+
+end:
+ return rc;
+}
+
+static void ispif_process_irq(struct ispif_device *ispif,
+ struct ispif_irq_status *out, enum msm_ispif_vfe_intf vfe_id)
+{
+ BUG_ON(!ispif);
+ BUG_ON(!out);
+
+ if (out[vfe_id].ispifIrqStatus0 &
+ ISPIF_IRQ_STATUS_PIX_SOF_MASK) {
+ if (ispif->ispif_sof_debug < ISPIF_SOF_DEBUG_COUNT)
+ pr_err("%s: PIX0 frame id: %u\n", __func__,
+ ispif->sof_count[vfe_id].sof_cnt[PIX0]);
+ ispif->sof_count[vfe_id].sof_cnt[PIX0]++;
+ ispif->ispif_sof_debug++;
+ }
+ if (out[vfe_id].ispifIrqStatus0 &
+ ISPIF_IRQ_STATUS_RDI0_SOF_MASK) {
+ if (ispif->ispif_rdi0_debug < ISPIF_SOF_DEBUG_COUNT)
+ pr_err("%s: RDI0 frame id: %u\n", __func__,
+ ispif->sof_count[vfe_id].sof_cnt[RDI0]);
+ ispif->sof_count[vfe_id].sof_cnt[RDI0]++;
+ ispif->ispif_rdi0_debug++;
+ }
+ if (out[vfe_id].ispifIrqStatus1 &
+ ISPIF_IRQ_STATUS_RDI1_SOF_MASK) {
+ if (ispif->ispif_rdi1_debug < ISPIF_SOF_DEBUG_COUNT)
+ pr_err("%s: RDI1 frame id: %u\n", __func__,
+ ispif->sof_count[vfe_id].sof_cnt[RDI1]);
+ ispif->sof_count[vfe_id].sof_cnt[RDI1]++;
+ ispif->ispif_rdi1_debug++;
+ }
+ if (out[vfe_id].ispifIrqStatus2 &
+ ISPIF_IRQ_STATUS_RDI2_SOF_MASK) {
+ if (ispif->ispif_rdi2_debug < ISPIF_SOF_DEBUG_COUNT)
+ pr_err("%s: RDI2 frame id: %u\n", __func__,
+ ispif->sof_count[vfe_id].sof_cnt[RDI2]);
+ ispif->sof_count[vfe_id].sof_cnt[RDI2]++;
+ ispif->ispif_rdi2_debug++;
+ }
+}
+
+static inline void msm_ispif_read_irq_status(struct ispif_irq_status *out,
+ void *data)
+{
+ struct ispif_device *ispif = (struct ispif_device *)data;
+ bool fatal_err = false;
+ int i = 0;
+
+ BUG_ON(!ispif);
+ BUG_ON(!out);
+
+ out[VFE0].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_0(VFE0));
+ msm_camera_io_w(out[VFE0].ispifIrqStatus0,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE0));
+
+ out[VFE0].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_1(VFE0));
+ msm_camera_io_w(out[VFE0].ispifIrqStatus1,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE0));
+
+ out[VFE0].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_2(VFE0));
+ msm_camera_io_w_mb(out[VFE0].ispifIrqStatus2,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE0));
+
+ if (ispif->vfe_info.num_vfe > 1) {
+ out[VFE1].ispifIrqStatus0 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_0(VFE1));
+ msm_camera_io_w(out[VFE1].ispifIrqStatus0,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(VFE1));
+
+ out[VFE1].ispifIrqStatus1 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_1(VFE1));
+ msm_camera_io_w(out[VFE1].ispifIrqStatus1,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(VFE1));
+
+ out[VFE1].ispifIrqStatus2 = msm_camera_io_r(ispif->base +
+ ISPIF_VFE_m_IRQ_STATUS_2(VFE1));
+ msm_camera_io_w_mb(out[VFE1].ispifIrqStatus2,
+ ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(VFE1));
+ }
+ msm_camera_io_w_mb(ISPIF_IRQ_GLOBAL_CLEAR_CMD, ispif->base +
+ ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ if (out[VFE0].ispifIrqStatus0 & ISPIF_IRQ_STATUS_MASK) {
+ if (out[VFE0].ispifIrqStatus0 & RESET_DONE_IRQ) {
+ if (atomic_dec_and_test(&ispif->reset_trig[VFE0]))
+ complete(&ispif->reset_complete[VFE0]);
+ }
+
+ if (out[VFE0].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE0 pix0 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE0].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE0 rdi0 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE0].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE0 rdi1 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE0].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE0 rdi2 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ ispif_process_irq(ispif, out, VFE0);
+ }
+ if (ispif->hw_num_isps > 1) {
+ if (out[VFE1].ispifIrqStatus0 & RESET_DONE_IRQ) {
+ if (atomic_dec_and_test(&ispif->reset_trig[VFE1]))
+ complete(&ispif->reset_complete[VFE1]);
+ }
+
+ if (out[VFE1].ispifIrqStatus0 & PIX_INTF_0_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE1 pix0 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE1].ispifIrqStatus0 & RAW_INTF_0_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE1 rdi0 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE1].ispifIrqStatus1 & RAW_INTF_1_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE1 rdi1 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ if (out[VFE1].ispifIrqStatus2 & RAW_INTF_2_OVERFLOW_IRQ) {
+ pr_err_ratelimited("%s: VFE1 rdi2 overflow.\n",
+ __func__);
+ fatal_err = true;
+ }
+
+ ispif_process_irq(ispif, out, VFE1);
+ }
+
+ if (fatal_err == true) {
+ pr_err("%s: fatal error, stop ispif immediately\n", __func__);
+ for (i = 0; i < ispif->vfe_info.num_vfe; i++) {
+ msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_0(i));
+ msm_camera_io_w(ISPIF_STOP_INTF_IMMEDIATELY,
+ ispif->base + ISPIF_VFE_m_INTF_CMD_1(i));
+ }
+ }
+}
+
+static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
+{
+ struct ispif_irq_status irq[VFE_MAX];
+
+ msm_ispif_read_irq_status(irq, data);
+ return IRQ_HANDLED;
+}
+
+static int msm_ispif_set_vfe_info(struct ispif_device *ispif,
+ struct msm_ispif_vfe_info *vfe_info)
+{
+ memcpy(&ispif->vfe_info, vfe_info, sizeof(struct msm_ispif_vfe_info));
+ if (ispif->vfe_info.num_vfe > ispif->hw_num_isps)
+ return -EINVAL;
+ return 0;
+}
+
+static int msm_ispif_init(struct ispif_device *ispif,
+ uint32_t csid_version)
+{
+ int rc = 0;
+
+ BUG_ON(!ispif);
+
+ if (ispif->ispif_state == ISPIF_POWER_UP) {
+ pr_err("%s: ispif already initialized, state = %d\n", __func__,
+ ispif->ispif_state);
+ rc = -EPERM;
+ return rc;
+ }
+
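+ /* 0xFFFFFFFF means "no command applied yet"; msm_ispif_intf_cmd() only writes a register once its value changes */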
+ /* can we set to zero? */
+ ispif->applied_intf_cmd[VFE0].intf_cmd = 0xFFFFFFFF;
+ ispif->applied_intf_cmd[VFE0].intf_cmd1 = 0xFFFFFFFF;
+ ispif->applied_intf_cmd[VFE1].intf_cmd = 0xFFFFFFFF;
+ ispif->applied_intf_cmd[VFE1].intf_cmd1 = 0xFFFFFFFF;
+ memset(ispif->sof_count, 0, sizeof(ispif->sof_count));
+
+ ispif->csid_version = csid_version;
+
+ if (ispif->csid_version >= CSID_VERSION_V30) {
+ if (!ispif->clk_mux_mem || !ispif->clk_mux_io) {
+ pr_err("%s csi clk mux mem %p io %p\n", __func__,
+ ispif->clk_mux_mem, ispif->clk_mux_io);
+ rc = -ENOMEM;
+ return rc;
+ }
+ ispif->clk_mux_base = ioremap(ispif->clk_mux_mem->start,
+ resource_size(ispif->clk_mux_mem));
+ if (!ispif->clk_mux_base) {
+ pr_err("%s: clk_mux_mem ioremap failed\n", __func__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ }
+
+ ispif->base = ioremap(ispif->mem->start,
+ resource_size(ispif->mem));
+ if (!ispif->base) {
+ rc = -ENOMEM;
+ pr_err("%s: nomem\n", __func__);
+ goto end;
+ }
+ rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
+ IRQF_TRIGGER_RISING, "ispif", ispif);
+ if (rc) {
+ pr_err("%s: request_irq error = %d\n", __func__, rc);
+ goto error_irq;
+ }
+
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_ISPIF, CAMERA_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto ahb_vote_fail;
+ }
+
+ rc = msm_ispif_reset_hw(ispif);
+ if (rc)
+ goto error_ahb;
+
+ rc = msm_ispif_reset(ispif);
+ if (rc == 0) {
+ ispif->ispif_state = ISPIF_POWER_UP;
+ CDBG("%s: power up done\n", __func__);
+ goto end;
+ }
+
+error_ahb:
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_ISPIF,
+ CAMERA_AHB_SUSPEND_VOTE);
+ if (rc < 0)
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ahb_vote_fail:
+ free_irq(ispif->irq->start, ispif);
+error_irq:
+ iounmap(ispif->base);
+
+end:
+ return rc;
+}
+
+static void msm_ispif_release(struct ispif_device *ispif)
+{
+ BUG_ON(!ispif);
+
+ if (!ispif->base) {
+ pr_err("%s: ispif base is NULL\n", __func__);
+ return;
+ }
+
+ if (ispif->ispif_state != ISPIF_POWER_UP) {
+ pr_err("%s: ispif invalid state %d\n", __func__,
+ ispif->ispif_state);
+ return;
+ }
+
+ /* make sure no streaming going on */
+ msm_ispif_reset(ispif);
+ msm_ispif_reset_hw(ispif);
+
+ disable_irq(ispif->irq->start);
+ free_irq(ispif->irq->start, ispif);
+
+ iounmap(ispif->base);
+
+ iounmap(ispif->clk_mux_base);
+
+ ispif->ispif_state = ISPIF_POWER_DOWN;
+
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_ISPIF,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+}
+
+static long msm_ispif_cmd(struct v4l2_subdev *sd, void *arg)
+{
+ long rc = 0;
+ struct ispif_cfg_data *pcdata = (struct ispif_cfg_data *)arg;
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+
+ BUG_ON(!sd);
+ BUG_ON(!pcdata);
+
+ mutex_lock(&ispif->mutex);
+ switch (pcdata->cfg_type) {
+ case ISPIF_ENABLE_REG_DUMP:
+ ispif->enb_dump_reg = pcdata->reg_dump; /* save dump config */
+ break;
+ case ISPIF_INIT:
+ rc = msm_ispif_init(ispif, pcdata->csid_version);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_CFG:
+ rc = msm_ispif_config(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_START_FRAME_BOUNDARY:
+ rc = msm_ispif_start_frame_boundary(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_RESTART_FRAME_BOUNDARY:
+ rc = msm_ispif_restart_frame_boundary(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+
+ case ISPIF_STOP_FRAME_BOUNDARY:
+ rc = msm_ispif_stop_frame_boundary(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_STOP_IMMEDIATELY:
+ rc = msm_ispif_stop_immediately(ispif, &pcdata->params);
+ msm_ispif_io_dump_reg(ispif);
+ break;
+ case ISPIF_RELEASE:
+ msm_ispif_release(ispif);
+ break;
+ case ISPIF_SET_VFE_INFO:
+ rc = msm_ispif_set_vfe_info(ispif, &pcdata->vfe_info);
+ break;
+ default:
+ pr_err("%s: invalid cfg_type\n", __func__);
+ rc = -EINVAL;
+ break;
+ }
+ mutex_unlock(&ispif->mutex);
+ return rc;
+}
+static struct v4l2_file_operations msm_ispif_v4l2_subdev_fops;
+
+static long msm_ispif_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+
+ switch (cmd) {
+ case VIDIOC_MSM_ISPIF_CFG:
+ return msm_ispif_cmd(sd, arg);
+ case MSM_SD_NOTIFY_FREEZE: {
+ ispif->ispif_sof_debug = 0;
+ ispif->ispif_rdi0_debug = 0;
+ ispif->ispif_rdi1_debug = 0;
+ ispif->ispif_rdi2_debug = 0;
+ return 0;
+ }
+ case MSM_SD_SHUTDOWN: {
+ struct ispif_device *ispif =
+ (struct ispif_device *)v4l2_get_subdevdata(sd);
+
+ if (ispif && ispif->base) {
+ while (ispif->open_cnt != 0)
+ ispif_close_node(sd, NULL);
+ } else {
+ pr_debug("%s:SD SHUTDOWN fail, ispif%s %p\n", __func__,
+ ispif ? "_base" : "",
+ ispif ? ispif->base : NULL);
+ }
+ return 0;
+ }
+ default:
+ pr_err_ratelimited("%s: invalid cmd 0x%x received\n",
+ __func__, cmd);
+ return -ENOIOCTLCMD;
+ }
+}
+
+static long msm_ispif_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ return msm_ispif_subdev_ioctl(sd, cmd, arg);
+}
+
+static long msm_ispif_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_ispif_subdev_do_ioctl);
+}
+
+static int ispif_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+ int rc = 0;
+
+ mutex_lock(&ispif->mutex);
+ if (0 == ispif->open_cnt) {
+ /* enable regulator and clocks on first open */
+ rc = msm_ispif_set_regulators(ispif->ispif_vdd,
+ ispif->ispif_vdd_count, 1);
+ if (rc)
+ goto unlock;
+ rc = msm_ispif_clk_ahb_enable(ispif, 1);
+ if (rc) {
+ msm_ispif_set_regulators(ispif->ispif_vdd,
+ ispif->ispif_vdd_count, 0);
+ goto unlock;
+ }
+ }
+ /* mem remap is done in init when the clock is on */
+ ispif->open_cnt++;
+unlock:
+ mutex_unlock(&ispif->mutex);
+ return rc;
+}
+
+static int ispif_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct ispif_device *ispif = v4l2_get_subdevdata(sd);
+ if (!ispif) {
+ pr_err("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&ispif->mutex);
+ if (ispif->open_cnt == 0) {
+ pr_err("%s: Invalid close\n", __func__);
+ rc = -ENODEV;
+ goto end;
+ }
+ ispif->open_cnt--;
+ if (ispif->open_cnt == 0) {
+ msm_ispif_release(ispif);
+ /* disable clocks and regulator on last close */
+ msm_ispif_clk_ahb_enable(ispif, 0);
+ msm_ispif_set_regulators(ispif->ispif_vdd,
+ ispif->ispif_vdd_count, 0);
+ }
+end:
+ mutex_unlock(&ispif->mutex);
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_ispif_subdev_core_ops = {
+ .ioctl = &msm_ispif_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_ispif_subdev_ops = {
+ .core = &msm_ispif_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_ispif_internal_ops = {
+ .open = ispif_open_node,
+ .close = ispif_close_node,
+};
+
+static int ispif_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct ispif_device *ispif;
+
+ ispif = kzalloc(sizeof(struct ispif_device), GFP_KERNEL);
+ if (!ispif) {
+ pr_err("%s: not enough memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (pdev->dev.of_node) {
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,num-isps", &ispif->hw_num_isps);
+ if (rc)
+ /* backward compatibility */
+ ispif->hw_num_isps = 1;
+ /* not an error condition */
+ rc = 0;
+ }
+
+ rc = msm_ispif_get_regulator_info(ispif, pdev);
+ if (rc < 0)
+ return -EFAULT;
+
+ rc = msm_ispif_get_clk_info(ispif, pdev,
+ ispif_ahb_clk_info, ispif_clk_info);
+ if (rc < 0) {
+ pr_err("%s: msm_ispif_get_clk_info() failed\n", __func__);
+ return -EFAULT;
+ }
+ mutex_init(&ispif->mutex);
+ ispif->mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "ispif");
+ if (!ispif->mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto error;
+ }
+ ispif->irq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "ispif");
+ if (!ispif->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto error;
+ }
+ ispif->io = request_mem_region(ispif->mem->start,
+ resource_size(ispif->mem), pdev->name);
+ if (!ispif->io) {
+ pr_err("%s: no valid mem region\n", __func__);
+ rc = -EBUSY;
+ goto error;
+ }
+ ispif->clk_mux_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "csi_clk_mux");
+ if (ispif->clk_mux_mem) {
+ ispif->clk_mux_io = request_mem_region(
+ ispif->clk_mux_mem->start,
+ resource_size(ispif->clk_mux_mem),
+ ispif->clk_mux_mem->name);
+ if (!ispif->clk_mux_io)
+ pr_err("%s: no valid csi_mux region\n", __func__);
+ }
+
+ ispif->pdev = pdev;
+
+ v4l2_subdev_init(&ispif->msm_sd.sd, &msm_ispif_subdev_ops);
+ ispif->msm_sd.sd.internal_ops = &msm_ispif_internal_ops;
+ ispif->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ snprintf(ispif->msm_sd.sd.name,
+ ARRAY_SIZE(ispif->msm_sd.sd.name), MSM_ISPIF_DRV_NAME);
+ v4l2_set_subdevdata(&ispif->msm_sd.sd, ispif);
+
+ platform_set_drvdata(pdev, &ispif->msm_sd.sd);
+
+ media_entity_init(&ispif->msm_sd.sd.entity, 0, NULL, 0);
+ ispif->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ispif->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_ISPIF;
+ ispif->msm_sd.sd.entity.name = pdev->name;
+ ispif->msm_sd.close_seq = MSM_SD_CLOSE_1ST_CATEGORY | 0x1;
+ rc = msm_sd_register(&ispif->msm_sd);
+ if (rc) {
+ pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+ goto error;
+ }
+ msm_cam_copy_v4l2_subdev_fops(&msm_ispif_v4l2_subdev_fops);
+ msm_ispif_v4l2_subdev_fops.unlocked_ioctl =
+ msm_ispif_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+ msm_ispif_v4l2_subdev_fops.compat_ioctl32 = msm_ispif_subdev_fops_ioctl;
+#endif
+ ispif->msm_sd.sd.devnode->fops = &msm_ispif_v4l2_subdev_fops;
+ ispif->ispif_state = ISPIF_POWER_DOWN;
+ ispif->open_cnt = 0;
+ init_completion(&ispif->reset_complete[VFE0]);
+ init_completion(&ispif->reset_complete[VFE1]);
+ atomic_set(&ispif->reset_trig[VFE0], 0);
+ atomic_set(&ispif->reset_trig[VFE1], 0);
+ return 0;
+
+error:
+ msm_ispif_put_regulator(ispif);
+ mutex_destroy(&ispif->mutex);
+ kfree(ispif);
+ return rc;
+}
+
+static const struct of_device_id msm_ispif_dt_match[] = {
+ {.compatible = "qcom,ispif"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_ispif_dt_match);
+
+static struct platform_driver ispif_driver = {
+ .probe = ispif_probe,
+ .driver = {
+ .name = MSM_ISPIF_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ispif_dt_match,
+ },
+};
+
+static int __init msm_ispif_init_module(void)
+{
+ return platform_driver_register(&ispif_driver);
+}
+
+static void __exit msm_ispif_exit_module(void)
+{
+ platform_driver_unregister(&ispif_driver);
+}
+
+module_init(msm_ispif_init_module);
+module_exit(msm_ispif_exit_module);
+MODULE_DESCRIPTION("MSM ISP Interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
new file mode 100644
index 000000000000..cd1a171c07e3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.h
@@ -0,0 +1,83 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_ISPIF_H
+#define MSM_ISPIF_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_ispif.h>
+#include "msm_sd.h"
+
+/* Maximum number of voltage supply for ispif and vfe */
+#define ISPIF_VDD_INFO_MAX 2
+#define ISPIF_VFE_VDD_INFO_MAX 2
+
+#define ISPIF_CLK_INFO_MAX 27
+
+struct ispif_irq_status {
+ uint32_t ispifIrqStatus0;
+ uint32_t ispifIrqStatus1;
+ uint32_t ispifIrqStatus2;
+};
+
+enum msm_ispif_state_t {
+ ISPIF_POWER_UP,
+ ISPIF_POWER_DOWN,
+};
+struct ispif_sof_count {
+ uint32_t sof_cnt[INTF_MAX];
+};
+
+struct ispif_intf_cmd {
+ uint32_t intf_cmd;
+ uint32_t intf_cmd1;
+};
+
+struct ispif_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct resource *mem;
+ struct resource *clk_mux_mem;
+ struct resource *irq;
+ struct resource *io;
+ struct resource *clk_mux_io;
+ void __iomem *base;
+ void __iomem *clk_mux_base;
+ struct mutex mutex;
+ uint8_t start_ack_pending;
+ uint32_t csid_version;
+ int enb_dump_reg;
+ uint32_t open_cnt;
+ struct ispif_sof_count sof_count[VFE_MAX];
+ struct ispif_intf_cmd applied_intf_cmd[VFE_MAX];
+ enum msm_ispif_state_t ispif_state;
+ struct msm_ispif_vfe_info vfe_info;
+ struct clk *ahb_clk[ISPIF_CLK_INFO_MAX];
+ struct clk *clk[ISPIF_CLK_INFO_MAX];
+ struct completion reset_complete[VFE_MAX];
+ atomic_t reset_trig[VFE_MAX];
+ uint32_t hw_num_isps;
+ uint32_t num_ahb_clk;
+ uint32_t num_clk;
+ uint32_t clk_idx;
+ uint32_t ispif_sof_debug;
+ uint32_t ispif_rdi0_debug;
+ uint32_t ispif_rdi1_debug;
+ uint32_t ispif_rdi2_debug;
+ struct regulator *ispif_vdd[ISPIF_VDD_INFO_MAX];
+ int ispif_vdd_count;
+ struct regulator *vfe_vdd[ISPIF_VFE_VDD_INFO_MAX];
+ int vfe_vdd_count;
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
new file mode 100644
index 000000000000..b82fd34f2396
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v1.h
@@ -0,0 +1,118 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISPIF_HWREG_V1_H__
+#define __MSM_ISPIF_HWREG_V1_H__
+
+/* common registers */
+#define ISPIF_RST_CMD_ADDR 0x0000
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x0124
+#define PIX0_LINE_BUF_EN_BIT 0
+
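+/* v1 hardware exposes only one set of VFE registers, so the per-VFE offset is always 0 */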
+#define ISPIF_VFE(m) (0x0)
+
+#define ISPIF_VFE_m_CTRL_0(m) (0x0008 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x0100 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x010C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x0118 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x0108 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x0114 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x0120 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x0104 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x0110 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x011C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m) (0x000C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x0004 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x0030 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x0010 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x0014 + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x20) : 0) \
+ + 8*(n))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n) (0x0290 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x001C + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x24) : 0) \
+ + 0xc*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x0020 + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x24) : 0) \
+ + 0xc*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x0024 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x0028 + ISPIF_VFE(m) + \
+ ((n > 0) ? (0x34) : 0) \
+ + 8*(n))
+
+/* Defines for compatibility with newer ISPIF versions */
+#define ISPIF_RST_CMD_1_ADDR (0x0000)
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n) (0x0000 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m) (0x0000 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m) (0x0000 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m) (0x0000 + ISPIF_VFE(m))
+
+
+
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR 0x8
+
+/*ISPIF RESET BITS*/
+#define VFE_CLK_DOMAIN_RST BIT(31)
+#define RDI_CLK_DOMAIN_RST BIT(30)
+#define PIX_CLK_DOMAIN_RST BIT(29)
+#define AHB_CLK_DOMAIN_RST BIT(28)
+#define RDI_1_CLK_DOMAIN_RST BIT(27)
+#define PIX_1_CLK_DOMAIN_RST BIT(26)
+#define RDI_2_CLK_DOMAIN_RST BIT(25)
+#define RDI_2_MISR_RST_STB BIT(20)
+#define RDI_2_VFE_RST_STB BIT(19)
+#define RDI_2_CSID_RST_STB BIT(18)
+#define RDI_1_MISR_RST_STB BIT(14)
+#define RDI_1_VFE_RST_STB BIT(13)
+#define RDI_1_CSID_RST_STB BIT(12)
+#define PIX_1_VFE_RST_STB BIT(10)
+#define PIX_1_CSID_RST_STB BIT(9)
+#define RDI_0_MISR_RST_STB BIT(8)
+#define RDI_0_VFE_RST_STB BIT(7)
+#define RDI_0_CSID_RST_STB BIT(6)
+#define PIX_0_MISR_RST_STB BIT(5)
+#define PIX_0_VFE_RST_STB BIT(4)
+#define PIX_0_CSID_RST_STB BIT(3)
+#define SW_REG_RST_STB BIT(2)
+#define MISC_LOGIC_RST_STB BIT(1)
+#define STROBED_RST_EN BIT(0)
+
+#define ISPIF_RST_CMD_MASK 0xFE1C77FF
+#define ISPIF_RST_CMD_1_MASK 0xFFFFFFFF /* undefined */
+
+#define ISPIF_RST_CMD_MASK_RESTART 0x00001FF9
+#define ISPIF_RST_CMD_1_MASK_RESTART 0x00001FF9 /* undefined */
+
+/* irq_mask_0 */
+#define PIX_INTF_0_OVERFLOW_IRQ BIT(12)
+#define RAW_INTF_0_OVERFLOW_IRQ BIT(25)
+#define RESET_DONE_IRQ BIT(27)
+/* irq_mask_1 */
+#define PIX_INTF_1_OVERFLOW_IRQ BIT(12)
+#define RAW_INTF_1_OVERFLOW_IRQ BIT(25)
+/* irq_mask_2 */
+#define RAW_INTF_2_OVERFLOW_IRQ BIT(12)
+
+#define ISPIF_IRQ_STATUS_MASK 0x0A493249
+#define ISPIF_IRQ_STATUS_1_MASK 0x02493249
+#define ISPIF_IRQ_STATUS_2_MASK 0x00001249
+
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK 0x000249
+#define ISPIF_IRQ_STATUS_RDI0_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI1_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI2_SOF_MASK 0x000249
+
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x000001
+
+#define ISPIF_STOP_INTF_IMMEDIATELY 0xAAAAAAAA
+#endif /* __MSM_ISPIF_HWREG_V1_H__ */
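
Note on the v1 offset macros above: the RDI-related registers are not laid out at a uniform stride, which is why the CID-mask, MISR and status macros carry the extra ((n > 0) ? ... : 0) correction term. The following standalone sketch (illustration only, not part of the patch) re-declares the CID-mask macro and prints the offsets it resolves to, which is a quick way to sanity-check the layout.

/* Illustration only: offsets produced by the v1 RDI CID-mask macro. */
#include <stdio.h>

#define ISPIF_VFE(m) (0x0)
#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x0014 + ISPIF_VFE(m) + \
				((n > 0) ? (0x20) : 0) + 8*(n))

int main(void)
{
	int n;

	/* Expected output: RDI0 0x14, RDI1 0x3c, RDI2 0x44 */
	for (n = 0; n < 3; n++)
		printf("RDI%d CID mask offset: 0x%02x\n", n,
			(unsigned int)ISPIF_VFE_m_RDI_INTF_n_CID_MASK(0, n));
	return 0;
}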
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
new file mode 100644
index 000000000000..01dce6d45897
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v2.h
@@ -0,0 +1,99 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISPIF_HWREG_V2_H__
+#define __MSM_ISPIF_HWREG_V2_H__
+
+/* common registers */
+#define ISPIF_RST_CMD_ADDR 0x008
+#define ISPIF_RST_CMD_1_ADDR 0x00C
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x01C
+#define PIX0_LINE_BUF_EN_BIT 6
+
+#define ISPIF_VFE(m) ((m) * 0x200)
+
+#define ISPIF_VFE_m_CTRL_0(m) (0x200 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m) (0x244 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x254 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x264 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n) (0x278 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m) (0x288 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m) (0x28C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n) (0x290 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x298 + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x29C + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x2C0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x2D0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m) (0x2E4 + ISPIF_VFE(m))
+
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR 0x8
+
+/*ISPIF RESET BITS*/
+#define VFE_CLK_DOMAIN_RST BIT(31)
+#define PIX_1_CLK_DOMAIN_RST BIT(30)
+#define PIX_CLK_DOMAIN_RST BIT(29)
+#define RDI_2_CLK_DOMAIN_RST BIT(28)
+#define RDI_1_CLK_DOMAIN_RST BIT(27)
+#define RDI_CLK_DOMAIN_RST BIT(26)
+#define AHB_CLK_DOMAIN_RST BIT(25)
+#define RDI_2_VFE_RST_STB BIT(12)
+#define RDI_2_CSID_RST_STB BIT(11)
+#define RDI_1_VFE_RST_STB BIT(10)
+#define RDI_1_CSID_RST_STB BIT(9)
+#define RDI_0_VFE_RST_STB BIT(8)
+#define RDI_0_CSID_RST_STB BIT(7)
+#define PIX_1_VFE_RST_STB BIT(6)
+#define PIX_1_CSID_RST_STB BIT(5)
+#define PIX_0_VFE_RST_STB BIT(4)
+#define PIX_0_CSID_RST_STB BIT(3)
+#define SW_REG_RST_STB BIT(2)
+#define MISC_LOGIC_RST_STB BIT(1)
+#define STROBED_RST_EN BIT(0)
+
+#define ISPIF_RST_CMD_MASK 0xFE0F1FFF
+#define ISPIF_RST_CMD_1_MASK 0xFC0F1FF9
+
+#define ISPIF_RST_CMD_MASK_RESTART 0x00001FF9
+#define ISPIF_RST_CMD_1_MASK_RESTART 0x00001FF9
+
+#define PIX_INTF_0_OVERFLOW_IRQ BIT(12)
+#define RAW_INTF_0_OVERFLOW_IRQ BIT(25)
+#define RAW_INTF_1_OVERFLOW_IRQ BIT(25)
+#define RAW_INTF_2_OVERFLOW_IRQ BIT(12)
+#define RESET_DONE_IRQ BIT(27)
+
+#define ISPIF_IRQ_STATUS_MASK 0x0A493249
+#define ISPIF_IRQ_STATUS_1_MASK 0x02493249
+#define ISPIF_IRQ_STATUS_2_MASK 0x00001249
+
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK 0x249
+#define ISPIF_IRQ_STATUS_RDI0_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI1_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI2_SOF_MASK 0x249
+
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x1
+
+#define ISPIF_STOP_INTF_IMMEDIATELY 0xAAAAAAAA
+
+#endif /* __MSM_ISPIF_HWREG_V2_H__ */
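
A structural difference worth noting between the v1 and v2 maps: from v2 onward ISPIF_VFE(m) expands to (m) * 0x200, so every VFE gets its own 0x200-byte register bank and the same macro resolves to a different absolute offset per VFE. A minimal sketch of that banking (illustration only, not part of the patch):

/* Illustration only: per-VFE banking introduced by the v2 register map. */
#include <stdio.h>

#define ISPIF_VFE(m) ((m) * 0x200)
#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + ISPIF_VFE(m))

int main(void)
{
	/* Expected output: VFE0 0x208, VFE1 0x408 */
	printf("VFE0 IRQ_MASK_0 offset: 0x%03x\n",
		(unsigned int)ISPIF_VFE_m_IRQ_MASK_0(0));
	printf("VFE1 IRQ_MASK_0 offset: 0x%03x\n",
		(unsigned int)ISPIF_VFE_m_IRQ_MASK_0(1));
	return 0;
}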
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h
new file mode 100644
index 000000000000..343575263816
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif_hwreg_v3.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_ISPIF_HWREG_V3_H__
+#define __MSM_ISPIF_HWREG_V3_H__
+
+/* common registers */
+#define ISPIF_RST_CMD_ADDR 0x008
+#define ISPIF_RST_CMD_1_ADDR 0x00C
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x01C
+#define PIX0_LINE_BUF_EN_BIT 6
+
+#define ISPIF_VFE(m) ((m) * 0x200)
+
+#define ISPIF_VFE_m_CTRL_0(m) (0x200 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_CTRL_1(m) (0x204 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INPUT_SEL(m) (0x244 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24C + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) (0x254 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) (0x264 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_0(m, n) (0x270 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_PACK_1(m, n) (0x27C + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_CROP(m, n) (0x288 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_THRESHOLD(m) (0x290 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_OUTPUT_SEL(m) (0x294 + ISPIF_VFE(m))
+#define ISPIF_VFE_m_PIX_OUTPUT_n_MISR(m, n) (0x298 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_0(m, n) (0x29C + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_RDI_OUTPUT_n_MISR_1(m, n) (0x2A0 + ISPIF_VFE(m) + 8*(n))
+#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) (0x2C0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) (0x2D0 + ISPIF_VFE(m) + 4*(n))
+#define ISPIF_VFE_m_3D_DESKEW_SIZE(m) (0x2E4 + ISPIF_VFE(m))
+
+/* CSID CLK MUX SEL REGISTERS */
+#define ISPIF_RDI_CLK_MUX_SEL_ADDR 0x8
+
+/*ISPIF RESET BITS*/
+#define VFE_CLK_DOMAIN_RST BIT(31)
+#define PIX_1_CLK_DOMAIN_RST BIT(30)
+#define PIX_CLK_DOMAIN_RST BIT(29)
+#define RDI_2_CLK_DOMAIN_RST BIT(28)
+#define RDI_1_CLK_DOMAIN_RST BIT(27)
+#define RDI_CLK_DOMAIN_RST BIT(26)
+#define AHB_CLK_DOMAIN_RST BIT(25)
+#define RDI_2_VFE_RST_STB BIT(12)
+#define RDI_2_CSID_RST_STB BIT(11)
+#define RDI_1_VFE_RST_STB BIT(10)
+#define RDI_1_CSID_RST_STB BIT(9)
+#define RDI_0_VFE_RST_STB BIT(8)
+#define RDI_0_CSID_RST_STB BIT(7)
+#define PIX_1_VFE_RST_STB BIT(6)
+#define PIX_1_CSID_RST_STB BIT(5)
+#define PIX_0_VFE_RST_STB BIT(4)
+#define PIX_0_CSID_RST_STB BIT(3)
+#define SW_REG_RST_STB BIT(2)
+#define MISC_LOGIC_RST_STB BIT(1)
+#define STROBED_RST_EN BIT(0)
+
+#define ISPIF_RST_CMD_MASK 0xFE7F1FFF
+#define ISPIF_RST_CMD_1_MASK 0xFC7F1FF9
+
+#define ISPIF_RST_CMD_MASK_RESTART 0x7F1FF9
+#define ISPIF_RST_CMD_1_MASK_RESTART 0x7F1FF9
+
+#define PIX_INTF_0_OVERFLOW_IRQ BIT(12)
+#define RAW_INTF_0_OVERFLOW_IRQ BIT(25)
+#define RAW_INTF_1_OVERFLOW_IRQ BIT(25)
+#define RAW_INTF_2_OVERFLOW_IRQ BIT(12)
+#define RESET_DONE_IRQ BIT(27)
+
+#define ISPIF_IRQ_STATUS_MASK 0x0A493249
+#define ISPIF_IRQ_STATUS_1_MASK 0x02493249
+#define ISPIF_IRQ_STATUS_2_MASK 0x00001249
+
+#define ISPIF_IRQ_STATUS_PIX_SOF_MASK 0x249
+#define ISPIF_IRQ_STATUS_RDI0_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI1_SOF_MASK 0x492000
+#define ISPIF_IRQ_STATUS_RDI2_SOF_MASK 0x249
+
+#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x1
+
+#define ISPIF_STOP_INTF_IMMEDIATELY 0xAAAAAAAA
+
+#endif /* __MSM_ISPIF_HWREG_V3_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/Makefile b/drivers/media/platform/msm/camera_v2/jpeg_10/Makefile
new file mode 100644
index 000000000000..0b8dc1db225c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/Makefile
@@ -0,0 +1,7 @@
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/jpeg_10
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+
+obj-$(CONFIG_MSMB_JPEG) += msm_jpeg_dev.o msm_jpeg_sync.o msm_jpeg_core.o msm_jpeg_hw.o msm_jpeg_platform.o
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_common.h b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_common.h
new file mode 100644
index 000000000000..e8e79a180e9c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_common.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_COMMON_H
+#define MSM_JPEG_COMMON_H
+
+#define JPEG_DBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define JPEG_PR_ERR pr_err
+#define JPEG_DBG_HIGH pr_debug
+
+#define JPEG_BUS_VOTED(pgmn_dev) ((pgmn_dev)->jpeg_bus_vote = 1)
+#define JPEG_BUS_UNVOTED(pgmn_dev) ((pgmn_dev)->jpeg_bus_vote = 0)
+
+enum JPEG_MODE {
+ JPEG_MODE_DISABLE,
+ JPEG_MODE_OFFLINE,
+ JPEG_MODE_REALTIME,
+ JPEG_MODE_REALTIME_ROTATION
+};
+
+enum JPEG_ROTATION {
+ JPEG_ROTATION_0,
+ JPEG_ROTATION_90,
+ JPEG_ROTATION_180,
+ JPEG_ROTATION_270
+};
+
+#endif /* MSM_JPEG_COMMON_H */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_core.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_core.c
new file mode 100644
index 000000000000..33eb69f198cf
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_core.c
@@ -0,0 +1,380 @@
+/* Copyright (c) 2012-2015,The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include "msm_jpeg_hw.h"
+#include "msm_jpeg_core.h"
+#include "msm_jpeg_platform.h"
+#include "msm_jpeg_common.h"
+
+int msm_jpeg_core_reset(struct msm_jpeg_device *pgmn_dev, uint8_t op_mode,
+ void *base, int size) {
+ unsigned long flags;
+ int rc = 0;
+ int tm = 500; /*500ms*/
+ JPEG_DBG("%s:%d] reset", __func__, __LINE__);
+ memset(&pgmn_dev->fe_pingpong_buf, 0,
+ sizeof(pgmn_dev->fe_pingpong_buf));
+ pgmn_dev->fe_pingpong_buf.is_fe = 1;
+ memset(&pgmn_dev->we_pingpong_buf, 0,
+ sizeof(pgmn_dev->we_pingpong_buf));
+ spin_lock_irqsave(&pgmn_dev->reset_lock, flags);
+ pgmn_dev->reset_done_ack = 0;
+ if (pgmn_dev->core_type == MSM_JPEG_CORE_CODEC)
+ msm_jpeg_hw_reset(base, size);
+ else
+ msm_jpeg_hw_reset_dma(base, size);
+
+ spin_unlock_irqrestore(&pgmn_dev->reset_lock, flags);
+ rc = wait_event_timeout(
+ pgmn_dev->reset_wait,
+ pgmn_dev->reset_done_ack,
+ msecs_to_jiffies(tm));
+
+ if (!pgmn_dev->reset_done_ack) {
+ JPEG_DBG("%s: reset ACK failed %d", __func__, rc);
+ return -EBUSY;
+ }
+
+ JPEG_DBG("%s: reset_done_ack rc %d", __func__, rc);
+ spin_lock_irqsave(&pgmn_dev->reset_lock, flags);
+ pgmn_dev->reset_done_ack = 0;
+ pgmn_dev->state = MSM_JPEG_RESET;
+ spin_unlock_irqrestore(&pgmn_dev->reset_lock, flags);
+
+ return 0;
+}
+
+void msm_jpeg_core_release(struct msm_jpeg_device *pgmn_dev)
+{
+ int i = 0;
+ for (i = 0; i < 2; i++) {
+ if (pgmn_dev->we_pingpong_buf.buf_status[i] &&
+ pgmn_dev->release_buf)
+ msm_jpeg_platform_p2v(pgmn_dev->iommu_hdl,
+ pgmn_dev->we_pingpong_buf.buf[i].ion_fd);
+ pgmn_dev->we_pingpong_buf.buf_status[i] = 0;
+ }
+}
+
+void msm_jpeg_core_init(struct msm_jpeg_device *pgmn_dev)
+{
+ init_waitqueue_head(&pgmn_dev->reset_wait);
+ spin_lock_init(&pgmn_dev->reset_lock);
+}
+
+int msm_jpeg_core_fe_start(struct msm_jpeg_device *pgmn_dev)
+{
+ msm_jpeg_hw_fe_start(pgmn_dev->base);
+ return 0;
+}
+
+/* fetch engine */
+int msm_jpeg_core_fe_buf_update(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf)
+{
+ int rc = 0;
+ if (0 == buf->cbcr_len)
+ buf->cbcr_buffer_addr = 0x0;
+
+ JPEG_DBG("%s:%d] 0x%08x %d 0x%08x %d\n", __func__, __LINE__,
+ (int) buf->y_buffer_addr, buf->y_len,
+ (int) buf->cbcr_buffer_addr, buf->cbcr_len);
+
+ if (pgmn_dev->core_type == MSM_JPEG_CORE_CODEC) {
+ rc = msm_jpeg_hw_pingpong_update(&pgmn_dev->fe_pingpong_buf,
+ buf, pgmn_dev->base);
+ if (rc < 0)
+ return rc;
+ msm_jpeg_hw_fe_mmu_prefetch(buf, pgmn_dev->base,
+ pgmn_dev->decode_flag);
+ } else {
+ rc = msm_jpegdma_hw_pingpong_update(
+ &pgmn_dev->fe_pingpong_buf, buf, pgmn_dev->base);
+ if (rc < 0)
+ return rc;
+ msm_jpegdma_hw_fe_mmu_prefetch(buf, pgmn_dev->base);
+ }
+
+ return rc;
+}
+
+void *msm_jpeg_core_fe_pingpong_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ return msm_jpeg_hw_pingpong_irq(&pgmn_dev->fe_pingpong_buf);
+}
+
+/* write engine */
+int msm_jpeg_core_we_buf_update(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf) {
+
+ JPEG_DBG("%s:%d] 0x%08x 0x%08x %d\n", __func__, __LINE__,
+ (int) buf->y_buffer_addr, (int) buf->cbcr_buffer_addr,
+ buf->y_len);
+
+ pgmn_dev->we_pingpong_buf.buf[0] = *buf;
+ pgmn_dev->we_pingpong_buf.buf_status[0] = 1;
+
+ if (pgmn_dev->core_type == MSM_JPEG_CORE_CODEC) {
+ msm_jpeg_hw_we_buffer_update(
+ &pgmn_dev->we_pingpong_buf.buf[0], 0, pgmn_dev->base);
+ msm_jpeg_hw_we_mmu_prefetch(buf, pgmn_dev->base,
+ pgmn_dev->decode_flag);
+ } else {
+ msm_jpegdma_hw_we_buffer_update(
+ &pgmn_dev->we_pingpong_buf.buf[0], 0, pgmn_dev->base);
+ msm_jpegdma_hw_we_mmu_prefetch(buf, pgmn_dev->base);
+ }
+
+ return 0;
+}
+
+int msm_jpeg_core_we_buf_reset(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_hw_buf *buf)
+{
+ int i = 0;
+ for (i = 0; i < 2; i++) {
+ if (pgmn_dev->we_pingpong_buf.buf[i].y_buffer_addr
+ == buf->y_buffer_addr)
+ pgmn_dev->we_pingpong_buf.buf_status[i] = 0;
+ }
+ return 0;
+}
+
+void *msm_jpeg_core_we_pingpong_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+
+ return msm_jpeg_hw_pingpong_irq(&pgmn_dev->we_pingpong_buf);
+}
+
+void *msm_jpeg_core_framedone_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ struct msm_jpeg_hw_buf *buf_p;
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+
+ buf_p = msm_jpeg_hw_pingpong_active_buffer(
+ &pgmn_dev->we_pingpong_buf);
+ if (buf_p && !pgmn_dev->decode_flag) {
+ buf_p->framedone_len =
+ msm_jpeg_hw_encode_output_size(pgmn_dev->base);
+ JPEG_DBG("%s:%d] framedone_len %d\n", __func__, __LINE__,
+ buf_p->framedone_len);
+ }
+
+ return buf_p;
+}
+
+void *msm_jpeg_core_reset_ack_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ /* @todo return the status back to msm_jpeg_core_reset */
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ return NULL;
+}
+
+void *msm_jpeg_core_err_irq(int jpeg_irq_status,
+ struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_PR_ERR("%s: Error %x\n", __func__, jpeg_irq_status);
+ return NULL;
+}
+
+static int (*msm_jpeg_irq_handler)(int, void *, void *);
+
+void msm_jpeg_core_return_buffers(struct msm_jpeg_device *pgmn_dev,
+ int jpeg_irq_status)
+{
+ void *data = NULL;
+ data = msm_jpeg_core_fe_pingpong_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_FE,
+ pgmn_dev, data);
+ data = msm_jpeg_core_we_pingpong_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_WE,
+ pgmn_dev, data);
+}
+
+irqreturn_t msm_jpeg_core_irq(int irq_num, void *context)
+{
+ void *data = NULL;
+ unsigned long flags;
+ int jpeg_irq_status;
+ struct msm_jpeg_device *pgmn_dev = (struct msm_jpeg_device *)context;
+
+ JPEG_DBG("%s:%d] irq_num = %d\n", __func__, __LINE__, irq_num);
+
+ jpeg_irq_status = msm_jpeg_hw_irq_get_status(pgmn_dev->base);
+
+ JPEG_DBG("%s:%d] jpeg_irq_status = %0x\n", __func__, __LINE__,
+ jpeg_irq_status);
+
+ /*For reset and framedone IRQs, clear all bits*/
+ if (pgmn_dev->state == MSM_JPEG_IDLE) {
+ JPEG_DBG_HIGH("%s %d ] Error IRQ received state %d",
+ __func__, __LINE__, pgmn_dev->state);
+ JPEG_DBG_HIGH("%s %d ] Ignoring the Error", __func__,
+ __LINE__);
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ JPEG_IRQ_CLEAR_ALL, pgmn_dev->base);
+ return IRQ_HANDLED;
+ } else if (jpeg_irq_status & 0x10000000) {
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ JPEG_IRQ_CLEAR_ALL, pgmn_dev->base);
+ } else if (jpeg_irq_status & 0x1) {
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ JPEG_IRQ_CLEAR_ALL, pgmn_dev->base);
+ if (pgmn_dev->decode_flag)
+ msm_jpeg_decode_status(pgmn_dev->base);
+ } else {
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ jpeg_irq_status, pgmn_dev->base);
+ }
+
+ if (msm_jpeg_hw_irq_is_frame_done(jpeg_irq_status)) {
+ /* send fe ping pong irq */
+ JPEG_DBG_HIGH("%s:%d] Session done\n", __func__, __LINE__);
+ data = msm_jpeg_core_fe_pingpong_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_FE,
+ context, data);
+ data = msm_jpeg_core_framedone_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(
+ MSM_JPEG_HW_MASK_COMP_FRAMEDONE,
+ context, data);
+ pgmn_dev->state = MSM_JPEG_INIT;
+ }
+ if (msm_jpeg_hw_irq_is_reset_ack(jpeg_irq_status)) {
+ data = msm_jpeg_core_reset_ack_irq(jpeg_irq_status,
+ pgmn_dev);
+ spin_lock_irqsave(&pgmn_dev->reset_lock, flags);
+ pgmn_dev->reset_done_ack = 1;
+ spin_unlock_irqrestore(&pgmn_dev->reset_lock, flags);
+ wake_up(&pgmn_dev->reset_wait);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(
+ MSM_JPEG_HW_MASK_COMP_RESET_ACK,
+ context, data);
+ }
+
+ /* Unexpected/unintended HW interrupt */
+ if (msm_jpeg_hw_irq_is_err(jpeg_irq_status)) {
+ if (pgmn_dev->state != MSM_JPEG_EXECUTING) {
+ /*Clear all the bits and ignore the IRQ*/
+ JPEG_DBG_HIGH("%s %d ] Error IRQ received state %d",
+ __func__, __LINE__, pgmn_dev->state);
+ JPEG_DBG_HIGH("%s %d ] Ignoring the Error", __func__,
+ __LINE__);
+ msm_jpeg_hw_irq_clear(JPEG_IRQ_CLEAR_BMSK,
+ JPEG_IRQ_CLEAR_ALL, pgmn_dev->base);
+ return IRQ_HANDLED;
+ } else {
+ if (pgmn_dev->decode_flag)
+ msm_jpeg_decode_status(pgmn_dev->base);
+ msm_jpeg_core_return_buffers(pgmn_dev, jpeg_irq_status);
+ data = msm_jpeg_core_err_irq(jpeg_irq_status, pgmn_dev);
+ if (msm_jpeg_irq_handler) {
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_ERR,
+ context, data);
+ }
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t msm_jpegdma_core_irq(int irq_num, void *context)
+{
+ void *data = NULL;
+ unsigned long flags;
+ int jpeg_irq_status;
+ struct msm_jpeg_device *pgmn_dev = context;
+
+ JPEG_DBG("%s:%d] irq_num = %d\n", __func__, __LINE__, irq_num);
+
+ jpeg_irq_status = msm_jpegdma_hw_irq_get_status(pgmn_dev->base);
+
+ JPEG_DBG("%s:%d] jpeg_irq_status = %0x\n", __func__, __LINE__,
+ jpeg_irq_status);
+
+ /*For reset and framedone IRQs, clear all bits*/
+ if (pgmn_dev->state == MSM_JPEG_IDLE) {
+ JPEG_DBG_HIGH("%s %d ] Error IRQ received state %d",
+ __func__, __LINE__, pgmn_dev->state);
+ JPEG_DBG_HIGH("%s %d ] Ignoring the Error", __func__,
+ __LINE__);
+ msm_jpegdma_hw_irq_clear(JPEGDMA_IRQ_CLEAR_BMSK,
+ JPEGDMA_IRQ_CLEAR_ALL, pgmn_dev->base);
+ return IRQ_HANDLED;
+ } else if (jpeg_irq_status & 0x00000400) {
+ msm_jpegdma_hw_irq_clear(JPEGDMA_IRQ_CLEAR_BMSK,
+ JPEGDMA_IRQ_CLEAR_ALL, pgmn_dev->base);
+ } else if (jpeg_irq_status & 0x1) {
+ msm_jpegdma_hw_irq_clear(JPEGDMA_IRQ_CLEAR_BMSK,
+ JPEGDMA_IRQ_CLEAR_ALL, pgmn_dev->base);
+ } else {
+ msm_jpegdma_hw_irq_clear(JPEGDMA_IRQ_CLEAR_BMSK,
+ jpeg_irq_status, pgmn_dev->base);
+ }
+
+ if (msm_jpegdma_hw_irq_is_frame_done(jpeg_irq_status)) {
+ /* send fe ping pong irq */
+ JPEG_DBG_HIGH("%s:%d] Session done\n", __func__, __LINE__);
+ data = msm_jpeg_core_fe_pingpong_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(MSM_JPEG_HW_MASK_COMP_FE,
+ context, data);
+ data = msm_jpeg_core_framedone_irq(jpeg_irq_status,
+ pgmn_dev);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(
+ MSM_JPEG_HW_MASK_COMP_FRAMEDONE,
+ context, data);
+ pgmn_dev->state = MSM_JPEG_INIT;
+ }
+ if (msm_jpegdma_hw_irq_is_reset_ack(jpeg_irq_status)) {
+ data = msm_jpeg_core_reset_ack_irq(jpeg_irq_status,
+ pgmn_dev);
+ spin_lock_irqsave(&pgmn_dev->reset_lock, flags);
+ pgmn_dev->reset_done_ack = 1;
+ spin_unlock_irqrestore(&pgmn_dev->reset_lock, flags);
+ wake_up(&pgmn_dev->reset_wait);
+ if (msm_jpeg_irq_handler)
+ msm_jpeg_irq_handler(
+ MSM_JPEG_HW_MASK_COMP_RESET_ACK,
+ context, data);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void msm_jpeg_core_irq_install(int (*irq_handler) (int, void *, void *))
+{
+ msm_jpeg_irq_handler = irq_handler;
+}
+
+void msm_jpeg_core_irq_remove(void)
+{
+ msm_jpeg_irq_handler = NULL;
+}
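
msm_jpeg_core_irq_install()/msm_jpeg_core_irq_remove() above expose a single module-level callback through which the ISRs dispatch FE, WE, framedone, reset-ack and error events. A minimal standalone model of that install-and-dispatch pattern (illustration only; all names below are made up for the sketch):

/* Illustration only: the install/dispatch callback pattern used above. */
#include <stdio.h>
#include <stddef.h>

static int (*evt_handler)(int event, void *context, void *data);

static void evt_handler_install(int (*handler)(int, void *, void *))
{
	evt_handler = handler;
}

static void evt_handler_remove(void)
{
	evt_handler = NULL;
}

static int log_event(int event, void *context, void *data)
{
	printf("event %d, data %p\n", event, data);
	return 0;
}

int main(void)
{
	evt_handler_install(log_event);
	if (evt_handler)		/* same NULL check the ISRs make */
		evt_handler(1, NULL, NULL);
	evt_handler_remove();
	return 0;
}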
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_core.h b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_core.h
new file mode 100644
index 000000000000..0688538b0361
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_core.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_CORE_H
+#define MSM_JPEG_CORE_H
+
+#include <linux/interrupt.h>
+#include "msm_jpeg_hw.h"
+#include "msm_jpeg_sync.h"
+
+#define msm_jpeg_core_buf msm_jpeg_hw_buf
+
+irqreturn_t msm_jpeg_core_irq(int irq_num, void *context);
+irqreturn_t msm_jpegdma_core_irq(int irq_num, void *context);
+void msm_jpeg_core_irq_install(int (*irq_handler) (int, void *, void *));
+void msm_jpeg_core_irq_remove(void);
+
+int msm_jpeg_core_fe_buf_update(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf);
+int msm_jpeg_core_we_buf_update(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf);
+int msm_jpeg_core_we_buf_reset(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_hw_buf *buf);
+
+int msm_jpeg_core_reset(struct msm_jpeg_device *pgmn_dev, uint8_t op_mode,
+ void *base, int size);
+int msm_jpeg_core_fe_start(struct msm_jpeg_device *);
+
+void msm_jpeg_core_release(struct msm_jpeg_device *);
+void msm_jpeg_core_init(struct msm_jpeg_device *);
+#endif /* MSM_JPEG_CORE_H */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c
new file mode 100644
index 000000000000..437af72a6a55
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_dev.c
@@ -0,0 +1,343 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <media/msm_jpeg.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "msm_jpeg_sync.h"
+#include "msm_jpeg_common.h"
+
+#define MSM_JPEG_NAME "jpeg"
+#define DEV_NAME_LEN 10
+
+static int msm_jpeg_open(struct inode *inode, struct file *filp)
+{
+ int rc = 0;
+
+ struct msm_jpeg_device *pgmn_dev = container_of(inode->i_cdev,
+ struct msm_jpeg_device, cdev);
+ filp->private_data = pgmn_dev;
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+
+ rc = __msm_jpeg_open(pgmn_dev);
+
+ JPEG_DBG("%s:%d] %s open_count = %d\n", __func__, __LINE__,
+ filp->f_path.dentry->d_name.name, pgmn_dev->open_count);
+
+ return rc;
+}
+
+static int msm_jpeg_release(struct inode *inode, struct file *filp)
+{
+ int rc;
+
+ struct msm_jpeg_device *pgmn_dev = filp->private_data;
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+
+ rc = __msm_jpeg_release(pgmn_dev);
+
+ JPEG_DBG("%s:%d] %s open_count = %d\n", __func__, __LINE__,
+ filp->f_path.dentry->d_name.name, pgmn_dev->open_count);
+ return rc;
+}
+#ifdef CONFIG_COMPAT
+static long msm_jpeg_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+ struct msm_jpeg_device *pgmn_dev = filp->private_data;
+
+ JPEG_DBG("%s:%d] cmd=%d pgmn_dev=0x%lx arg=0x%lx\n", __func__,
+ __LINE__, _IOC_NR(cmd), (unsigned long)pgmn_dev,
+ (unsigned long)arg);
+
+ rc = __msm_jpeg_compat_ioctl(pgmn_dev, cmd, arg);
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ return rc;
+}
+#endif
+static long msm_jpeg_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+ struct msm_jpeg_device *pgmn_dev = filp->private_data;
+
+ JPEG_DBG("%s:%d] cmd=%d pgmn_dev=0x%lx arg=0x%lx\n", __func__,
+ __LINE__, _IOC_NR(cmd), (unsigned long)pgmn_dev,
+ (unsigned long)arg);
+
+ rc = __msm_jpeg_ioctl(pgmn_dev, cmd, arg);
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ return rc;
+}
+
+static const struct file_operations msm_jpeg_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_jpeg_open,
+ .release = msm_jpeg_release,
+ .unlocked_ioctl = msm_jpeg_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = msm_jpeg_compat_ioctl,
+#endif
+};
+
+
+int msm_jpeg_subdev_init(struct v4l2_subdev *jpeg_sd)
+{
+ int rc;
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *)jpeg_sd->host_priv;
+
+ JPEG_DBG("%s:%d: jpeg_sd=0x%lx pgmn_dev=0x%lx\n",
+ __func__, __LINE__, (unsigned long)jpeg_sd,
+ (unsigned long)pgmn_dev);
+ rc = __msm_jpeg_open(pgmn_dev);
+ JPEG_DBG("%s:%d: rc=%d\n",
+ __func__, __LINE__, rc);
+ return rc;
+}
+
+static long msm_jpeg_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ long rc;
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *)sd->host_priv;
+
+ JPEG_DBG("%s: cmd=%d\n", __func__, cmd);
+
+ JPEG_DBG("%s: pgmn_dev 0x%lx", __func__, (unsigned long)pgmn_dev);
+
+ JPEG_DBG("%s: Calling __msm_jpeg_ioctl\n", __func__);
+
+ rc = __msm_jpeg_ioctl(pgmn_dev, cmd, (unsigned long)arg);
+ pr_debug("%s: X\n", __func__);
+ return rc;
+}
+
+void msm_jpeg_subdev_release(struct v4l2_subdev *jpeg_sd)
+{
+ int rc;
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *)jpeg_sd->host_priv;
+ JPEG_DBG("%s:pgmn_dev=0x%lx", __func__, (unsigned long)pgmn_dev);
+ rc = __msm_jpeg_release(pgmn_dev);
+ JPEG_DBG("%s:rc=%d", __func__, rc);
+}
+
+static const struct v4l2_subdev_core_ops msm_jpeg_subdev_core_ops = {
+ .ioctl = msm_jpeg_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_jpeg_subdev_ops = {
+ .core = &msm_jpeg_subdev_core_ops,
+};
+
+struct msm_jpeg_priv_data {
+ enum msm_jpeg_core_type core_type;
+};
+
+static const struct msm_jpeg_priv_data msm_jpeg_priv_data_jpg = {
+ .core_type = MSM_JPEG_CORE_CODEC
+};
+static const struct msm_jpeg_priv_data msm_jpeg_priv_data_dma = {
+ .core_type = MSM_JPEG_CORE_DMA
+};
+
+static const struct of_device_id msm_jpeg_dt_match[] = {
+ {.compatible = "qcom,jpeg", .data = &msm_jpeg_priv_data_jpg},
+ {.compatible = "qcom,jpeg_dma", .data = &msm_jpeg_priv_data_dma},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_jpeg_dt_match);
+
+static int msm_jpeg_init_dev(struct platform_device *pdev)
+{
+ int rc = -1;
+ struct device *dev;
+ struct msm_jpeg_device *msm_jpeg_device_p;
+ const struct of_device_id *device_id;
+ const struct msm_jpeg_priv_data *priv_data;
+ char devname[DEV_NAME_LEN];
+
+ msm_jpeg_device_p = kzalloc(sizeof(struct msm_jpeg_device), GFP_KERNEL);
+ if (!msm_jpeg_device_p) {
+ JPEG_PR_ERR("%s: no mem\n", __func__);
+ return -ENOMEM;
+ }
+
+ msm_jpeg_device_p->pdev = pdev;
+
+ device_id = of_match_device(msm_jpeg_dt_match, &pdev->dev);
+ if (!device_id) {
+ JPEG_PR_ERR("%s: device_id is NULL\n", __func__);
+ goto fail;
+ }
+
+ priv_data = device_id->data;
+ msm_jpeg_device_p->core_type = priv_data->core_type;
+
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node, "cell-index",
+ &pdev->id);
+
+ snprintf(devname, sizeof(devname), "%s%d", MSM_JPEG_NAME, pdev->id);
+
+ rc = __msm_jpeg_init(msm_jpeg_device_p);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: initialization failed\n", __func__);
+ goto fail;
+ }
+
+ v4l2_subdev_init(&msm_jpeg_device_p->subdev, &msm_jpeg_subdev_ops);
+ v4l2_set_subdev_hostdata(&msm_jpeg_device_p->subdev, msm_jpeg_device_p);
+ JPEG_DBG("%s: msm_jpeg_device_p 0x%lx", __func__,
+ (unsigned long)msm_jpeg_device_p);
+
+ rc = alloc_chrdev_region(&msm_jpeg_device_p->msm_jpeg_devno, 0, 1,
+ devname);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: failed to allocate chrdev\n", __func__);
+ goto fail_1;
+ }
+
+ if (!msm_jpeg_device_p->msm_jpeg_class) {
+ msm_jpeg_device_p->msm_jpeg_class =
+ class_create(THIS_MODULE, devname);
+ if (IS_ERR(msm_jpeg_device_p->msm_jpeg_class)) {
+ rc = PTR_ERR(msm_jpeg_device_p->msm_jpeg_class);
+ JPEG_PR_ERR("%s: create device class failed\n",
+ __func__);
+ goto fail_2;
+ }
+ }
+
+ dev = device_create(msm_jpeg_device_p->msm_jpeg_class, NULL,
+ MKDEV(MAJOR(msm_jpeg_device_p->msm_jpeg_devno),
+ MINOR(msm_jpeg_device_p->msm_jpeg_devno)), NULL,
+ "%s%d", MSM_JPEG_NAME, pdev->id);
+ if (IS_ERR(dev)) {
+ JPEG_PR_ERR("%s: error creating device\n", __func__);
+ rc = -ENODEV;
+ goto fail_3;
+ }
+
+ cdev_init(&msm_jpeg_device_p->cdev, &msm_jpeg_fops);
+ msm_jpeg_device_p->cdev.owner = THIS_MODULE;
+ msm_jpeg_device_p->cdev.ops =
+ (const struct file_operations *) &msm_jpeg_fops;
+ rc = cdev_add(&msm_jpeg_device_p->cdev,
+ msm_jpeg_device_p->msm_jpeg_devno, 1);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: error adding cdev\n", __func__);
+ rc = -ENODEV;
+ goto fail_4;
+ }
+
+ platform_set_drvdata(pdev, msm_jpeg_device_p);
+
+ JPEG_DBG("%s %s%d: success\n", __func__, MSM_JPEG_NAME, pdev->id);
+
+ return rc;
+
+fail_4:
+ device_destroy(msm_jpeg_device_p->msm_jpeg_class,
+ msm_jpeg_device_p->msm_jpeg_devno);
+
+fail_3:
+ class_destroy(msm_jpeg_device_p->msm_jpeg_class);
+
+fail_2:
+ unregister_chrdev_region(msm_jpeg_device_p->msm_jpeg_devno, 1);
+
+fail_1:
+ __msm_jpeg_exit(msm_jpeg_device_p);
+ return rc;
+
+fail:
+ kfree(msm_jpeg_device_p);
+ return rc;
+
+}
+
+static void msm_jpeg_exit(struct msm_jpeg_device *msm_jpeg_device_p)
+{
+ cdev_del(&msm_jpeg_device_p->cdev);
+ device_destroy(msm_jpeg_device_p->msm_jpeg_class,
+ msm_jpeg_device_p->msm_jpeg_devno);
+ class_destroy(msm_jpeg_device_p->msm_jpeg_class);
+ unregister_chrdev_region(msm_jpeg_device_p->msm_jpeg_devno, 1);
+ cam_smmu_destroy_handle(msm_jpeg_device_p->iommu_hdl);
+
+ __msm_jpeg_exit(msm_jpeg_device_p);
+}
+
+static int __msm_jpeg_probe(struct platform_device *pdev)
+{
+ return msm_jpeg_init_dev(pdev);
+}
+
+static int __msm_jpeg_remove(struct platform_device *pdev)
+{
+ struct msm_jpeg_device *msm_jpegd_device_p;
+
+ msm_jpegd_device_p = platform_get_drvdata(pdev);
+ if (msm_jpegd_device_p)
+ msm_jpeg_exit(msm_jpegd_device_p);
+
+ return 0;
+}
+
+static struct platform_driver msm_jpeg_driver = {
+ .probe = __msm_jpeg_probe,
+ .remove = __msm_jpeg_remove,
+ .driver = {
+ .name = "msm_jpeg",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_jpeg_dt_match,
+ },
+};
+
+static int __init msm_jpeg_driver_init(void)
+{
+ int rc;
+ rc = platform_driver_register(&msm_jpeg_driver);
+ return rc;
+}
+
+static void __exit msm_jpeg_driver_exit(void)
+{
+ platform_driver_unregister(&msm_jpeg_driver);
+}
+
+MODULE_DESCRIPTION("MSM JPEG driver");
+
+module_init(msm_jpeg_driver_init);
+module_exit(msm_jpeg_driver_exit);
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c
new file mode 100644
index 000000000000..9339029dbc0f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.c
@@ -0,0 +1,931 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include "msm_jpeg_hw.h"
+#include "msm_jpeg_common.h"
+#include "msm_camera_io_util.h"
+
+#include <linux/io.h>
+
+int msm_jpeg_hw_pingpong_update(struct msm_jpeg_hw_pingpong *pingpong_hw,
+ struct msm_jpeg_hw_buf *buf, void *base)
+{
+ int buf_free_index = -1;
+
+ if (!pingpong_hw->buf_status[0]) {
+ buf_free_index = 0;
+ } else if (!pingpong_hw->buf_status[1]) {
+ buf_free_index = 1;
+ } else {
+ JPEG_PR_ERR("%s:%d: pingpong buffer busy\n",
+ __func__, __LINE__);
+ return -EBUSY;
+ }
+
+ pingpong_hw->buf[buf_free_index] = *buf;
+ pingpong_hw->buf_status[buf_free_index] = 1;
+
+ if (pingpong_hw->is_fe) {
+ /* it is fe */
+ msm_jpeg_hw_fe_buffer_update(
+ &pingpong_hw->buf[buf_free_index], buf_free_index,
+ base);
+ } else {
+ /* it is we */
+ msm_jpeg_hw_we_buffer_update(
+ &pingpong_hw->buf[buf_free_index], buf_free_index,
+ base);
+ }
+ return 0;
+}
+
+int msm_jpegdma_hw_pingpong_update(struct msm_jpeg_hw_pingpong *pingpong_hw,
+ struct msm_jpeg_hw_buf *buf, void *base)
+{
+ int buf_free_index = -1;
+
+ if (!pingpong_hw->buf_status[0]) {
+ buf_free_index = 0;
+ } else if (!pingpong_hw->buf_status[1]) {
+ buf_free_index = 1;
+ } else {
+ JPEG_PR_ERR("%s:%d: pingpong buffer busy\n",
+ __func__, __LINE__);
+ return -EBUSY;
+ }
+
+ pingpong_hw->buf[buf_free_index] = *buf;
+ pingpong_hw->buf_status[buf_free_index] = 1;
+
+ if (pingpong_hw->is_fe) {
+ /* it is fe */
+ msm_jpegdma_hw_fe_buffer_update(
+ &pingpong_hw->buf[buf_free_index], buf_free_index,
+ base);
+ } else {
+ /* it is we */
+ msm_jpegdma_hw_we_buffer_update(
+ &pingpong_hw->buf[buf_free_index], buf_free_index,
+ base);
+ }
+ return 0;
+}
+void *msm_jpeg_hw_pingpong_irq(struct msm_jpeg_hw_pingpong *pingpong_hw)
+{
+ struct msm_jpeg_hw_buf *buf_p = NULL;
+
+ if (pingpong_hw->buf_status[pingpong_hw->buf_active_index]) {
+ buf_p = &pingpong_hw->buf[pingpong_hw->buf_active_index];
+ pingpong_hw->buf_status[pingpong_hw->buf_active_index] = 0;
+ }
+
+ pingpong_hw->buf_active_index = !pingpong_hw->buf_active_index;
+
+ return (void *) buf_p;
+}
+
+void *msm_jpeg_hw_pingpong_active_buffer(
+ struct msm_jpeg_hw_pingpong *pingpong_hw)
+{
+ struct msm_jpeg_hw_buf *buf_p = NULL;
+
+ if (pingpong_hw->buf_status[pingpong_hw->buf_active_index])
+ buf_p = &pingpong_hw->buf[pingpong_hw->buf_active_index];
+
+ return (void *) buf_p;
+}
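
The three helpers above implement the two-slot ping-pong scheme shared by the fetch and write engines: update() queues a buffer into a free slot, the IRQ path retires the slot the hardware was working on and flips the active index. A simplified standalone model (illustration only; the real struct msm_jpeg_hw_pingpong lives in msm_jpeg_hw.h and its fields are only mirrored loosely here):

/*
 * Illustration only: a standalone model of the ping-pong bookkeeping used
 * above (two slots, a busy flag per slot, and an active index that flips
 * on every completion IRQ).
 */
#include <stdio.h>

struct pingpong {
	int buf[2];            /* stands in for struct msm_jpeg_hw_buf */
	int buf_status[2];     /* 1 = queued to hardware */
	int buf_active_index;  /* slot the hardware is working on */
};

static int pingpong_update(struct pingpong *pp, int buf)
{
	int free_index;

	if (!pp->buf_status[0])
		free_index = 0;
	else if (!pp->buf_status[1])
		free_index = 1;
	else
		return -1;      /* both slots busy, mirrors -EBUSY */

	pp->buf[free_index] = buf;
	pp->buf_status[free_index] = 1;
	return 0;
}

static int pingpong_irq(struct pingpong *pp)
{
	int done = -1;

	if (pp->buf_status[pp->buf_active_index]) {
		done = pp->buf[pp->buf_active_index];
		pp->buf_status[pp->buf_active_index] = 0;
	}
	pp->buf_active_index = !pp->buf_active_index;
	return done;
}

int main(void)
{
	struct pingpong pp = {0};

	pingpong_update(&pp, 100);                /* ping */
	pingpong_update(&pp, 200);                /* pong */
	printf("done: %d\n", pingpong_irq(&pp));  /* 100 */
	printf("done: %d\n", pingpong_irq(&pp));  /* 200 */
	return 0;
}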
+
+struct msm_jpeg_hw_cmd hw_cmd_irq_get_status[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_READ, 1, JPEG_IRQ_STATUS_ADDR,
+ JPEG_IRQ_STATUS_BMSK, {0} },
+};
+
+int msm_jpeg_hw_irq_get_status(void *base)
+{
+ uint32_t n_irq_status = 0;
+ n_irq_status = msm_jpeg_hw_read(&hw_cmd_irq_get_status[0], base);
+ return n_irq_status;
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_irq_get_dmastatus[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_READ, 1, JPEGDMA_IRQ_STATUS_ADDR,
+ JPEGDMA_IRQ_STATUS_BMSK, {0} },
+};
+
+int msm_jpegdma_hw_irq_get_status(void *base)
+{
+ uint32_t n_irq_status = 0;
+ n_irq_status = msm_jpeg_hw_read(&hw_cmd_irq_get_dmastatus[0], base);
+ return n_irq_status;
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_encode_output_size[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_READ, 1,
+ JPEG_ENCODE_OUTPUT_SIZE_STATUS_ADDR,
+ JPEG_ENCODE_OUTPUT_SIZE_STATUS_BMSK, {0} } ,
+};
+
+long msm_jpeg_hw_encode_output_size(void *base)
+{
+ uint32_t encode_output_size = 0;
+
+ encode_output_size = msm_jpeg_hw_read(&hw_cmd_encode_output_size[0],
+ base);
+
+ return encode_output_size;
+}
+
+void msm_jpeg_hw_irq_clear(uint32_t mask, uint32_t data, void *base)
+{
+ struct msm_jpeg_hw_cmd cmd_irq_clear;
+
+ cmd_irq_clear.type = MSM_JPEG_HW_CMD_TYPE_WRITE;
+ cmd_irq_clear.n = 1;
+ cmd_irq_clear.offset = JPEG_IRQ_CLEAR_ADDR;
+ cmd_irq_clear.mask = mask;
+ cmd_irq_clear.data = data;
+ JPEG_DBG("%s:%d] mask %0x data %0x", __func__, __LINE__, mask, data);
+ msm_jpeg_hw_write(&cmd_irq_clear, base);
+}
+
+void msm_jpegdma_hw_irq_clear(uint32_t mask, uint32_t data, void *base)
+{
+ struct msm_jpeg_hw_cmd cmd_irq_clear;
+
+ cmd_irq_clear.type = MSM_JPEG_HW_CMD_TYPE_WRITE;
+ cmd_irq_clear.n = 1;
+ cmd_irq_clear.offset = JPEGDMA_IRQ_CLEAR_ADDR;
+ cmd_irq_clear.mask = mask;
+ cmd_irq_clear.data = data;
+ JPEG_DBG("%s:%d] mask %0x data %0x", __func__, __LINE__, mask, data);
+ msm_jpeg_hw_write(&cmd_irq_clear, base);
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_fe_ping_update[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_IRQ_MASK_ADDR,
+ JPEG_IRQ_MASK_BMSK, {JPEG_IRQ_ALLSOURCES_ENABLE} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_CMD_ADDR,
+ JPEG_CMD_BMSK, {JPEG_CMD_CLEAR_WRITE_PLN_QUEUES} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN0_RD_OFFSET_ADDR,
+ JPEG_PLN0_RD_OFFSET_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN0_RD_PNTR_ADDR,
+ JPEG_PLN0_RD_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN1_RD_OFFSET_ADDR,
+ JPEG_PLN1_RD_OFFSET_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN1_RD_PNTR_ADDR,
+ JPEG_PLN1_RD_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN2_RD_OFFSET_ADDR,
+ JPEG_PLN1_RD_OFFSET_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN2_RD_PNTR_ADDR,
+ JPEG_PLN2_RD_PNTR_BMSK, {0} },
+};
+
+void msm_jpeg_hw_fe_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ if (pingpong_index == 0) {
+ hw_cmd_p = &hw_cmd_fe_ping_update[0];
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->y_buffer_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->cbcr_buffer_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->pln2_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+ }
+ return;
+}
+
+struct msm_jpeg_hw_cmd hw_dma_cmd_fe_ping_update[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_IRQ_MASK_ADDR,
+ JPEGDMA_IRQ_MASK_BMSK, {JPEG_IRQ_ALLSOURCES_ENABLE} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_CMD_ADDR,
+ JPEGDMA_CMD_BMSK, {JPEGDMA_CMD_CLEAR_READ_PLN_QUEUES} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_FE_0_RD_PNTR,
+ JPEG_PLN0_RD_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_FE_1_RD_PNTR,
+ JPEG_PLN1_RD_PNTR_BMSK, {0} },
+};
+
+void msm_jpegdma_hw_fe_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ if (pingpong_index != 0)
+ return;
+
+ hw_cmd_p = &hw_dma_cmd_fe_ping_update[0];
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->y_buffer_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->cbcr_buffer_addr;
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_fe_start[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_CMD_ADDR,
+ JPEG_CMD_BMSK, {JPEG_OFFLINE_CMD_START} },
+};
+
+void msm_jpeg_hw_fe_start(void *base)
+{
+ msm_jpeg_hw_write(&hw_cmd_fe_start[0], base);
+
+ return;
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_we_ping_update[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN0_WR_PNTR_ADDR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN1_WR_PNTR_ADDR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_PLN2_WR_PNTR_ADDR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+};
+
+void msm_jpeg_decode_status(void *base)
+{
+ uint32_t data;
+ data = msm_camera_io_r(base + JPEG_DECODE_MCUS_DECODED_STATUS);
+ JPEG_DBG_HIGH("Decode MCUs decode status %u", data);
+ data = msm_camera_io_r(base + JPEG_DECODE_BITS_CONSUMED_STATUS);
+ JPEG_DBG_HIGH("Decode bits consumed status %u", data);
+ data = msm_camera_io_r(base + JPEG_DECODE_PRED_Y_STATE);
+ JPEG_DBG_HIGH("Decode prediction Y state %u", data);
+ data = msm_camera_io_r(base + JPEG_DECODE_PRED_C_STATE);
+ JPEG_DBG_HIGH("Decode prediction C state %u", data);
+ data = msm_camera_io_r(base + JPEG_DECODE_RSM_STATE);
+ JPEG_DBG_HIGH("Decode prediction RSM state %u", data);
+}
+
+
+void msm_jpeg_hw_we_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ if (pingpong_index == 0) {
+ hw_cmd_p = &hw_cmd_we_ping_update[0];
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->y_buffer_addr;
+ JPEG_DBG_HIGH("%s Output pln0 buffer address is %x\n", __func__,
+ p_input->y_buffer_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->cbcr_buffer_addr;
+ JPEG_DBG_HIGH("%s Output pln1 buffer address is %x\n", __func__,
+ p_input->cbcr_buffer_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->pln2_addr;
+ JPEG_DBG_HIGH("%s Output pln2 buffer address is %x\n", __func__,
+ p_input->pln2_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ }
+ return;
+}
+
+struct msm_jpeg_hw_cmd hw_dma_cmd_we_ping_update[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_CMD_ADDR,
+ JPEGDMA_CMD_BMSK, {JPEGDMA_CMD_CLEAR_WRITE_PLN_QUEUES} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_WE_0_WR_PNTR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_WE_1_WR_PNTR,
+ JPEG_PLN0_WR_PNTR_BMSK, {0} },
+};
+void msm_jpegdma_hw_we_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ if (pingpong_index != 0)
+ return;
+
+ hw_cmd_p = &hw_dma_cmd_we_ping_update[0];
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+
+ /* ensure write is done */
+ wmb();
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->y_buffer_addr;
+ JPEG_DBG_HIGH("%s Output we 0 buffer address is %x\n", __func__,
+ p_input->y_buffer_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = p_input->cbcr_buffer_addr;
+ JPEG_DBG_HIGH("%s Output we 1 buffer address is %x\n", __func__,
+ p_input->cbcr_buffer_addr);
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_fe_mmu_prefetch[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S0_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S0_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S0_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S0_MMU_PF_ADDR_MAX_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S1_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S1_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S1_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S1_MMU_PF_ADDR_MAX_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S2_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S2_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S2_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S2_MMU_PF_ADDR_MAX_BMSK, {0} },
+};
+
+/*
+ * msm_jpeg_hw_fe_mmu_prefetch() - writes fe min/max addrs for each plane to
+ * MMU prefetch registers.
+ * @buf: Pointer to jpeg hw buffer.
+ * @base: Pointer to base address.
+ * @decode_flag: Jpeg decode flag.
+ *
+ * This function writes fe min/max address for each plane to MMU prefetch
+ * registers; the MMU prefetch hardware will only prefetch address translations
+ * within this min/max boundary.
+ *
+ * Return: None.
+ */
+void msm_jpeg_hw_fe_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *base,
+ uint8_t decode_flag)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ hw_cmd_p = &hw_cmd_fe_mmu_prefetch[0];
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN y_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX y_buf_addr %08x, y_len %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ if (!decode_flag) {
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->cbcr_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN cbcr_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->cbcr_buffer_addr;
+ if (buf->cbcr_len)
+ tmp_hw_cmd.data += buf->cbcr_len - 1;
+
+ JPEG_DBG("%s:%d: MAX cbcr_buf_addr %08x, cbcr_len %d\n"
+ , __func__, __LINE__, tmp_hw_cmd.data, buf->cbcr_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->pln2_addr;
+
+ JPEG_DBG("%s:%d: MIN pln2_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->pln2_addr;
+ if (buf->pln2_len)
+ tmp_hw_cmd.data += buf->pln2_len - 1;
+
+ JPEG_DBG("%s:%d: MAX pln2_buf_addr %08x, pln2_len %d\n"
+ , __func__, __LINE__, tmp_hw_cmd.data, buf->pln2_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ }
+ /* ensure write is done */
+ wmb();
+}
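
The MIN/MAX values written above bound the SMMU prefetch window per plane: MIN is the first byte of the plane and MAX is the last byte, i.e. address + length - 1 when the length is non-zero. A short worked example of that arithmetic (illustration only, with made-up values):

/*
 * Illustration only: the prefetch window arithmetic used above. The MIN
 * register takes the first byte of the plane and the MAX register the
 * last byte (inclusive).
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t y_addr = 0x10000000, y_len = 0x00100000; /* example values */
	uint32_t pf_min = y_addr;
	uint32_t pf_max = y_len ? y_addr + y_len - 1 : y_addr;

	printf("PF_ADDR_MIN 0x%08x PF_ADDR_MAX 0x%08x\n", pf_min, pf_max);
	/* prints PF_ADDR_MIN 0x10000000 PF_ADDR_MAX 0x100fffff */
	return 0;
}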
+
+struct msm_jpeg_hw_cmd hw_cmd_we_mmu_prefetch[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S1_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S1_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S1_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S1_MMU_PF_ADDR_MAX_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S2_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S2_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S2_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S2_MMU_PF_ADDR_MAX_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S3_MMU_PF_ADDR_MIN,
+ MSM_JPEG_S3_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEG_S3_MMU_PF_ADDR_MAX,
+ MSM_JPEG_S3_MMU_PF_ADDR_MAX_BMSK, {0} },
+};
+
+/*
+ * msm_jpeg_hw_we_mmu_prefetch() - write we min/max addrs for each plane to
+ * MMU prefetch registers.
+ * @buf: Pointer to jpeg hw buffer.
+ * @base: Pointer to base address.
+ * @decode_flag: Jpeg decode flag.
+ *
+ * This function writes we min/max address for each plane to MMU prefetch
+ * registers; the MMU prefetch hardware will only prefetch address translations
+ * within this min/max boundary.
+ *
+ * Return: None.
+ */
+void msm_jpeg_hw_we_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *base,
+ uint8_t decode_flag)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ hw_cmd_p = &hw_cmd_we_mmu_prefetch[0];
+
+ /* ensure write is done */
+ wmb();
+ if (decode_flag) {
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN y_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX y_buf_addr %08x, y_len %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->cbcr_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN cbcr_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->cbcr_buffer_addr;
+ if (buf->cbcr_len)
+ tmp_hw_cmd.data += buf->cbcr_len - 1;
+
+ JPEG_DBG("%s:%d: MAX cbcr_buf_addr %08x, cbcr_len %d\n"
+ , __func__, __LINE__, tmp_hw_cmd.data, buf->cbcr_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->pln2_addr;
+
+ JPEG_DBG("%s:%d: MIN pln2_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->pln2_addr;
+ if (buf->pln2_len)
+ tmp_hw_cmd.data += buf->pln2_len - 1;
+
+ JPEG_DBG("%s:%d: MAX pln2_buf_addr %08x, pln2_len %d\n"
+ , __func__, __LINE__, tmp_hw_cmd.data, buf->pln2_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ } else {
+ hw_cmd_p += 4;
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN y_buf_addr %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX y_buf_addr %08x, y_len %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ }
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_dma_cmd_fe_mmu_prefetch[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN,
+ MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX,
+ MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX_BMSK, {0} },
+};
+
+/*
+ * msm_jpegdma_hw_fe_mmu_prefetch() - write DMA fe min/max addrs to
+ * MMU prefetch registers.
+ * @buf: Pointer to jpeg hw buffer.
+ * @base: Pointer to base address.
+ *
+ * This function writes DMA fe min/max address for each plane to MMU prefetch
+ * registers; the MMU prefetch hardware will only prefetch address translations
+ * within this min/max boundary.
+ *
+ * Return: None.
+ */
+void msm_jpegdma_hw_fe_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ hw_cmd_p = &hw_dma_cmd_fe_mmu_prefetch[0];
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN DMA addr %08x , reg offset %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data, tmp_hw_cmd.offset);
+
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX DMA addr %08x , reg offset %08x , length %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, tmp_hw_cmd.offset,
+ buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_dma_cmd_we_mmu_prefetch[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN,
+ MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN_BMSK, {0} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX,
+ MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX_BMSK, {0} },
+};
+
+/*
+ * msm_jpegdma_hw_we_mmu_prefetch() - write DMA we min/max addrs to
+ * MMU prefetch registers.
+ * @buf: Pointer to jpeg hw buffer.
+ * @base: Pointer to base address.
+ *
+ * This function writes DMA we min/max address for each plane to MMU prefetch
+ * registers; the MMU prefetch hardware will only prefetch address translations
+ * within this min/max boundary.
+ *
+ * Return: None.
+ */
+void msm_jpegdma_hw_we_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *base)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+ struct msm_jpeg_hw_cmd tmp_hw_cmd;
+
+ hw_cmd_p = &hw_dma_cmd_we_mmu_prefetch[0];
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+
+ JPEG_DBG("%s:%d: MIN DMA addr %08x , reg offset %08x\n",
+ __func__, __LINE__, tmp_hw_cmd.data, tmp_hw_cmd.offset);
+
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+
+ tmp_hw_cmd = *hw_cmd_p++;
+ tmp_hw_cmd.data = buf->y_buffer_addr;
+ if (buf->y_len)
+ tmp_hw_cmd.data += buf->y_len - 1;
+
+ JPEG_DBG("%s:%d: MAX DMA addr %08x , reg offset %08x , length %d\n",
+ __func__, __LINE__, tmp_hw_cmd.data, tmp_hw_cmd.offset,
+ buf->y_len);
+
+ msm_jpeg_hw_write(&tmp_hw_cmd, base);
+ /* ensure write is done */
+ wmb();
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_reset[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_IRQ_MASK_ADDR,
+ JPEG_IRQ_MASK_BMSK, {JPEG_IRQ_DISABLE_ALL} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_IRQ_CLEAR_ADDR,
+ JPEG_IRQ_MASK_BMSK, {JPEG_IRQ_CLEAR_ALL} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_IRQ_MASK_ADDR,
+ JPEG_IRQ_MASK_BMSK, {JPEG_IRQ_ALLSOURCES_ENABLE} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEG_RESET_CMD_ADDR,
+ JPEG_RESET_CMD_RMSK, {JPEG_RESET_DEFAULT} },
+};
+
+void msm_jpeg_hw_reset(void *base, int size)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+
+ hw_cmd_p = &hw_cmd_reset[0];
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p, base);
+ /* ensure write is done */
+ wmb();
+
+ return;
+}
+
+struct msm_jpeg_hw_cmd hw_cmd_reset_dma[] = {
+ /* type, repeat n times, offset, mask, data or pdata */
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_IRQ_MASK_ADDR,
+ JPEGDMA_IRQ_MASK_BMSK, {JPEGDMA_IRQ_DISABLE_ALL} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_IRQ_CLEAR_ADDR,
+ JPEGDMA_IRQ_MASK_BMSK, {JPEGDMA_IRQ_CLEAR_ALL} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_IRQ_MASK_ADDR,
+ JPEGDMA_IRQ_MASK_BMSK, {JPEGDMA_IRQ_ALLSOURCES_ENABLE} },
+ {MSM_JPEG_HW_CMD_TYPE_WRITE, 1, JPEGDMA_RESET_CMD_ADDR,
+ JPEGDMA_RESET_CMD_BMSK, {JPEGDMA_RESET_DEFAULT} },
+};
+
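+/*
+ * msm_jpeg_hw_reset_dma() - reset the JPEG DMA engine.
+ * @base: Pointer to base address.
+ * @size: Size of the register region (unused).
+ *
+ * Same sequence as msm_jpeg_hw_reset(), using the DMA interrupt and reset
+ * registers.
+ *
+ * Return: None.
+ */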
+void msm_jpeg_hw_reset_dma(void *base, int size)
+{
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+
+ hw_cmd_p = &hw_cmd_reset_dma[0];
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p++, base);
+ /* ensure write is done */
+ wmb();
+ msm_jpeg_hw_write(hw_cmd_p, base);
+ /* ensure write is done */
+ wmb();
+
+ return;
+}
+
+uint32_t msm_jpeg_hw_read(struct msm_jpeg_hw_cmd *hw_cmd_p,
+ void *jpeg_region_base)
+{
+ uint32_t *paddr;
+ uint32_t data;
+
+ paddr = jpeg_region_base + hw_cmd_p->offset;
+
+ data = msm_camera_io_r(paddr);
+ data &= hw_cmd_p->mask;
+
+ return data;
+}
+
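+/*
+ * msm_jpeg_hw_write() - write a hw command value to a JPEG register.
+ * @hw_cmd_p: Command holding the register offset, mask and data.
+ * @jpeg_region_base: Pointer to base address.
+ *
+ * For a full mask the data is written directly; otherwise the register is
+ * read back and only the masked bits are updated (read-modify-write).
+ */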
+void msm_jpeg_hw_write(struct msm_jpeg_hw_cmd *hw_cmd_p,
+ void *jpeg_region_base)
+{
+ uint32_t *paddr;
+ uint32_t old_data, new_data;
+
+ paddr = jpeg_region_base + hw_cmd_p->offset;
+
+ if (hw_cmd_p->mask == 0xffffffff) {
+ old_data = 0;
+ } else {
+ old_data = msm_camera_io_r(paddr);
+ old_data &= ~hw_cmd_p->mask;
+ }
+
+ new_data = hw_cmd_p->data & hw_cmd_p->mask;
+ new_data |= old_data;
+ JPEG_DBG("%s:%d] %p %08x\n", __func__, __LINE__,
+ paddr, new_data);
+ msm_camera_io_w(new_data, paddr);
+}
+
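+/*
+ * msm_jpeg_hw_wait() - poll a register until it holds the expected value.
+ * @hw_cmd_p: Command holding the offset, mask, expected data and the
+ *            maximum number of polls (n). The last value read is stored
+ *            back in hw_cmd_p->data.
+ * @m_us: Delay between polls in microseconds.
+ * @base: Pointer to base address.
+ *
+ * Return: Remaining poll count; 0 means the wait timed out.
+ */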
+int msm_jpeg_hw_wait(struct msm_jpeg_hw_cmd *hw_cmd_p, int m_us,
+ void *base)
+{
+ int tm = hw_cmd_p->n;
+ uint32_t data;
+ uint32_t wait_data = hw_cmd_p->data & hw_cmd_p->mask;
+
+ data = msm_jpeg_hw_read(hw_cmd_p, base);
+ if (data != wait_data) {
+ while (tm) {
+ udelay(m_us);
+ data = msm_jpeg_hw_read(hw_cmd_p, base);
+ if (data == wait_data)
+ break;
+ tm--;
+ }
+ }
+ hw_cmd_p->data = data;
+ return tm;
+}
+
+void msm_jpeg_hw_delay(struct msm_jpeg_hw_cmd *hw_cmd_p, int m_us)
+{
+ int tm = hw_cmd_p->n;
+ while (tm) {
+ udelay(m_us);
+ tm--;
+ }
+}
+
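+/*
+ * msm_jpeg_hw_exec_cmds() - execute an array of hw commands.
+ * @hw_cmd_p: Array of read/write/wait/delay commands.
+ * @m_cmds: Number of commands in the array.
+ * @max_size: Size of the register region, used to bound-check offsets.
+ * @base: Pointer to base address.
+ *
+ * Every command offset is checked against max_size and for 32-bit
+ * alignment before it is executed.
+ *
+ * Return: 1 if a read command was executed and its result must be copied
+ * back to userspace, 0 otherwise, -EFAULT on an invalid offset.
+ */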
+int msm_jpeg_hw_exec_cmds(struct msm_jpeg_hw_cmd *hw_cmd_p, uint32_t m_cmds,
+ uint32_t max_size, void *base)
+{
+ int is_copy_to_user = 0;
+ uint32_t data;
+
+ while (m_cmds--) {
+ if (hw_cmd_p->offset > max_size) {
+ JPEG_PR_ERR("%s:%d] %d exceed hw region %d\n", __func__,
+ __LINE__, hw_cmd_p->offset, max_size);
+ return -EFAULT;
+ }
+ if (hw_cmd_p->offset & 0x3) {
+ JPEG_PR_ERR("%s:%d] %d Invalid alignment\n", __func__,
+ __LINE__, hw_cmd_p->offset);
+ return -EFAULT;
+ }
+
+ switch (hw_cmd_p->type) {
+ case MSM_JPEG_HW_CMD_TYPE_READ:
+ hw_cmd_p->data = msm_jpeg_hw_read(hw_cmd_p, base);
+ is_copy_to_user = 1;
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_WRITE:
+ msm_jpeg_hw_write(hw_cmd_p, base);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_WRITE_OR:
+ data = msm_jpeg_hw_read(hw_cmd_p, base);
+ hw_cmd_p->data = (hw_cmd_p->data & hw_cmd_p->mask) |
+ data;
+ msm_jpeg_hw_write(hw_cmd_p, base);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_UWAIT:
+ msm_jpeg_hw_wait(hw_cmd_p, 1, base);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_MWAIT:
+ msm_jpeg_hw_wait(hw_cmd_p, 1000, base);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_UDELAY:
+ msm_jpeg_hw_delay(hw_cmd_p, 1);
+ break;
+
+ case MSM_JPEG_HW_CMD_TYPE_MDELAY:
+ msm_jpeg_hw_delay(hw_cmd_p, 1000);
+ break;
+
+ default:
+ JPEG_PR_ERR("wrong hw command type\n");
+ break;
+ }
+
+ hw_cmd_p++;
+ }
+ return is_copy_to_user;
+}
+
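+/*
+ * msm_jpeg_io_dump() - dump a register region to the debug log.
+ * @base: Pointer to base address.
+ * @size: Number of bytes to dump.
+ *
+ * Registers are read 32 bits at a time and printed four words per line.
+ */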
+void msm_jpeg_io_dump(void *base, int size)
+{
+ char line_str[128], *p_str;
+ void __iomem *addr = (void __iomem *)base;
+ int i;
+ u32 *p = (u32 *) addr;
+ u32 data;
+ JPEG_DBG_HIGH("%s:%d] %p %d", __func__, __LINE__, addr, size);
+ line_str[0] = '\0';
+ p_str = line_str;
+ for (i = 0; i < size/4; i++) {
+ if (i % 4 == 0) {
+ snprintf(p_str, 12, "%08lx: ", (unsigned long)p);
+ p_str += 10;
+ }
+ data = msm_camera_io_r(p++);
+ snprintf(p_str, 12, "%08x ", data);
+ p_str += 9;
+ if ((i + 1) % 4 == 0) {
+ JPEG_DBG_HIGH("%s\n", line_str);
+ line_str[0] = '\0';
+ p_str = line_str;
+ }
+ }
+ if (line_str[0] != '\0')
+ JPEG_DBG_HIGH("%s\n", line_str);
+}
+
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.h b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.h
new file mode 100644
index 000000000000..2c0d1f7e0528
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw.h
@@ -0,0 +1,142 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_HW_H
+#define MSM_JPEG_HW_H
+
+#include <media/msm_jpeg.h>
+#include "msm_jpeg_hw_reg.h"
+#include <linux/ion.h>
+
+struct msm_jpeg_hw_buf {
+ struct msm_jpeg_buf vbuf;
+ struct file *file;
+ uint32_t framedone_len;
+ uint32_t y_buffer_addr;
+ uint32_t y_len;
+ uint32_t cbcr_buffer_addr;
+ uint32_t cbcr_len;
+ uint32_t num_of_mcu_rows;
+ int ion_fd;
+ uint32_t pln2_addr;
+ uint32_t pln2_len;
+};
+
+struct msm_jpeg_hw_pingpong {
+ uint8_t is_fe; /* 1: fe; 0: we */
+ struct msm_jpeg_hw_buf buf[2];
+ int buf_status[2];
+ int buf_active_index;
+};
+
+int msm_jpeg_hw_pingpong_update(struct msm_jpeg_hw_pingpong *pingpong_hw,
+ struct msm_jpeg_hw_buf *buf, void *);
+int msm_jpegdma_hw_pingpong_update(struct msm_jpeg_hw_pingpong *pingpong_hw,
+ struct msm_jpeg_hw_buf *buf, void *);
+void *msm_jpeg_hw_pingpong_irq(struct msm_jpeg_hw_pingpong *pingpong_hw);
+void *msm_jpeg_hw_pingpong_active_buffer(struct msm_jpeg_hw_pingpong
+ *pingpong_hw);
+
+void msm_jpeg_hw_irq_clear(uint32_t, uint32_t, void *);
+void msm_jpegdma_hw_irq_clear(uint32_t, uint32_t, void *);
+int msm_jpeg_hw_irq_get_status(void *);
+int msm_jpegdma_hw_irq_get_status(void *);
+long msm_jpeg_hw_encode_output_size(void *);
+#define MSM_JPEG_HW_MASK_COMP_FRAMEDONE \
+ MSM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK
+#define MSM_JPEG_HW_MASK_COMP_FE \
+ MSM_JPEG_HW_IRQ_STATUS_FE_RD_DONE_MASK
+#define MSM_JPEG_HW_MASK_COMP_WE \
+ (MSM_JPEG_HW_IRQ_STATUS_WE_Y_PINGPONG_MASK | \
+ MSM_JPEG_HW_IRQ_STATUS_WE_CBCR_PINGPONG_MASK)
+#define MSM_JPEG_HW_MASK_COMP_RESET_ACK \
+ MSM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK
+#define MSM_JPEG_HW_MASK_COMP_ERR \
+ (MSM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \
+ MSM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \
+ MSM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK)
+
+#define msm_jpeg_hw_irq_is_frame_done(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_FRAMEDONE)
+#define msm_jpeg_hw_irq_is_fe_pingpong(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_FE)
+#define msm_jpeg_hw_irq_is_we_pingpong(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_WE)
+#define msm_jpeg_hw_irq_is_reset_ack(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_RESET_ACK)
+#define msm_jpeg_hw_irq_is_err(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEG_HW_MASK_COMP_ERR)
+
+
+#define MSM_JPEGDMA_HW_MASK_COMP_FRAMEDONE \
+ MSM_JPEGDMA_HW_IRQ_STATUS_FRAMEDONE_MASK
+#define MSM_JPEGDMA_HW_MASK_COMP_FE \
+ MSM_JPEGDMA_HW_IRQ_STATUS_FE_RD_DONE_MASK
+#define MSM_JPEGDMA_HW_MASK_COMP_WE \
+ (MSM_JPEGDMA_HW_IRQ_STATUS_WE_WR_DONE_MASK)
+#define MSM_JPEGDMA_HW_MASK_COMP_RESET_ACK \
+ MSM_JPEGDMA_HW_IRQ_STATUS_RESET_ACK_MASK
+
+
+#define msm_jpegdma_hw_irq_is_frame_done(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEGDMA_HW_MASK_COMP_FRAMEDONE)
+#define msm_jpegdma_hw_irq_is_fe_pingpong(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEGDMA_HW_MASK_COMP_FE)
+#define msm_jpegdma_hw_irq_is_we_pingpong(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEGDMA_HW_MASK_COMP_WE)
+#define msm_jpegdma_hw_irq_is_reset_ack(jpeg_irq_status) \
+ (jpeg_irq_status & MSM_JPEGDMA_HW_MASK_COMP_RESET_ACK)
+
+
+void msm_jpeg_hw_fe_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *);
+void msm_jpeg_hw_we_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *);
+void msm_jpegdma_hw_fe_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *);
+void msm_jpegdma_hw_we_buffer_update(struct msm_jpeg_hw_buf *p_input,
+ uint8_t pingpong_index, void *);
+
+
+void msm_jpeg_hw_we_buffer_cfg(uint8_t is_realtime);
+
+void msm_jpeg_hw_fe_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *,
+ uint8_t decode_flag);
+void msm_jpeg_hw_we_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *,
+ uint8_t decode_flag);
+void msm_jpegdma_hw_fe_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *);
+void msm_jpegdma_hw_we_mmu_prefetch(struct msm_jpeg_hw_buf *buf, void *);
+
+void msm_jpeg_hw_fe_start(void *);
+void msm_jpeg_hw_clk_cfg(void);
+
+void msm_jpeg_hw_reset(void *base, int size);
+void msm_jpeg_hw_irq_cfg(void);
+
+uint32_t msm_jpeg_hw_read(struct msm_jpeg_hw_cmd *, void *);
+void msm_jpeg_hw_write(struct msm_jpeg_hw_cmd *, void *);
+int msm_jpeg_hw_wait(struct msm_jpeg_hw_cmd *, int, void *);
+void msm_jpeg_hw_delay(struct msm_jpeg_hw_cmd *, int);
+int msm_jpeg_hw_exec_cmds(struct msm_jpeg_hw_cmd *, uint32_t,
+ uint32_t, void *);
+void msm_jpeg_hw_region_dump(int size);
+void msm_jpeg_io_dump(void *base, int size);
+void msm_jpeg_decode_status(void *base);
+void msm_jpeg_hw_reset_dma(void *base, int size);
+
+#endif /* MSM_JPEG_HW_H */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw_reg.h b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw_reg.h
new file mode 100644
index 000000000000..7b40c2cda5bb
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_hw_reg.h
@@ -0,0 +1,210 @@
+/* Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_HW_REG_H
+#define MSM_JPEG_HW_REG_H
+
+#define JPEG_REG_BASE 0
+
+#define MSM_JPEG_HW_IRQ_MASK_ADDR 0x00000018
+#define MSM_JPEG_HW_IRQ_MASK_RMSK 0xFFFFFFFF
+#define MSM_JPEG_HW_IRQ_ENABLE 0xFFFFFFFF
+
+#define MSM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
+#define MSM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000
+
+#define MSM_JPEG_HW_IRQ_STATUS_FE_RD_DONE_MASK 0x00000010
+#define MSM_JPEG_HW_IRQ_STATUS_FE_RD_DONE_SHIFT 0x00000001
+
+#define MSM_JPEG_HW_IRQ_STATUS_FE_RTOVF_MASK 0x00000004
+#define MSM_JPEG_HW_IRQ_STATUS_FE_RTOVF_SHIFT 0x00000002
+
+#define MSM_JPEG_HW_IRQ_STATUS_FE_VFE_OVERFLOW_MASK 0x00000008
+#define MSM_JPEG_HW_IRQ_STATUS_FE_VFE_OVERFLOW_SHIFT 0x00000003
+
+#define MSM_JPEG_HW_IRQ_STATUS_WE_Y_PINGPONG_MASK 0x00000010
+#define MSM_JPEG_HW_IRQ_STATUS_WE_Y_PINGPONG_SHIFT 0x00000004
+
+#define MSM_JPEG_HW_IRQ_STATUS_WE_CBCR_PINGPONG_MASK 0x00000020
+#define MSM_JPEG_HW_IRQ_STATUS_WE_CBCR_PINGPONG_SHIFT 0x00000005
+
+#define MSM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000
+#define MSM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a
+
+#define MSM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800
+#define MSM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b
+
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF (0x1<<19)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR (0x1<<20)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR (0x1<<21)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW (0x1<<23)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM (0x1<<24)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ (0x1<<25)
+#define MSM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM (0x1<<26)
+#define MSM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK (0x1<<29)
+
+#define JPEG_OFFLINE_CMD_START 0x00000001
+
+#define JPEG_RESET_DEFAULT 0x00032093
+
+#define JPEG_IRQ_DISABLE_ALL 0x00000000
+#define JPEG_IRQ_CLEAR_ALL 0xFFFFFFFF
+
+#define JPEG_PLN0_RD_PNTR_ADDR (JPEG_REG_BASE + 0x00000038)
+#define JPEG_PLN0_RD_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN0_RD_OFFSET_ADDR 0x0000003C
+#define JPEG_PLN0_RD_OFFSET_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN1_RD_PNTR_ADDR (JPEG_REG_BASE + 0x00000044)
+#define JPEG_PLN1_RD_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN1_RD_OFFSET_ADDR 0x00000048
+#define JPEG_PLN1_RD_OFFSET_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN2_RD_PNTR_ADDR (JPEG_REG_BASE + 0x00000050)
+#define JPEG_PLN2_RD_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN2_RD_OFFSET_ADDR 0x00000054
+#define JPEG_PLN2_RD_OFFSET_BMSK 0xFFFFFFFF
+
+#define JPEG_CMD_ADDR (JPEG_REG_BASE + 0x00000010)
+#define JPEG_CMD_BMSK 0xFFFFFFFF
+#define JPEG_CMD_CLEAR_WRITE_PLN_QUEUES 0x700
+
+#define JPEG_PLN0_WR_PNTR_ADDR (JPEG_REG_BASE + 0x000000cc)
+#define JPEG_PLN0_WR_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN1_WR_PNTR_ADDR (JPEG_REG_BASE + 0x000000D0)
+#define JPEG_PLN1_WR_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_PLN2_WR_PNTR_ADDR (JPEG_REG_BASE + 0x000000D4)
+#define JPEG_PLN2_WR_PNTR_BMSK 0xFFFFFFFF
+
+#define JPEG_IRQ_MASK_ADDR (JPEG_REG_BASE + 0x00000018)
+#define JPEG_IRQ_MASK_BMSK 0xFFFFFFFF
+#define JPEG_IRQ_ALLSOURCES_ENABLE 0xFFFFFFFF
+
+#define JPEG_IRQ_CLEAR_ADDR (JPEG_REG_BASE + 0x0000001c)
+#define JPEG_IRQ_CLEAR_BMSK 0xFFFFFFFF
+
+#define JPEG_RESET_CMD_ADDR (JPEG_REG_BASE + 0x00000008)
+#define JPEG_RESET_CMD_RMSK 0xFFFFFFFF
+
+#define JPEG_IRQ_STATUS_ADDR (JPEG_REG_BASE + 0x00000020)
+#define JPEG_IRQ_STATUS_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S0_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x00000310)
+#define MSM_JPEG_S0_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S0_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x00000314)
+#define MSM_JPEG_S0_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S1_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x0000031C)
+#define MSM_JPEG_S1_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S1_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x00000320)
+#define MSM_JPEG_S1_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S2_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x00000328)
+#define MSM_JPEG_S2_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S2_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x0000032C)
+#define MSM_JPEG_S2_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S3_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x00000334)
+#define MSM_JPEG_S3_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEG_S3_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x00000338)
+#define MSM_JPEG_S3_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define JPEG_ENCODE_OUTPUT_SIZE_STATUS_ADDR (JPEG_REG_BASE + 0x00000180)
+#define JPEG_ENCODE_OUTPUT_SIZE_STATUS_BMSK 0x1FFFFFFF
+
+#define JPEG_DECODE_MCUS_DECODED_STATUS (JPEG_REG_BASE + 0x00000258)
+#define JPEG_DECODE_BITS_CONSUMED_STATUS (JPEG_REG_BASE + 0x0000025C)
+#define JPEG_DECODE_PRED_Y_STATE (JPEG_REG_BASE + 0x00000260)
+#define JPEG_DECODE_PRED_C_STATE (JPEG_REG_BASE + 0x00000264)
+#define JPEG_DECODE_RSM_STATE (JPEG_REG_BASE + 0x00000268)
+
+#define JPEG_HW_VERSION (JPEG_REG_BASE + 0x00000000)
+
+#define VBIF_BASE_ADDRESS 0xFDA60000
+#define VBIF_REGION_SIZE 0xC30
+#define JPEG_VBIF_CLKON 0x4
+#define JPEG_VBIF_IN_RD_LIM_CONF0 0xB0
+#define JPEG_VBIF_IN_RD_LIM_CONF1 0xB4
+#define JPEG_VBIF_IN_RD_LIM_CONF2 0xB8
+#define JPEG_VBIF_IN_WR_LIM_CONF0 0xC0
+#define JPEG_VBIF_IN_WR_LIM_CONF1 0xC4
+#define JPEG_VBIF_IN_WR_LIM_CONF2 0xC8
+#define JPEG_VBIF_OUT_RD_LIM_CONF0 0xD0
+#define JPEG_VBIF_OUT_WR_LIM_CONF0 0xD4
+#define JPEG_VBIF_DDR_OUT_MAX_BURST 0xD8
+#define JPEG_VBIF_OCMEM_OUT_MAX_BURST 0xDC
+#define JPEG_VBIF_ARB_CTL 0xF0
+#define JPEG_VBIF_OUT_AXI_AOOO_EN 0x178
+#define JPEG_VBIF_OUT_AXI_AOOO 0x17c
+#define JPEG_VBIF_ROUND_ROBIN_QOS_ARB 0x124
+#define JPEG_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x160
+#define JPEG_VBIF_OUT_AXI_AMEMTYPE_CONF1 0x164
+
+#define JPEGDMA_IRQ_MASK_ADDR (JPEG_REG_BASE + 0x0000000C)
+#define JPEGDMA_IRQ_MASK_BMSK 0xFFFFFFFF
+#define JPEGDMA_IRQ_ALLSOURCES_ENABLE 0xFFFFFFFF
+
+#define JPEGDMA_IRQ_CLEAR_ADDR (JPEG_REG_BASE + 0x00000014)
+#define JPEGDMA_IRQ_CLEAR_BMSK 0xFFFFFFFF
+
+#define JPEGDMA_RESET_CMD_ADDR (JPEG_REG_BASE + 0x00000008)
+#define JPEGDMA_RESET_CMD_BMSK 0xFFFFFFFF
+
+#define JPEGDMA_IRQ_STATUS_ADDR (JPEG_REG_BASE + 0x00000010)
+#define JPEGDMA_IRQ_STATUS_BMSK 0xFFFFFFFF
+#define JPEGDMA_RESET_DEFAULT 0x00032083
+
+
+#define JPEGDMA_CMD_ADDR (JPEG_REG_BASE + 0x0000001C)
+#define JPEGDMA_CMD_BMSK (0xFFFFFFFF)
+#define JPEGDMA_CMD_CLEAR_READ_PLN_QUEUES 0x030
+#define JPEGDMA_CMD_CLEAR_WRITE_PLN_QUEUES 0x300
+
+#define JPEGDMA_IRQ_DISABLE_ALL 0x00000000
+#define JPEGDMA_IRQ_CLEAR_ALL 0x00001FFF
+#define MSM_JPEGDMA_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
+#define MSM_JPEGDMA_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000
+#define MSM_JPEGDMA_HW_IRQ_STATUS_FE_RD_DONE_MASK 0x00000006
+#define MSM_JPEGDMA_HW_IRQ_STATUS_FE_RD_DONE_SHIFT 0x00000001
+#define MSM_JPEGDMA_HW_IRQ_STATUS_WE_WR_DONE_MASK 0x00000060
+#define MSM_JPEGDMA_HW_IRQ_STATUS_WE_WR_DONE_SHIFT 0x00000005
+#define MSM_JPEGDMA_HW_IRQ_STATUS_RESET_ACK_MASK 0x00000400
+#define MSM_JPEGDMA_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000000a
+
+#define MSM_JPEGDMA_FE_0_RD_PNTR (JPEG_REG_BASE + 0x00000034)
+#define MSM_JPEGDMA_FE_1_RD_PNTR (JPEG_REG_BASE + 0x00000078)
+#define MSM_JPEGDMA_WE_0_WR_PNTR (JPEG_REG_BASE + 0x000000BC)
+#define MSM_JPEGDMA_WE_1_WR_PNTR (JPEG_REG_BASE + 0x000000EC)
+
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x00000190)
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x00000198)
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN (JPEG_REG_BASE + 0x000001A4)
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN_BMSK 0xFFFFFFFF
+
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX (JPEG_REG_BASE + 0x000001AC)
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX_BMSK 0xFFFFFFFF
+
+#endif /* MSM_JPEG_HW_REG_H */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
new file mode 100644
index 000000000000..68a7e4627562
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.c
@@ -0,0 +1,646 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+
+#include <linux/module.h>
+#include <linux/pm_qos.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/iommu.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_camera_io_util.h"
+#include "msm_jpeg_platform.h"
+#include "msm_jpeg_sync.h"
+#include "msm_jpeg_common.h"
+#include "msm_jpeg_hw.h"
+
+#define JPEG_DT_PROP_CNT 2
+
+static int msm_jpeg_get_regulator_info(struct msm_jpeg_device *jpeg_dev,
+ struct platform_device *pdev)
+{
+ uint32_t count;
+ int i, rc;
+
+ struct device_node *of_node;
+ of_node = pdev->dev.of_node;
+
+ if (of_get_property(of_node, "qcom,vdd-names", NULL)) {
+
+ count = of_property_count_strings(of_node, "qcom,vdd-names");
+
+ JPEG_DBG("count = %d\n", count);
+ if ((count == 0) || (count == -EINVAL)) {
+ pr_err("no regulators found in device tree, count=%d",
+ count);
+ return -EINVAL;
+ }
+
+ if (count > JPEG_REGULATOR_MAX) {
+ pr_err("invalid count=%d, max is %d\n", count,
+ JPEG_REGULATOR_MAX);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,vdd-names", i,
+ &(jpeg_dev->regulator_names[i]));
+ JPEG_DBG("regulator-names[%d] = %s\n",
+ i, jpeg_dev->regulator_names[i]);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+ }
+ } else {
+ jpeg_dev->regulator_names[0] = "vdd";
+ count = 1;
+ }
+ jpeg_dev->num_regulator = count;
+ return 0;
+}
+
+static int msm_jpeg_regulator_enable(struct device *dev, const char **reg_names,
+ struct regulator **reg_ptr, int num_reg, int enable)
+{
+ int i;
+ int rc = 0;
+ if (enable) {
+ for (i = 0; i < num_reg; i++) {
+ JPEG_DBG("%s enable %s\n", __func__, reg_names[i]);
+ reg_ptr[i] = regulator_get(dev, reg_names[i]);
+ if (IS_ERR(reg_ptr[i])) {
+ pr_err("%s get failed\n", reg_names[i]);
+ rc = PTR_ERR(reg_ptr[i]);
+ reg_ptr[i] = NULL;
+ goto cam_reg_get_err;
+ }
+
+ rc = regulator_enable(reg_ptr[i]);
+ if (rc < 0) {
+ pr_err("%s enable failed\n", reg_names[i]);
+ goto cam_reg_enable_err;
+ }
+ }
+ } else {
+ for (i = num_reg - 1; i >= 0; i--) {
+ if (reg_ptr[i] != NULL) {
+ JPEG_DBG("%s disable %s\n", __func__,
+ reg_names[i]);
+ regulator_disable(reg_ptr[i]);
+ regulator_put(reg_ptr[i]);
+ }
+ }
+ }
+ return rc;
+
+cam_reg_enable_err:
+ regulator_put(reg_ptr[i]);
+cam_reg_get_err:
+ for (i--; i >= 0; i--) {
+ if (reg_ptr[i] != NULL) {
+ regulator_disable(reg_ptr[i]);
+ regulator_put(reg_ptr[i]);
+ }
+ }
+ return rc;
+}
+
+
+static int msm_jpeg_get_clk_info(struct msm_jpeg_device *jpeg_dev,
+ struct platform_device *pdev)
+{
+ uint32_t count;
+ int i, rc;
+ uint32_t rates[JPEG_CLK_MAX];
+
+ struct device_node *of_node;
+ of_node = pdev->dev.of_node;
+
+ count = of_property_count_strings(of_node, "clock-names");
+
+ JPEG_DBG("count = %d\n", count);
+ if (count == 0) {
+ pr_err("no clocks found in device tree, count=%d", count);
+ return 0;
+ }
+
+ if (count > JPEG_CLK_MAX) {
+ pr_err("invalid count=%d, max is %d\n", count,
+ JPEG_CLK_MAX);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node, "clock-names",
+ i, &(jpeg_dev->jpeg_clk_info[i].clk_name));
+ JPEG_DBG("clock-names[%d] = %s\n",
+ i, jpeg_dev->jpeg_clk_info[i].clk_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+ }
+ rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+ rates, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+ for (i = 0; i < count; i++) {
+ jpeg_dev->jpeg_clk_info[i].clk_rate =
+ (rates[i] == 0) ? (long) -1 : (long) rates[i];
+ JPEG_DBG("clk_rate[%d] = %ld\n",
+ i, jpeg_dev->jpeg_clk_info[i].clk_rate);
+ }
+ jpeg_dev->num_clk = count;
+ return 0;
+}
+
+
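+/*
+ * msm_jpeg_platform_set_clk_rate() - set the JPEG core clock rate.
+ * @pgmn_dev: Pointer to jpeg device.
+ * @clk_rate: Requested clock rate in Hz.
+ *
+ * The requested rate is rounded with clk_round_rate() before it is applied
+ * to the "core_clk" clock.
+ *
+ * Return: 0 on success and negative error on failure.
+ */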
+int msm_jpeg_platform_set_clk_rate(struct msm_jpeg_device *pgmn_dev,
+ long clk_rate)
+{
+ int rc = 0;
+ struct clk *jpeg_clk;
+
+ jpeg_clk = clk_get(&pgmn_dev->pdev->dev, "core_clk");
+ if (IS_ERR(jpeg_clk)) {
+ JPEG_PR_ERR("%s get failed\n", "core_clk");
+ rc = PTR_ERR(jpeg_clk);
+ goto error;
+ }
+
+ clk_rate = clk_round_rate(jpeg_clk, clk_rate);
+ if (clk_rate < 0) {
+ JPEG_PR_ERR("%s:%d] round rate failed", __func__, __LINE__);
+ rc = -EINVAL;
+ goto error;
+ }
+ JPEG_DBG("%s:%d] jpeg clk rate %ld", __func__, __LINE__, clk_rate);
+
+ rc = clk_set_rate(jpeg_clk, clk_rate);
+
+error:
+ return rc;
+}
+
+void msm_jpeg_platform_p2v(int iommu_hdl, int fd)
+{
+ cam_smmu_put_phy_addr(iommu_hdl, fd);
+ return;
+}
+
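+/*
+ * msm_jpeg_platform_v2p() - map an ion buffer fd to a device address.
+ * @pgmn_dev: Pointer to jpeg device.
+ * @fd: Ion buffer fd from userspace.
+ * @len: Length requested by userspace, validated against the mapped size.
+ * @iommu_hdl: SMMU handle (the handle stored in pgmn_dev is used).
+ *
+ * Return: Mapped device address on success, 0 on failure.
+ */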
+uint32_t msm_jpeg_platform_v2p(struct msm_jpeg_device *pgmn_dev, int fd,
+ uint32_t len, int iommu_hdl)
+{
+ dma_addr_t paddr;
+ size_t size;
+ int rc;
+
+ rc = cam_smmu_get_phy_addr(pgmn_dev->iommu_hdl, fd, CAM_SMMU_MAP_RW,
+ &paddr, &size);
+ JPEG_DBG("%s:%d] addr 0x%x size %zu", __func__, __LINE__,
+ (uint32_t)paddr, size);
+
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: fd %d got phy addr error %d\n", __func__, fd,
+ rc);
+ goto err_get_phy;
+ }
+
+ /* validate user input */
+ if (len > size) {
+ JPEG_PR_ERR("%s: invalid offset + len\n", __func__);
+ goto err_size;
+ }
+
+ return paddr;
+err_size:
+ cam_smmu_put_phy_addr(pgmn_dev->iommu_hdl, fd);
+err_get_phy:
+ return 0;
+}
+
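+/*
+ * set_vbif_params() - program default VBIF QoS and burst settings.
+ *
+ * Used when no "qcom,vbif-reg-settings" device tree override is present.
+ */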
+static void set_vbif_params(struct msm_jpeg_device *pgmn_dev,
+ void *jpeg_vbif_base)
+{
+ msm_camera_io_w(0x1,
+ jpeg_vbif_base + JPEG_VBIF_CLKON);
+
+ if (pgmn_dev->hw_version != JPEG_8994) {
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_RD_LIM_CONF0);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_RD_LIM_CONF1);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_RD_LIM_CONF2);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_WR_LIM_CONF0);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_WR_LIM_CONF1);
+ msm_camera_io_w(0x10101010,
+ jpeg_vbif_base + JPEG_VBIF_IN_WR_LIM_CONF2);
+ msm_camera_io_w(0x00001010,
+ jpeg_vbif_base + JPEG_VBIF_OUT_RD_LIM_CONF0);
+ msm_camera_io_w(0x00000110,
+ jpeg_vbif_base + JPEG_VBIF_OUT_WR_LIM_CONF0);
+ msm_camera_io_w(0x00000707,
+ jpeg_vbif_base + JPEG_VBIF_DDR_OUT_MAX_BURST);
+ msm_camera_io_w(0x00000FFF,
+ jpeg_vbif_base + JPEG_VBIF_OUT_AXI_AOOO_EN);
+ msm_camera_io_w(0x0FFF0FFF,
+ jpeg_vbif_base + JPEG_VBIF_OUT_AXI_AOOO);
+ msm_camera_io_w(0x2222,
+ jpeg_vbif_base + JPEG_VBIF_OUT_AXI_AMEMTYPE_CONF1);
+ }
+
+ msm_camera_io_w(0x7,
+ jpeg_vbif_base + JPEG_VBIF_OCMEM_OUT_MAX_BURST);
+ msm_camera_io_w(0x00000030,
+ jpeg_vbif_base + JPEG_VBIF_ARB_CTL);
+
+ /* FE and WE QOS configuration need to be set when
+  * QOS RR arbitration is enabled
+  */
+ if (pgmn_dev->hw_version != JPEG_8974_V1)
+ msm_camera_io_w(0x00000003,
+ jpeg_vbif_base + JPEG_VBIF_ROUND_ROBIN_QOS_ARB);
+ else
+ msm_camera_io_w(0x00000001,
+ jpeg_vbif_base + JPEG_VBIF_ROUND_ROBIN_QOS_ARB);
+
+ msm_camera_io_w(0x22222222,
+ jpeg_vbif_base + JPEG_VBIF_OUT_AXI_AMEMTYPE_CONF0);
+
+}
+
+/*
+ * msm_jpeg_set_init_dt_parms() - get device tree config and write to registers.
+ * @pgmn_dev: Pointer to jpeg device.
+ * @dt_prop_name: Device tree property name.
+ * @base: Base address.
+ *
+ * This function reads register offsets and values from dtsi based on
+ * device tree property name and writes to jpeg registers.
+ *
+ * Return: 0 on success and negative error on failure.
+ */
+static int32_t msm_jpeg_set_init_dt_parms(struct msm_jpeg_device *pgmn_dev,
+ const char *dt_prop_name,
+ void *base)
+{
+ struct device_node *of_node;
+ int32_t i = 0 , rc = 0;
+ uint32_t *dt_reg_settings = NULL;
+ uint32_t dt_count = 0;
+
+ of_node = pgmn_dev->pdev->dev.of_node;
+ JPEG_DBG("%s:%d E\n", __func__, __LINE__);
+
+ if (!of_get_property(of_node, dt_prop_name,
+ &dt_count)) {
+ JPEG_DBG("%s: Error property does not exist\n",
+ __func__);
+ return -ENOENT;
+ }
+ if (dt_count % 8) {
+ JPEG_PR_ERR("%s: Error invalid entries\n",
+ __func__);
+ return -EINVAL;
+ }
+ dt_count /= 4;
+ if (dt_count != 0) {
+ dt_reg_settings = kcalloc(dt_count, sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!dt_reg_settings) {
+ JPEG_PR_ERR("%s:%d No memory\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32_array(of_node,
+ dt_prop_name,
+ dt_reg_settings,
+ dt_count);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: No reg info\n",
+ __func__);
+ kfree(dt_reg_settings);
+ return -EINVAL;
+ }
+ for (i = 0; i < dt_count; i = i + 2) {
+ JPEG_DBG("%s:%d] %p %08x\n",
+ __func__, __LINE__,
+ base + dt_reg_settings[i],
+ dt_reg_settings[i + 1]);
+ msm_camera_io_w(dt_reg_settings[i + 1],
+ base + dt_reg_settings[i]);
+ }
+ kfree(dt_reg_settings);
+ }
+ return 0;
+}
+
+static struct msm_bus_vectors msm_jpeg_init_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_JPEG,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors msm_jpeg_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_JPEG,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = JPEG_CLK_RATE * 2.5,
+ .ib = JPEG_CLK_RATE * 2.5,
+ },
+};
+
+static struct msm_bus_paths msm_jpeg_bus_client_config[] = {
+ {
+ ARRAY_SIZE(msm_jpeg_init_vectors),
+ msm_jpeg_init_vectors,
+ },
+ {
+ ARRAY_SIZE(msm_jpeg_vectors),
+ msm_jpeg_vectors,
+ },
+};
+
+static struct msm_bus_scale_pdata msm_jpeg_bus_client_pdata = {
+ msm_jpeg_bus_client_config,
+ ARRAY_SIZE(msm_jpeg_bus_client_config),
+ .name = "msm_jpeg",
+};
+
+static int msm_jpeg_attach_iommu(struct msm_jpeg_device *pgmn_dev)
+{
+ int rc;
+ rc = cam_smmu_ops(pgmn_dev->iommu_hdl, CAM_SMMU_ATTACH);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: Device attach failed\n", __func__);
+ return -ENODEV;
+ }
+ JPEG_DBG("%s:%d] handle %d attach\n",
+ __func__, __LINE__, pgmn_dev->iommu_hdl);
+ return 0;
+}
+
+static int msm_jpeg_detach_iommu(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] handle %d detach\n",
+ __func__, __LINE__, pgmn_dev->iommu_hdl);
+ cam_smmu_ops(pgmn_dev->iommu_hdl, CAM_SMMU_DETACH);
+ return 0;
+}
+
+
+
+int msm_jpeg_platform_init(struct platform_device *pdev,
+ struct resource **mem,
+ void **base,
+ int *irq,
+ irqreturn_t (*handler)(int, void *),
+ void *context)
+{
+ int rc = -1;
+ int jpeg_irq;
+ struct resource *jpeg_mem, *vbif_mem, *jpeg_io, *jpeg_irq_res;
+ void *jpeg_base;
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *) context;
+
+ pgmn_dev->state = MSM_JPEG_IDLE;
+
+ jpeg_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!jpeg_mem) {
+ JPEG_PR_ERR("%s: jpeg no mem resource?\n", __func__);
+ return -ENODEV;
+ }
+
+ vbif_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!vbif_mem) {
+ JPEG_PR_ERR("%s: vbif no mem resource?\n", __func__);
+ return -ENODEV;
+ }
+
+ jpeg_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!jpeg_irq_res) {
+ JPEG_PR_ERR("no irq resource?\n");
+ return -ENODEV;
+ }
+ jpeg_irq = jpeg_irq_res->start;
+ JPEG_DBG("%s base address: 0x%lx, jpeg irq number: %d\n", __func__,
+ (unsigned long)jpeg_mem->start, jpeg_irq);
+
+ pgmn_dev->jpeg_bus_client =
+ msm_bus_scale_register_client(&msm_jpeg_bus_client_pdata);
+ if (!pgmn_dev->jpeg_bus_client) {
+ JPEG_PR_ERR("%s: Registration Failed!\n", __func__);
+ pgmn_dev->jpeg_bus_client = 0;
+ return -EINVAL;
+ }
+
+ jpeg_io = request_mem_region(jpeg_mem->start,
+ resource_size(jpeg_mem), pdev->name);
+ if (!jpeg_io) {
+ JPEG_PR_ERR("%s: region already claimed\n", __func__);
+ return -EBUSY;
+ }
+
+ jpeg_base = ioremap(jpeg_mem->start, resource_size(jpeg_mem));
+ if (!jpeg_base) {
+ rc = -ENOMEM;
+ JPEG_PR_ERR("%s: ioremap failed\n", __func__);
+ goto fail_remap;
+ }
+
+ rc = msm_jpeg_get_regulator_info(pgmn_dev, pgmn_dev->pdev);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s:%d]jpeg regulator get failed\n",
+ __func__, __LINE__);
+ goto fail_fs;
+ }
+
+ rc = msm_jpeg_regulator_enable(&pgmn_dev->pdev->dev,
+ pgmn_dev->regulator_names, pgmn_dev->jpeg_fs,
+ pgmn_dev->num_regulator, 1);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s:%d] jpeg regulator enable failed rc = %d\n",
+ __func__, __LINE__, rc);
+ goto fail_fs;
+ }
+
+ if (msm_jpeg_get_clk_info(pgmn_dev, pgmn_dev->pdev) < 0) {
+ JPEG_PR_ERR("%s:%d]jpeg clock get failed\n",
+ __func__, __LINE__);
+ goto fail_fs;
+ }
+
+ rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev, pgmn_dev->jpeg_clk_info,
+ pgmn_dev->jpeg_clk, pgmn_dev->num_clk, 1);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
+ goto fail_clk;
+ }
+
+ pgmn_dev->hw_version = msm_camera_io_r(jpeg_base +
+ JPEG_HW_VERSION);
+ JPEG_DBG_HIGH("%s:%d] jpeg HW version 0x%x", __func__, __LINE__,
+ pgmn_dev->hw_version);
+
+ pgmn_dev->jpeg_vbif = ioremap(vbif_mem->start, resource_size(vbif_mem));
+ if (!pgmn_dev->jpeg_vbif) {
+ rc = -ENOMEM;
+ JPEG_PR_ERR("%s: ioremap failed\n", __func__);
+ goto fail_vbif;
+ }
+ JPEG_DBG("%s:%d] jpeg_vbif 0x%lx", __func__, __LINE__,
+ (unsigned long)pgmn_dev->jpeg_vbif);
+
+ rc = msm_jpeg_attach_iommu(pgmn_dev);
+ if (rc < 0)
+ goto fail_iommu;
+
+ rc = msm_jpeg_set_init_dt_parms(pgmn_dev, "qcom,vbif-reg-settings",
+ pgmn_dev->jpeg_vbif);
+ if (rc == -ENOENT) {
+ JPEG_DBG("%s: No qcom,vbif-reg-settings property\n", __func__);
+ set_vbif_params(pgmn_dev, pgmn_dev->jpeg_vbif);
+ } else if (rc < 0) {
+ JPEG_PR_ERR("%s: vbif params set fail\n", __func__);
+ goto fail_set_vbif;
+ }
+
+ rc = request_irq(jpeg_irq, handler, IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev), context);
+ if (rc) {
+ JPEG_PR_ERR("%s: request_irq failed, %d\n", __func__,
+ jpeg_irq);
+ goto fail_request_irq;
+ }
+
+ *mem = jpeg_mem;
+ *base = jpeg_base;
+ *irq = jpeg_irq;
+
+ pgmn_dev->state = MSM_JPEG_INIT;
+ return rc;
+
+fail_request_irq:
+fail_set_vbif:
+ msm_jpeg_detach_iommu(pgmn_dev);
+
+fail_iommu:
+ iounmap(pgmn_dev->jpeg_vbif);
+
+fail_vbif:
+ msm_cam_clk_enable(&pgmn_dev->pdev->dev, pgmn_dev->jpeg_clk_info,
+ pgmn_dev->jpeg_clk, pgmn_dev->num_clk, 0);
+
+fail_clk:
+ msm_jpeg_regulator_enable(&pgmn_dev->pdev->dev,
+ pgmn_dev->regulator_names, pgmn_dev->jpeg_fs,
+ pgmn_dev->num_regulator, 0);
+
+fail_fs:
+ iounmap(jpeg_base);
+
+fail_remap:
+ release_mem_region(jpeg_mem->start, resource_size(jpeg_mem));
+ JPEG_DBG("%s:%d] fail\n", __func__, __LINE__);
+ return rc;
+}
+
+int msm_jpeg_platform_release(struct resource *mem, void *base, int irq,
+ void *context)
+{
+ int result = 0;
+
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *) context;
+
+ free_irq(irq, context);
+
+ msm_jpeg_detach_iommu(pgmn_dev);
+
+ if (pgmn_dev->jpeg_bus_client) {
+ if (pgmn_dev->jpeg_bus_vote) {
+ msm_bus_scale_client_update_request(
+ pgmn_dev->jpeg_bus_client, 0);
+ JPEG_BUS_UNVOTED(pgmn_dev);
+ JPEG_DBG("%s:%d] Bus unvoted\n", __func__, __LINE__);
+ }
+ msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
+ }
+
+ msm_cam_clk_enable(&pgmn_dev->pdev->dev, pgmn_dev->jpeg_clk_info,
+ pgmn_dev->jpeg_clk, pgmn_dev->num_clk, 0);
+ JPEG_DBG("%s:%d] clock disbale done", __func__, __LINE__);
+
+ msm_jpeg_regulator_enable(&pgmn_dev->pdev->dev,
+ pgmn_dev->regulator_names, pgmn_dev->jpeg_fs,
+ pgmn_dev->num_regulator, 0);
+ JPEG_DBG("%s:%d] regulator disable done", __func__, __LINE__);
+
+ iounmap(pgmn_dev->jpeg_vbif);
+ iounmap(base);
+ release_mem_region(mem->start, resource_size(mem));
+ pgmn_dev->state = MSM_JPEG_IDLE;
+ JPEG_DBG("%s:%d] success\n", __func__, __LINE__);
+ return result;
+}
+
+/*
+ * msm_jpeg_platform_set_dt_config() - set jpeg device tree configuration.
+ * @pgmn_dev: Pointer to jpeg device.
+ *
+ * This function holds an array of device tree property names and calls
+ * msm_jpeg_set_init_dt_parms() for each property.
+ *
+ * Return: 0 on success and negative error on failure.
+ */
+int msm_jpeg_platform_set_dt_config(struct msm_jpeg_device *pgmn_dev)
+{
+ int rc = 0;
+ uint8_t dt_prop_cnt = JPEG_DT_PROP_CNT;
+ char *dt_prop_name[JPEG_DT_PROP_CNT] = {"qcom,qos-reg-settings",
+ "qcom,prefetch-reg-settings"};
+
+ while (dt_prop_cnt) {
+ dt_prop_cnt--;
+ rc = msm_jpeg_set_init_dt_parms(pgmn_dev,
+ dt_prop_name[dt_prop_cnt],
+ pgmn_dev->base);
+ if (rc == -ENOENT) {
+ JPEG_DBG("%s: No %s property\n", __func__,
+ dt_prop_name[dt_prop_cnt]);
+ } else if (rc < 0) {
+ JPEG_PR_ERR("%s: %s params set fail\n", __func__,
+ dt_prop_name[dt_prop_cnt]);
+ return rc;
+ }
+ }
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.h b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.h
new file mode 100644
index 000000000000..905a0e4126d8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_platform.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_JPEG_PLATFORM_H
+#define MSM_JPEG_PLATFORM_H
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/ion.h>
+#include "msm_jpeg_sync.h"
+#define JPEG_CLK_RATE 266670000
+
+int msm_jpeg_platform_set_clk_rate(struct msm_jpeg_device *pgmn_dev,
+ long clk_rate);
+void msm_jpeg_platform_p2v(int iommu_hdl, int fd);
+uint32_t msm_jpeg_platform_v2p(struct msm_jpeg_device *pgmn_dev, int fd,
+ uint32_t len, int iommu_hdl);
+
+int msm_jpeg_platform_clk_enable(void);
+int msm_jpeg_platform_clk_disable(void);
+
+int msm_jpeg_platform_init(struct platform_device *pdev,
+ struct resource **mem,
+ void **base,
+ int *irq,
+ irqreturn_t (*handler)(int, void *),
+ void *context);
+int msm_jpeg_platform_release(struct resource *mem, void *base, int irq,
+ void *context);
+int msm_jpeg_platform_set_dt_config(struct msm_jpeg_device *pgmn_dev);
+
+#endif /* MSM_JPEG_PLATFORM_H */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.c b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.c
new file mode 100644
index 000000000000..034ba79ab5c8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.c
@@ -0,0 +1,1573 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/ratelimit.h>
+#include <media/msm_jpeg.h>
+#include <linux/msm-bus.h>
+#include "msm_jpeg_sync.h"
+#include "msm_jpeg_core.h"
+#include "msm_jpeg_platform.h"
+#include "msm_jpeg_common.h"
+#include "cam_hw_ops.h"
+
+#define JPEG_REG_SIZE 0x308
+#define JPEG_DEV_CNT 4
+#define JPEG_DEC_ID 2
+#define UINT32_MAX (0xFFFFFFFFU)
+
+#ifdef CONFIG_COMPAT
+
+#define MSM_JPEG_IOCTL_GET_HW_VERSION32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 1, struct msm_jpeg_hw_cmd32)
+
+#define MSM_JPEG_IOCTL_RESET32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 2, struct msm_jpeg_ctrl_cmd32)
+
+#define MSM_JPEG_IOCTL_STOP32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 3, struct msm_jpeg_hw_cmds32)
+
+#define MSM_JPEG_IOCTL_START32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 4, struct msm_jpeg_hw_cmds32)
+
+#define MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 5, struct msm_jpeg_buf32)
+
+#define MSM_JPEG_IOCTL_INPUT_GET32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 6, struct msm_jpeg_buf32)
+
+#define MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 8, struct msm_jpeg_buf32)
+
+#define MSM_JPEG_IOCTL_OUTPUT_GET32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 9, struct msm_jpeg_buf32)
+
+#define MSM_JPEG_IOCTL_EVT_GET32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 11, struct msm_jpeg_ctrl_cmd32)
+
+#define MSM_JPEG_IOCTL_HW_CMD32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 13, struct msm_jpeg_hw_cmd32)
+
+#define MSM_JPEG_IOCTL_HW_CMDS32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 14, struct msm_jpeg_hw_cmds32)
+
+#define MSM_JPEG_IOCTL_TEST_DUMP_REGION32 \
+ _IOW(MSM_JPEG_IOCTL_MAGIC, 15, compat_ulong_t)
+
+struct msm_jpeg_ctrl_cmd32 {
+ uint32_t type;
+ uint32_t len;
+ compat_uptr_t value;
+};
+struct msm_jpeg_buf32 {
+ uint32_t type;
+ int fd;
+
+ compat_uptr_t vaddr;
+
+ uint32_t y_off;
+ uint32_t y_len;
+ uint32_t framedone_len;
+
+ uint32_t cbcr_off;
+ uint32_t cbcr_len;
+
+ uint32_t num_of_mcu_rows;
+ uint32_t offset;
+ uint32_t pln2_off;
+ uint32_t pln2_len;
+};
+
+struct msm_jpeg_hw_cmd32 {
+
+ uint32_t type:4;
+
+ /* n microseconds of timeout for WAIT */
+ /* n microseconds of time for DELAY */
+ /* repeat n times for READ/WRITE */
+ /* max is 0xFFF, 4095 */
+ uint32_t n:12;
+ uint32_t offset:16;
+ uint32_t mask;
+ union {
+ uint32_t data; /* for single READ/WRITE/WAIT, n = 1 */
+ compat_uptr_t pdata; /* for multiple READ/WRITE/WAIT, n > 1 */
+ };
+};
+
+struct msm_jpeg_hw_cmds32 {
+ uint32_t m; /* number of elements in the hw_cmd array */
+ struct msm_jpeg_hw_cmd32 hw_cmd[1];
+};
+#endif
+
+
+inline void msm_jpeg_q_init(char const *name, struct msm_jpeg_q *q_p)
+{
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, name);
+ q_p->name = name;
+ spin_lock_init(&q_p->lck);
+ INIT_LIST_HEAD(&q_p->q);
+ init_waitqueue_head(&q_p->wait);
+ q_p->unblck = 0;
+}
+
+inline void *msm_jpeg_q_out(struct msm_jpeg_q *q_p)
+{
+ unsigned long flags;
+ struct msm_jpeg_q_entry *q_entry_p = NULL;
+ void *data = NULL;
+
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ spin_lock_irqsave(&q_p->lck, flags);
+ if (!list_empty(&q_p->q)) {
+ q_entry_p = list_first_entry(&q_p->q, struct msm_jpeg_q_entry,
+ list);
+ list_del_init(&q_entry_p->list);
+ }
+ spin_unlock_irqrestore(&q_p->lck, flags);
+
+ if (q_entry_p) {
+ data = q_entry_p->data;
+ kfree(q_entry_p);
+ } else {
+ JPEG_DBG("%s:%d] %s no entry\n", __func__, __LINE__,
+ q_p->name);
+ }
+
+ return data;
+}
+
+inline int msm_jpeg_q_in(struct msm_jpeg_q *q_p, void *data)
+{
+ unsigned long flags;
+
+ struct msm_jpeg_q_entry *q_entry_p;
+
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+
+ q_entry_p = kmalloc(sizeof(struct msm_jpeg_q_entry), GFP_ATOMIC);
+ if (!q_entry_p) {
+ JPEG_PR_ERR("%s: no mem\n", __func__);
+ return -EFAULT;
+ }
+ q_entry_p->data = data;
+
+ spin_lock_irqsave(&q_p->lck, flags);
+ list_add_tail(&q_entry_p->list, &q_p->q);
+ spin_unlock_irqrestore(&q_p->lck, flags);
+
+ return 0;
+}
+
+inline int msm_jpeg_q_in_buf(struct msm_jpeg_q *q_p,
+ struct msm_jpeg_core_buf *buf)
+{
+ struct msm_jpeg_core_buf *buf_p;
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ buf_p = kmalloc(sizeof(struct msm_jpeg_core_buf), GFP_ATOMIC);
+ if (!buf_p) {
+ JPEG_PR_ERR("%s: no mem\n", __func__);
+ return -EFAULT;
+ }
+
+ memcpy(buf_p, buf, sizeof(struct msm_jpeg_core_buf));
+
+ msm_jpeg_q_in(q_p, buf_p);
+ return 0;
+}
+
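+/*
+ * msm_jpeg_q_wait() - block until the queue has an entry or is unblocked.
+ *
+ * Return: positive if an entry became available, -ETIMEDOUT if the wait
+ * timed out with the queue still empty, -ECANCELED if the queue was
+ * unblocked.
+ */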
+inline int msm_jpeg_q_wait(struct msm_jpeg_q *q_p)
+{
+ long tm = MAX_SCHEDULE_TIMEOUT; /* effectively unbounded wait */
+ int rc;
+
+ JPEG_DBG("%s:%d] %s wait\n", __func__, __LINE__, q_p->name);
+ rc = wait_event_timeout(q_p->wait,
+ (!list_empty_careful(&q_p->q) || q_p->unblck),
+ msecs_to_jiffies(tm));
+ JPEG_DBG("%s:%d] %s wait done\n", __func__, __LINE__, q_p->name);
+ if (list_empty_careful(&q_p->q)) {
+ if (rc == 0) {
+ rc = -ETIMEDOUT;
+ JPEG_PR_ERR("%s:%d] %s timeout\n", __func__, __LINE__,
+ q_p->name);
+ } else if (q_p->unblck) {
+ JPEG_DBG("%s:%d] %s unblock is true\n", __func__,
+ __LINE__, q_p->name);
+ q_p->unblck = 0;
+ rc = -ECANCELED;
+ }
+ }
+ return rc;
+}
+
+inline int msm_jpeg_q_wakeup(struct msm_jpeg_q *q_p)
+{
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ wake_up(&q_p->wait);
+ return 0;
+}
+
+inline int msm_jpeg_q_unblock(struct msm_jpeg_q *q_p)
+{
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ q_p->unblck = 1;
+ wake_up(&q_p->wait);
+ return 0;
+}
+
+inline void msm_jpeg_outbuf_q_cleanup(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_q *q_p)
+{
+ struct msm_jpeg_core_buf *buf_p;
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ do {
+ buf_p = msm_jpeg_q_out(q_p);
+ if (buf_p) {
+ msm_jpeg_platform_p2v(pgmn_dev->iommu_hdl,
+ buf_p->ion_fd);
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ kfree(buf_p);
+ }
+ } while (buf_p);
+ q_p->unblck = 0;
+}
+
+inline void msm_jpeg_q_cleanup(struct msm_jpeg_q *q_p)
+{
+ void *data;
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ do {
+ data = msm_jpeg_q_out(q_p);
+ if (data) {
+ JPEG_DBG("%s:%d] %s\n", __func__, __LINE__, q_p->name);
+ kfree(data);
+ }
+ } while (data);
+ q_p->unblck = 0;
+}
+
+/*************** event queue ****************/
+
+int msm_jpeg_framedone_irq(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf_in)
+{
+ int rc = 0;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+ if (buf_in) {
+ buf_in->vbuf.framedone_len = buf_in->framedone_len;
+ buf_in->vbuf.type = MSM_JPEG_EVT_SESSION_DONE;
+ JPEG_DBG("%s:%d] 0x%08x %d framedone_len %d\n",
+ __func__, __LINE__,
+ (int) buf_in->y_buffer_addr, buf_in->y_len,
+ buf_in->vbuf.framedone_len);
+ rc = msm_jpeg_q_in_buf(&pgmn_dev->evt_q, buf_in);
+ } else {
+ JPEG_PR_ERR("%s:%d] no output return buffer\n",
+ __func__, __LINE__);
+ rc = -1;
+ }
+
+ if (buf_in)
+ rc = msm_jpeg_q_wakeup(&pgmn_dev->evt_q);
+
+ return rc;
+}
+
+int msm_jpeg_evt_get(struct msm_jpeg_device *pgmn_dev,
+ void __user *to)
+{
+ struct msm_jpeg_core_buf *buf_p;
+ struct msm_jpeg_ctrl_cmd ctrl_cmd;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+ msm_jpeg_q_wait(&pgmn_dev->evt_q);
+ buf_p = msm_jpeg_q_out(&pgmn_dev->evt_q);
+
+ if (!buf_p) {
+ JPEG_DBG("%s:%d] no buffer\n", __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+ memset(&ctrl_cmd, 0, sizeof(ctrl_cmd));
+ ctrl_cmd.type = buf_p->vbuf.type;
+ kfree(buf_p);
+
+ if (ctrl_cmd.type == MSM_JPEG_EVT_SESSION_DONE) {
+ msm_bus_scale_client_update_request(
+ pgmn_dev->jpeg_bus_client, 0);
+ JPEG_BUS_UNVOTED(pgmn_dev);
+ JPEG_DBG("%s:%d] Bus unvoted\n", __func__, __LINE__);
+ }
+
+ JPEG_DBG("%s:%d] 0x%08lx %d\n", __func__, __LINE__,
+ (unsigned long) ctrl_cmd.value, ctrl_cmd.len);
+
+ if (copy_to_user(to, &ctrl_cmd, sizeof(ctrl_cmd))) {
+ JPEG_PR_ERR("%s:%d]\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int msm_jpeg_evt_get_unblock(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_q_unblock(&pgmn_dev->evt_q);
+ return 0;
+}
+
+void msm_jpeg_reset_ack_irq(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+}
+
+void msm_jpeg_err_irq(struct msm_jpeg_device *pgmn_dev,
+ int event)
+{
+ int rc = 0;
+ struct msm_jpeg_core_buf buf;
+
+ JPEG_PR_ERR("%s:%d] error: %d\n", __func__, __LINE__, event);
+
+ buf.vbuf.type = MSM_JPEG_EVT_ERR;
+ rc = msm_jpeg_q_in_buf(&pgmn_dev->evt_q, &buf);
+ if (!rc)
+ rc = msm_jpeg_q_wakeup(&pgmn_dev->evt_q);
+
+ if (!rc)
+ JPEG_PR_ERR("%s:%d] err err\n", __func__, __LINE__);
+
+ return;
+}
+
+/*************** output queue ****************/
+
+int msm_jpeg_we_pingpong_irq(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf_in)
+{
+ int rc = 0;
+ struct msm_jpeg_core_buf *buf_out;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ if (buf_in) {
+ JPEG_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+ (int) buf_in->y_buffer_addr, buf_in->y_len);
+ rc = msm_jpeg_q_in_buf(&pgmn_dev->output_rtn_q, buf_in);
+ } else {
+ JPEG_DBG("%s:%d] no output return buffer\n", __func__,
+ __LINE__);
+ rc = -1;
+ return rc;
+ }
+
+ buf_out = msm_jpeg_q_out(&pgmn_dev->output_buf_q);
+
+ if (buf_out) {
+ JPEG_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+ (int) buf_out->y_buffer_addr, buf_out->y_len);
+ rc = msm_jpeg_core_we_buf_update(pgmn_dev, buf_out);
+ kfree(buf_out);
+ } else {
+ msm_jpeg_core_we_buf_reset(pgmn_dev, buf_in);
+ JPEG_DBG("%s:%d] no output buffer\n", __func__, __LINE__);
+ rc = -2;
+ }
+
+ if (buf_in)
+ rc = msm_jpeg_q_wakeup(&pgmn_dev->output_rtn_q);
+
+ return rc;
+}
+
+int msm_jpeg_output_get(struct msm_jpeg_device *pgmn_dev, void __user *to)
+{
+ struct msm_jpeg_core_buf *buf_p;
+ struct msm_jpeg_buf buf_cmd;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+ msm_jpeg_q_wait(&pgmn_dev->output_rtn_q);
+ buf_p = msm_jpeg_q_out(&pgmn_dev->output_rtn_q);
+
+ if (!buf_p) {
+ JPEG_DBG("%s:%d] no output buffer return\n",
+ __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+ buf_cmd = buf_p->vbuf;
+ msm_jpeg_platform_p2v(pgmn_dev->iommu_hdl, buf_p->ion_fd);
+ kfree(buf_p);
+
+ JPEG_DBG("%s:%d] 0x%08lx %d\n", __func__, __LINE__,
+ (unsigned long) buf_cmd.vaddr, buf_cmd.y_len);
+
+ if (copy_to_user(to, &buf_cmd, sizeof(buf_cmd))) {
+ JPEG_PR_ERR("%s:%d]", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int msm_jpeg_output_get_unblock(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_q_unblock(&pgmn_dev->output_rtn_q);
+ return 0;
+}
+
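+/* Sum an array of u32 values into *res, returning -EFAULT on overflow. */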
+static inline int msm_jpeg_add_u32_check(uint32_t *p, uint32_t n, uint32_t *res)
+{
+ *res = 0;
+
+ while (n--) {
+ if ((*res + *p) < *res)
+ return -EFAULT;
+ *res += *p++;
+ }
+ return 0;
+}
+
+int msm_jpeg_output_buf_enqueue(struct msm_jpeg_device *pgmn_dev,
+ void __user *arg)
+{
+ struct msm_jpeg_buf buf_cmd;
+ struct msm_jpeg_core_buf *buf_p;
+ uint32_t buf_len_params[10];
+ uint32_t total_len = 0;
+ int n = 0;
+
+ memset(&buf_cmd, 0x0, sizeof(struct msm_jpeg_buf));
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_jpeg_buf))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ buf_len_params[n++] = buf_cmd.y_len;
+ buf_len_params[n++] = buf_cmd.cbcr_len;
+ buf_len_params[n++] = buf_cmd.pln2_len;
+ buf_len_params[n++] = buf_cmd.offset;
+ buf_len_params[n++] = buf_cmd.y_off;
+ if (msm_jpeg_add_u32_check(buf_len_params, n, &total_len) < 0) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ buf_p = kmalloc(sizeof(struct msm_jpeg_core_buf), GFP_ATOMIC);
+ if (!buf_p) {
+ JPEG_PR_ERR("%s:%d] no mem\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+
+ JPEG_DBG("%s:%d] vaddr = 0x%08lx y_len = %d\n, fd = %d",
+ __func__, __LINE__, (unsigned long) buf_cmd.vaddr,
+ buf_cmd.y_len, buf_cmd.fd);
+
+ buf_p->ion_fd = buf_cmd.fd;
+ buf_p->y_buffer_addr = msm_jpeg_platform_v2p(pgmn_dev, buf_cmd.fd,
+ total_len, pgmn_dev->iommu_hdl);
+
+ if (!buf_p->y_buffer_addr) {
+ JPEG_PR_ERR("%s:%d] v2p wrong\n", __func__, __LINE__);
+ kfree(buf_p);
+ return -EFAULT;
+ }
+
+ buf_p->y_buffer_addr += buf_cmd.offset + buf_cmd.y_off;
+
+ if (buf_cmd.cbcr_len)
+ buf_p->cbcr_buffer_addr = buf_p->y_buffer_addr +
+ buf_cmd.y_len;
+ else
+ buf_p->cbcr_buffer_addr = 0x0;
+
+ if (buf_cmd.pln2_len)
+ buf_p->pln2_addr = buf_p->cbcr_buffer_addr +
+ buf_cmd.cbcr_len;
+ else
+ buf_p->pln2_addr = 0x0;
+
+ JPEG_DBG("%s:%d]After v2p pln0_addr %x pln0_len %d",
+ __func__, __LINE__, buf_p->y_buffer_addr,
+ buf_cmd.y_len);
+
+ JPEG_DBG("pl1_len %d, pln1_addr %x, pln2_adrr %x,pln2_len %d",
+ buf_cmd.cbcr_len, buf_p->cbcr_buffer_addr,
+ buf_p->pln2_addr, buf_cmd.pln2_len);
+
+ buf_p->y_len = buf_cmd.y_len;
+ buf_p->cbcr_len = buf_cmd.cbcr_len;
+ buf_p->pln2_len = buf_cmd.pln2_len;
+ buf_p->vbuf = buf_cmd;
+
+ msm_jpeg_q_in(&pgmn_dev->output_buf_q, buf_p);
+ return 0;
+}
+
+/*************** input queue ****************/
+
+int msm_jpeg_fe_pingpong_irq(struct msm_jpeg_device *pgmn_dev,
+ struct msm_jpeg_core_buf *buf_in)
+{
+ struct msm_jpeg_core_buf *buf_out;
+ int rc = 0;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ if (buf_in) {
+ JPEG_DBG("%s:%d] 0x%08x %d\n", __func__, __LINE__,
+ (int) buf_in->y_buffer_addr, buf_in->y_len);
+ rc = msm_jpeg_q_in_buf(&pgmn_dev->input_rtn_q, buf_in);
+ } else {
+ JPEG_DBG("%s:%d] no input return buffer\n", __func__,
+ __LINE__);
+ rc = -EFAULT;
+ }
+
+ buf_out = msm_jpeg_q_out(&pgmn_dev->input_buf_q);
+
+ if (buf_out) {
+ rc = msm_jpeg_core_fe_buf_update(pgmn_dev, buf_out);
+ kfree(buf_out);
+ msm_jpeg_core_fe_start(pgmn_dev);
+ } else {
+ JPEG_DBG("%s:%d] no input buffer\n", __func__, __LINE__);
+ rc = -EFAULT;
+ }
+
+ if (buf_in)
+ rc = msm_jpeg_q_wakeup(&pgmn_dev->input_rtn_q);
+
+ return rc;
+}
+
+int msm_jpeg_input_get(struct msm_jpeg_device *pgmn_dev, void __user *to)
+{
+ struct msm_jpeg_core_buf *buf_p;
+ struct msm_jpeg_buf buf_cmd;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_q_wait(&pgmn_dev->input_rtn_q);
+ buf_p = msm_jpeg_q_out(&pgmn_dev->input_rtn_q);
+
+ if (!buf_p) {
+ JPEG_DBG("%s:%d] no input buffer return\n",
+ __func__, __LINE__);
+ return -EAGAIN;
+ }
+
+ buf_cmd = buf_p->vbuf;
+
+ msm_jpeg_platform_p2v(pgmn_dev->iommu_hdl, buf_p->ion_fd);
+ kfree(buf_p);
+
+ JPEG_DBG("%s:%d] 0x%08lx %d\n", __func__, __LINE__,
+ (unsigned long) buf_cmd.vaddr, buf_cmd.y_len);
+
+ if (copy_to_user(to, &buf_cmd, sizeof(buf_cmd))) {
+ JPEG_PR_ERR("%s:%d]\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int msm_jpeg_input_get_unblock(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_q_unblock(&pgmn_dev->input_rtn_q);
+ return 0;
+}
+
+int msm_jpeg_input_buf_enqueue(struct msm_jpeg_device *pgmn_dev,
+ void __user *arg)
+{
+ struct msm_jpeg_core_buf *buf_p;
+ struct msm_jpeg_buf buf_cmd;
+ uint32_t buf_len_params[10];
+ uint32_t total_len = 0;
+ int n = 0;
+
+ memset(&buf_cmd, 0x0, sizeof(struct msm_jpeg_buf));
+
+ if (copy_from_user(&buf_cmd, arg, sizeof(struct msm_jpeg_buf))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ buf_len_params[n++] = buf_cmd.y_len;
+ buf_len_params[n++] = buf_cmd.cbcr_len;
+ buf_len_params[n++] = buf_cmd.pln2_len;
+ buf_len_params[n++] = buf_cmd.offset;
+ buf_len_params[n++] = buf_cmd.y_off;
+ if (buf_cmd.cbcr_len)
+ buf_len_params[n++] = buf_cmd.cbcr_off;
+ if (buf_cmd.pln2_len)
+ buf_len_params[n++] = buf_cmd.pln2_off;
+
+ if (msm_jpeg_add_u32_check(buf_len_params, n, &total_len) < 0) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ buf_p = kmalloc(sizeof(struct msm_jpeg_core_buf), GFP_ATOMIC);
+ if (!buf_p) {
+ JPEG_PR_ERR("%s:%d] no mem\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ JPEG_DBG("%s:%d] 0x%08lx %d\n", __func__, __LINE__,
+ (unsigned long) buf_cmd.vaddr, buf_cmd.y_len);
+
+ buf_p->ion_fd = buf_cmd.fd;
+ buf_p->y_buffer_addr = msm_jpeg_platform_v2p(pgmn_dev, buf_cmd.fd,
+ total_len, pgmn_dev->iommu_hdl);
+
+ if (!buf_p->y_buffer_addr) {
+ JPEG_PR_ERR("%s:%d] v2p wrong\n", __func__, __LINE__);
+ kfree(buf_p);
+ return -EFAULT;
+ }
+
+ buf_p->y_buffer_addr += buf_cmd.offset + buf_cmd.y_off;
+
+ buf_p->y_len = buf_cmd.y_len;
+ buf_p->cbcr_len = buf_cmd.cbcr_len;
+ buf_p->pln2_len = buf_cmd.pln2_len;
+ buf_p->num_of_mcu_rows = buf_cmd.num_of_mcu_rows;
+
+ if (buf_cmd.cbcr_len)
+ buf_p->cbcr_buffer_addr = buf_p->y_buffer_addr +
+ buf_cmd.y_len + buf_cmd.cbcr_off;
+ else
+ buf_p->cbcr_buffer_addr = 0x0;
+
+ if (buf_cmd.pln2_len)
+ buf_p->pln2_addr = buf_p->cbcr_buffer_addr +
+ buf_cmd.cbcr_len + buf_cmd.pln2_off;
+ else
+ buf_p->pln2_addr = 0x0;
+
+ JPEG_DBG("%s: y_addr=%x, y_len=%x, cbcr_addr=%x, cbcr_len=%d",
+ __func__, buf_p->y_buffer_addr, buf_p->y_len,
+ buf_p->cbcr_buffer_addr, buf_p->cbcr_len);
+ JPEG_DBG("pln2_addr = %x, pln2_len = %d, fd =%d\n",
+ buf_p->pln2_addr, buf_p->pln2_len, buf_cmd.fd);
+
+ buf_p->vbuf = buf_cmd;
+
+ msm_jpeg_q_in(&pgmn_dev->input_buf_q, buf_p);
+
+ return 0;
+}
+
+int msm_jpeg_irq(int event, void *context, void *data)
+{
+ struct msm_jpeg_device *pgmn_dev =
+ (struct msm_jpeg_device *) context;
+
+ switch (event) {
+ case MSM_JPEG_EVT_SESSION_DONE:
+ msm_jpeg_framedone_irq(pgmn_dev, data);
+ msm_jpeg_we_pingpong_irq(pgmn_dev, data);
+ break;
+
+ case MSM_JPEG_HW_MASK_COMP_FE:
+ msm_jpeg_fe_pingpong_irq(pgmn_dev, data);
+ break;
+
+ case MSM_JPEG_HW_MASK_COMP_WE:
+ msm_jpeg_we_pingpong_irq(pgmn_dev, data);
+ break;
+
+ case MSM_JPEG_HW_MASK_COMP_RESET_ACK:
+ msm_jpeg_reset_ack_irq(pgmn_dev);
+ break;
+
+ case MSM_JPEG_HW_MASK_COMP_ERR:
+ default:
+ msm_jpeg_err_irq(pgmn_dev, event);
+ break;
+ }
+
+ return 0;
+}
+
+int __msm_jpeg_open(struct msm_jpeg_device *pgmn_dev)
+{
+ int rc;
+ irqreturn_t (*core_irq)(int, void *);
+ mutex_lock(&pgmn_dev->lock);
+ if (pgmn_dev->open_count) {
+ /* only open once */
+ JPEG_PR_ERR("%s:%d] busy\n", __func__, __LINE__);
+ mutex_unlock(&pgmn_dev->lock);
+ return -EBUSY;
+ }
+ pgmn_dev->open_count++;
+ if (pgmn_dev->open_count == 1)
+ pgmn_dev->state = MSM_JPEG_INIT;
+
+ mutex_unlock(&pgmn_dev->lock);
+
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_JPEG, CAMERA_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ msm_jpeg_core_irq_install(msm_jpeg_irq);
+ if (pgmn_dev->core_type == MSM_JPEG_CORE_CODEC)
+ core_irq = msm_jpeg_core_irq;
+ else
+ core_irq = msm_jpegdma_core_irq;
+
+ rc = msm_jpeg_platform_init(pgmn_dev->pdev,
+ &pgmn_dev->mem, &pgmn_dev->base,
+ &pgmn_dev->irq, core_irq, pgmn_dev);
+ if (rc) {
+ JPEG_PR_ERR("%s:%d] platform_init fail %d\n", __func__,
+ __LINE__, rc);
+ goto platform_init_fail;
+ }
+
+ JPEG_DBG("%s:%d] platform resources - mem %p, base %p, irq %d\n",
+ __func__, __LINE__,
+ pgmn_dev->mem, pgmn_dev->base, pgmn_dev->irq);
+ pgmn_dev->res_size = resource_size(pgmn_dev->mem);
+
+ msm_jpeg_q_cleanup(&pgmn_dev->evt_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->output_rtn_q);
+ msm_jpeg_outbuf_q_cleanup(pgmn_dev, &pgmn_dev->output_buf_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->input_rtn_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->input_buf_q);
+ msm_jpeg_core_init(pgmn_dev);
+
+ JPEG_DBG("%s:%d] success\n", __func__, __LINE__);
+ return rc;
+
+platform_init_fail:
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_JPEG,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return rc;
+}
+
+int __msm_jpeg_release(struct msm_jpeg_device *pgmn_dev)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ mutex_lock(&pgmn_dev->lock);
+ if (!pgmn_dev->open_count) {
+ JPEG_PR_ERR("%s: not opened\n", __func__);
+ mutex_unlock(&pgmn_dev->lock);
+ return -EINVAL;
+ }
+ pgmn_dev->open_count--;
+ mutex_unlock(&pgmn_dev->lock);
+
+ msm_jpeg_core_release(pgmn_dev);
+ msm_jpeg_q_cleanup(&pgmn_dev->evt_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->output_rtn_q);
+ msm_jpeg_outbuf_q_cleanup(pgmn_dev, &pgmn_dev->output_buf_q);
+ msm_jpeg_q_cleanup(&pgmn_dev->input_rtn_q);
+ msm_jpeg_outbuf_q_cleanup(pgmn_dev, &pgmn_dev->input_buf_q);
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+ if (pgmn_dev->open_count)
+ JPEG_PR_ERR("%s: multiple opens\n", __func__);
+
+ msm_jpeg_platform_release(pgmn_dev->mem, pgmn_dev->base,
+ pgmn_dev->irq, pgmn_dev);
+
+ JPEG_DBG("%s:%d]\n", __func__, __LINE__);
+
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_JPEG,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+
+ return 0;
+}
+
+int msm_jpeg_ioctl_hw_cmd(struct msm_jpeg_device *pgmn_dev,
+ void * __user arg)
+{
+ struct msm_jpeg_hw_cmd hw_cmd;
+ int is_copy_to_user;
+
+ if (copy_from_user(&hw_cmd, arg, sizeof(struct msm_jpeg_hw_cmd))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ is_copy_to_user = msm_jpeg_hw_exec_cmds(&hw_cmd, 1,
+ pgmn_dev->res_size, pgmn_dev->base);
+ JPEG_DBG(
+ "%s:%d] type %d, n %d, offset %d, mask %x, data %x, pdata %lx\n",
+ __func__, __LINE__, hw_cmd.type, hw_cmd.n, hw_cmd.offset,
+ hw_cmd.mask, hw_cmd.data, (unsigned long) hw_cmd.pdata);
+
+ if (is_copy_to_user >= 0) {
+ if (copy_to_user(arg, &hw_cmd, sizeof(hw_cmd))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ } else {
+ return is_copy_to_user;
+ }
+
+ return 0;
+}
+
+int msm_jpeg_ioctl_hw_cmds(struct msm_jpeg_device *pgmn_dev,
+ void * __user arg)
+{
+ int is_copy_to_user;
+ uint32_t len;
+ uint32_t m;
+ struct msm_jpeg_hw_cmds *hw_cmds_p;
+ struct msm_jpeg_hw_cmd *hw_cmd_p;
+
+ if (copy_from_user(&m, arg, sizeof(m))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if ((m == 0) || (m > ((UINT32_MAX - sizeof(struct msm_jpeg_hw_cmds)) /
+ sizeof(struct msm_jpeg_hw_cmd)))) {
+ JPEG_PR_ERR("%s:%d] m_cmds out of range\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ len = sizeof(struct msm_jpeg_hw_cmds) +
+ sizeof(struct msm_jpeg_hw_cmd) * (m - 1);
+ hw_cmds_p = kmalloc(len, GFP_KERNEL);
+ if (!hw_cmds_p) {
+ JPEG_PR_ERR("%s:%d] no mem %d\n", __func__, __LINE__, len);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(hw_cmds_p, arg, len)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ kfree(hw_cmds_p);
+ return -EFAULT;
+ }
+
+ hw_cmd_p = (struct msm_jpeg_hw_cmd *) &(hw_cmds_p->hw_cmd);
+
+ is_copy_to_user = msm_jpeg_hw_exec_cmds(hw_cmd_p, m,
+ pgmn_dev->res_size, pgmn_dev->base);
+
+ if (is_copy_to_user >= 0) {
+ if (copy_to_user(arg, hw_cmds_p, len)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ kfree(hw_cmds_p);
+ return -EFAULT;
+ }
+ } else {
+ kfree(hw_cmds_p);
+ return is_copy_to_user;
+ }
+ kfree(hw_cmds_p);
+ return 0;
+}
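+
+/*
+ * struct msm_jpeg_hw_cmds already embeds one hw_cmd, so the allocation in
+ * msm_jpeg_ioctl_hw_cmds() is sizeof(hw_cmds) + (m - 1) * sizeof(hw_cmd),
+ * and the range check on m keeps that expression from wrapping a u32.
+ * A rough sketch with illustrative (not actual) struct sizes:
+ *
+ * sizeof(struct msm_jpeg_hw_cmds) = 24, sizeof(struct msm_jpeg_hw_cmd) = 20
+ * m = 3 -> len = 24 + 20 * 2 = 64 bytes copied from and back to userspace
+ */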
+
+int msm_jpeg_start(struct msm_jpeg_device *pgmn_dev, void * __user arg,
+ int (*hw_ioctl)(struct msm_jpeg_device *, void * __user))
+{
+ struct msm_jpeg_core_buf *buf_out;
+ struct msm_jpeg_core_buf *buf_out_free[2] = {NULL, NULL};
+ int i, rc;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+
+ msm_jpeg_platform_set_dt_config(pgmn_dev);
+
+ msm_bus_scale_client_update_request(
+ pgmn_dev->jpeg_bus_client, 1);
+ JPEG_BUS_VOTED(pgmn_dev);
+ JPEG_DBG("%s:%d] Bus Voted\n", __func__, __LINE__);
+
+ pgmn_dev->release_buf = 1;
+ for (i = 0; i < 2; i++) {
+ buf_out = msm_jpeg_q_out(&pgmn_dev->input_buf_q);
+
+ if (buf_out) {
+ msm_jpeg_core_fe_buf_update(pgmn_dev, buf_out);
+ kfree(buf_out);
+ } else {
+ JPEG_DBG("%s:%d] no input buffer\n", __func__,
+ __LINE__);
+ break;
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ buf_out_free[i] = msm_jpeg_q_out(&pgmn_dev->output_buf_q);
+
+ if (buf_out_free[i]) {
+ msm_jpeg_core_we_buf_update(pgmn_dev, buf_out_free[i]);
+ pgmn_dev->release_buf = 0;
+ } else {
+ JPEG_DBG("%s:%d] no output buffer\n",
+ __func__, __LINE__);
+ break;
+ }
+ }
+
+ for (i = 0; i < 2; i++)
+ kfree(buf_out_free[i]);
+
+ JPEG_DBG_HIGH("%s:%d] START\n", __func__, __LINE__);
+ pgmn_dev->state = MSM_JPEG_EXECUTING;
+ /* ensure write is done */
+ wmb();
+ rc = hw_ioctl(pgmn_dev, arg);
+ /* ensure write is done */
+ wmb();
+ JPEG_DBG("%s:%d]", __func__, __LINE__);
+ return rc;
+}
+
+int msm_jpeg_ioctl_reset(struct msm_jpeg_device *pgmn_dev, void * __user arg)
+{
+ int rc;
+ struct msm_jpeg_ctrl_cmd ctrl_cmd, *p_ctrl_cmd;
+
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ p_ctrl_cmd = &ctrl_cmd;
+
+ if (pgmn_dev->state == MSM_JPEG_INIT) {
+ if (copy_from_user(&ctrl_cmd, arg, sizeof(ctrl_cmd))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ pgmn_dev->op_mode = p_ctrl_cmd->type;
+
+ rc = msm_jpeg_core_reset(pgmn_dev, pgmn_dev->op_mode,
+ pgmn_dev->base, resource_size(pgmn_dev->mem));
+ } else {
+ JPEG_PR_ERR("%s:%d] JPEG not been initialized Wrong state\n",
+ __func__, __LINE__);
+ rc = -1;
+ }
+ return rc;
+}
+
+int msm_jpeg_ioctl_test_dump_region(struct msm_jpeg_device *pgmn_dev,
+ unsigned long arg)
+{
+ JPEG_DBG("%s:%d] Enter\n", __func__, __LINE__);
+ msm_jpeg_io_dump(pgmn_dev->base, JPEG_REG_SIZE);
+ return 0;
+}
+
+int msm_jpeg_ioctl_set_clk_rate(struct msm_jpeg_device *pgmn_dev,
+ void * __user arg)
+{
+ long clk_rate;
+ int rc;
+
+ if ((pgmn_dev->state != MSM_JPEG_INIT) &&
+ (pgmn_dev->state != MSM_JPEG_RESET)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ if (get_user(clk_rate, (unsigned int __user *)arg)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ JPEG_DBG("%s:%d] Requested clk rate %ld\n", __func__, __LINE__,
+ clk_rate);
+ if (clk_rate < 0) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ rc = msm_jpeg_platform_set_clk_rate(pgmn_dev, clk_rate);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#ifdef CONFIG_COMPAT
+int msm_jpeg_get_ctrl_cmd32(struct msm_jpeg_ctrl_cmd *ctrl_cmd,
+ void __user *arg)
+{
+ struct msm_jpeg_ctrl_cmd32 ctrl_cmd32;
+ unsigned long temp;
+ if (copy_from_user(&ctrl_cmd32, arg,
+ sizeof(struct msm_jpeg_ctrl_cmd32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ ctrl_cmd->type = ctrl_cmd32.type;
+ ctrl_cmd->len = ctrl_cmd32.len;
+ temp = (unsigned long) ctrl_cmd32.value;
+ ctrl_cmd->value = (void *) temp;
+
+ return 0;
+}
+int msm_jpeg_put_ctrl_cmd32(struct msm_jpeg_ctrl_cmd *ctrl_cmd,
+ void __user *arg)
+{
+ struct msm_jpeg_ctrl_cmd32 ctrl_cmd32;
+ unsigned long temp;
+
+ ctrl_cmd32.type = ctrl_cmd->type;
+ ctrl_cmd32.len = ctrl_cmd->len;
+ temp = (unsigned long) ctrl_cmd->value;
+ ctrl_cmd32.value = (compat_uptr_t) temp;
+
+ if (copy_to_user(arg, &ctrl_cmd32,
+ sizeof(struct msm_jpeg_ctrl_cmd32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+int msm_jpeg_get_jpeg_buf32(struct msm_jpeg_buf *jpeg_buf,
+ void __user *arg)
+{
+ struct msm_jpeg_buf32 jpeg_buf32;
+ unsigned long temp;
+ if (copy_from_user(&jpeg_buf32, arg, sizeof(struct msm_jpeg_buf32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ jpeg_buf->type = jpeg_buf32.type;
+ jpeg_buf->fd = jpeg_buf32.fd;
+ temp = (unsigned long) jpeg_buf32.vaddr;
+ jpeg_buf->vaddr = (void *) temp;
+ jpeg_buf->y_off = jpeg_buf32.y_off;
+ jpeg_buf->y_len = jpeg_buf32.y_len;
+ jpeg_buf->framedone_len = jpeg_buf32.framedone_len;
+ jpeg_buf->cbcr_off = jpeg_buf32.cbcr_off;
+ jpeg_buf->cbcr_len = jpeg_buf32.cbcr_len;
+ jpeg_buf->num_of_mcu_rows = jpeg_buf32.num_of_mcu_rows;
+ jpeg_buf->offset = jpeg_buf32.offset;
+ jpeg_buf->pln2_off = jpeg_buf32.pln2_off;
+ jpeg_buf->pln2_len = jpeg_buf32.pln2_len;
+
+ return 0;
+}
+int msm_jpeg_put_jpeg_buf32(struct msm_jpeg_buf *jpeg_buf,
+ void __user *arg)
+{
+ struct msm_jpeg_buf32 jpeg_buf32;
+ unsigned long temp;
+
+ jpeg_buf32.type = jpeg_buf->type;
+ jpeg_buf32.fd = jpeg_buf->fd;
+ temp = (unsigned long) jpeg_buf->vaddr;
+ jpeg_buf32.vaddr = (compat_uptr_t) temp;
+ jpeg_buf32.y_off = jpeg_buf->y_off;
+ jpeg_buf32.y_len = jpeg_buf->y_len;
+ jpeg_buf32.framedone_len = jpeg_buf->framedone_len;
+ jpeg_buf32.cbcr_off = jpeg_buf->cbcr_off;
+ jpeg_buf32.cbcr_len = jpeg_buf->cbcr_len;
+ jpeg_buf32.num_of_mcu_rows = jpeg_buf->num_of_mcu_rows;
+ jpeg_buf32.offset = jpeg_buf->offset;
+ jpeg_buf32.pln2_off = jpeg_buf->pln2_off;
+ jpeg_buf32.pln2_len = jpeg_buf->pln2_len;
+
+ if (copy_to_user(arg, &jpeg_buf32, sizeof(struct msm_jpeg_buf32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int msm_jpeg_put_hw_cmd32(void __user *arg,
+ struct msm_jpeg_hw_cmd *phw_cmd, int copy)
+{
+ struct msm_jpeg_hw_cmd32 hw_cmd32;
+ struct msm_jpeg_hw_cmd32 *phw_cmd32;
+
+ phw_cmd32 = (struct msm_jpeg_hw_cmd32 *) arg;
+ if (copy)
+ phw_cmd32 = &hw_cmd32;
+
+
+ phw_cmd32->type = phw_cmd->type;
+ phw_cmd32->n = phw_cmd->n;
+ phw_cmd32->offset = phw_cmd->offset;
+ phw_cmd32->mask = phw_cmd->mask;
+ phw_cmd32->data = phw_cmd->data;
+
+ if (copy && copy_to_user(arg, &hw_cmd32, sizeof(hw_cmd32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+int msm_jpeg_get_hw_cmd32(struct msm_jpeg_hw_cmd *phw_cmd,
+ void __user *arg, int copy)
+{
+ struct msm_jpeg_hw_cmd32 hw_cmd32;
+ struct msm_jpeg_hw_cmd32 *phw_cmd32;
+
+ if (copy) {
+ phw_cmd32 = &hw_cmd32;
+ if (copy_from_user(&hw_cmd32, arg, sizeof(hw_cmd32))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ } else {
+ phw_cmd32 = (struct msm_jpeg_hw_cmd32 *) arg;
+ }
+ phw_cmd->type = phw_cmd32->type;
+ phw_cmd->n = phw_cmd32->n;
+ phw_cmd->offset = phw_cmd32->offset;
+ phw_cmd->mask = phw_cmd32->mask;
+ phw_cmd->data = phw_cmd32->data;
+
+ return 0;
+}
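+
+/*
+ * In the two helpers above the "copy" flag also encodes the pointer type:
+ * copy == 1 means arg is a userspace pointer and copy_{from,to}_user() is
+ * needed (single MSM_JPEG_IOCTL_HW_CMD32 path), while copy == 0 means arg
+ * already points at kernel memory bulk-copied by msm_jpeg_ioctl_hw_cmds32(),
+ * so only a field-by-field conversion is done, e.g.:
+ *
+ * msm_jpeg_get_hw_cmd32(&hw_cmd, user_arg, 1) - copies in from userspace
+ * msm_jpeg_get_hw_cmd32(dst, src32, 0) - kernel-to-kernel conversion
+ */
+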
+int msm_jpeg_ioctl_hw_cmds32(struct msm_jpeg_device *pgmn_dev,
+ void __user *arg)
+{
+ int is_copy_to_user;
+ uint32_t len, len32;
+ uint32_t m;
+ struct msm_jpeg_hw_cmds32 *phw_cmds32;
+ struct msm_jpeg_hw_cmds *phw_cmds;
+
+ if (copy_from_user(&m, arg, sizeof(m))) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if ((m == 0) || (m > ((UINT32_MAX - sizeof(struct msm_jpeg_hw_cmds32)) /
+ sizeof(struct msm_jpeg_hw_cmd32)))) {
+ JPEG_PR_ERR("%s:%d] m_cmds out of range\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ len32 = sizeof(struct msm_jpeg_hw_cmds32) +
+ sizeof(struct msm_jpeg_hw_cmd32) * (m - 1);
+ phw_cmds32 = kmalloc(len32, GFP_KERNEL);
+ if (!phw_cmds32) {
+ JPEG_PR_ERR("%s:%d] no mem %d\n", __func__, __LINE__, len32);
+ return -EFAULT;
+ }
+
+ if (copy_from_user(phw_cmds32, arg, len32)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ kfree(phw_cmds32);
+ return -EFAULT;
+ }
+ len = sizeof(struct msm_jpeg_hw_cmds) +
+ sizeof(struct msm_jpeg_hw_cmd) * (m - 1);
+ phw_cmds = kmalloc(len, GFP_KERNEL);
+ if (!phw_cmds) {
+ JPEG_PR_ERR("%s:%d] no mem %d\n", __func__, __LINE__, len);
+ kfree(phw_cmds32);
+ return -EFAULT;
+ }
+ (phw_cmds)->m = m;
+ while (m--) {
+ struct msm_jpeg_hw_cmd32 *src;
+ struct msm_jpeg_hw_cmd *dst;
+ src = &phw_cmds32->hw_cmd[m];
+ dst = &(phw_cmds)->hw_cmd[m];
+ msm_jpeg_get_hw_cmd32(dst, src, 0);
+ }
+
+ is_copy_to_user = msm_jpeg_hw_exec_cmds(phw_cmds->hw_cmd, phw_cmds->m,
+ pgmn_dev->res_size, pgmn_dev->base);
+
+ if (is_copy_to_user >= 0) {
+ m = phw_cmds->m;
+ while (m--) {
+ struct msm_jpeg_hw_cmd *src;
+ struct msm_jpeg_hw_cmd32 *dst;
+ dst = &phw_cmds32->hw_cmd[m];
+ src = &phw_cmds->hw_cmd[m];
+
+ msm_jpeg_put_hw_cmd32(dst, src, 0);
+ }
+ if (copy_to_user(arg, phw_cmds32, len32)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ kfree(phw_cmds);
+ kfree(phw_cmds32);
+ return -EFAULT;
+ }
+
+ } else {
+ kfree(phw_cmds);
+ kfree(phw_cmds32);
+ return is_copy_to_user;
+ }
+ kfree(phw_cmds);
+ kfree(phw_cmds32);
+
+ return 0;
+}
+int msm_jpeg_ioctl_hw_cmd32(struct msm_jpeg_device *pgmn_dev,
+ void * __user arg)
+{
+ struct msm_jpeg_hw_cmd hw_cmd;
+ int is_copy_to_user;
+
+ if (msm_jpeg_get_hw_cmd32(&hw_cmd, arg, 1)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ is_copy_to_user = msm_jpeg_hw_exec_cmds(&hw_cmd, 1,
+ pgmn_dev->res_size, pgmn_dev->base);
+ JPEG_DBG("%s:%d] type %d, n %d, offst %d, mask %x, data %x pdata %lx\n",
+ __func__, __LINE__, hw_cmd.type, hw_cmd.n, hw_cmd.offset,
+ hw_cmd.mask, hw_cmd.data, (unsigned long) hw_cmd.pdata);
+
+ if (is_copy_to_user >= 0) {
+ if (msm_jpeg_put_hw_cmd32(arg, &hw_cmd, 1)) {
+ JPEG_PR_ERR("%s:%d] failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ } else
+ return is_copy_to_user;
+
+
+ return 0;
+}
+
+long __msm_jpeg_compat_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ struct msm_jpeg_ctrl_cmd ctrl_cmd;
+ struct msm_jpeg_buf jpeg_buf;
+ mm_segment_t old_fs;
+
+ old_fs = get_fs();
+
+ switch (cmd) {
+ case MSM_JPEG_IOCTL_GET_HW_VERSION:
+ JPEG_DBG("%s:%d] VERSION 1\n", __func__, __LINE__);
+ rc = msm_jpeg_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+ break;
+ case MSM_JPEG_IOCTL_GET_HW_VERSION32:
+ JPEG_DBG("%s:%d] VERSION 1 32bit\n", __func__, __LINE__);
+ rc = msm_jpeg_ioctl_hw_cmd32(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_RESET:
+ rc = msm_jpeg_ioctl_reset(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_RESET32:
+ rc = msm_jpeg_get_ctrl_cmd32(&ctrl_cmd,
+ (void __user *) arg);
+ if (rc < 0)
+ break;
+
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_ioctl_reset(pgmn_dev, (void __user *) &ctrl_cmd);
+ set_fs(old_fs);
+ break;
+
+ case MSM_JPEG_IOCTL_STOP:
+ rc = msm_jpeg_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+ pgmn_dev->state = MSM_JPEG_STOPPED;
+ break;
+
+ case MSM_JPEG_IOCTL_STOP32:
+ rc = msm_jpeg_ioctl_hw_cmds32(pgmn_dev, (void __user *) arg);
+ pgmn_dev->state = MSM_JPEG_STOPPED;
+ break;
+
+ case MSM_JPEG_IOCTL_START:
+ rc = msm_jpeg_start(pgmn_dev, (void __user *) arg,
+ msm_jpeg_ioctl_hw_cmds);
+ break;
+
+ case MSM_JPEG_IOCTL_START32:
+ rc = msm_jpeg_start(pgmn_dev, (void __user *) arg,
+ msm_jpeg_ioctl_hw_cmds32);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE:
+ rc = msm_jpeg_input_buf_enqueue(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE32:
+ rc = msm_jpeg_get_jpeg_buf32(&jpeg_buf, (void __user *) arg);
+ if (rc < 0)
+ break;
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_input_buf_enqueue(pgmn_dev,
+ (void __user *) &jpeg_buf);
+ set_fs(old_fs);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET:
+ rc = msm_jpeg_input_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET32:
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_input_get(pgmn_dev, (void __user *) &jpeg_buf);
+ set_fs(old_fs);
+ if (rc < 0)
+ break;
+ rc = msm_jpeg_put_jpeg_buf32(&jpeg_buf, (void __user *) arg);
+
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET_UNBLOCK:
+ rc = msm_jpeg_input_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE:
+ rc = msm_jpeg_output_buf_enqueue(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE32:
+ rc = msm_jpeg_get_jpeg_buf32(&jpeg_buf, (void __user *) arg);
+ if (rc < 0)
+ break;
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_output_buf_enqueue(pgmn_dev,
+ (void __user *) &jpeg_buf);
+ set_fs(old_fs);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET:
+ rc = msm_jpeg_output_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET32:
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_output_get(pgmn_dev, (void __user *) &jpeg_buf);
+ set_fs(old_fs);
+ if (rc < 0)
+ break;
+ rc = msm_jpeg_put_jpeg_buf32(&jpeg_buf, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET_UNBLOCK:
+ rc = msm_jpeg_output_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET:
+ rc = msm_jpeg_evt_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET32:
+ set_fs(KERNEL_DS);
+ rc = msm_jpeg_evt_get(pgmn_dev, (void __user *) &ctrl_cmd);
+ set_fs(old_fs);
+ if (rc < 0)
+ break;
+ rc = msm_jpeg_put_ctrl_cmd32(&ctrl_cmd, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET_UNBLOCK:
+ rc = msm_jpeg_evt_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMD32:
+ rc = msm_jpeg_ioctl_hw_cmd32(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMD:
+ rc = msm_jpeg_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMDS32:
+ rc = msm_jpeg_ioctl_hw_cmds32(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMDS:
+ rc = msm_jpeg_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_TEST_DUMP_REGION:
+ rc = msm_jpeg_ioctl_test_dump_region(pgmn_dev, arg);
+ break;
+
+ case MSM_JPEG_IOCTL_TEST_DUMP_REGION32:
+ rc = msm_jpeg_ioctl_test_dump_region(pgmn_dev, arg);
+ break;
+
+ case MSM_JPEG_IOCTL_SET_CLK_RATE:
+ rc = msm_jpeg_ioctl_set_clk_rate(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ default:
+ JPEG_PR_ERR("%s:%d] cmd = %d not supported\n",
+ __func__, __LINE__, _IOC_NR(cmd));
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+#else
+long __msm_jpeg_compat_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg)
+{
+ return 0;
+}
+#endif
+
+long __msm_jpeg_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ switch (cmd) {
+ case MSM_JPEG_IOCTL_GET_HW_VERSION:
+ JPEG_DBG("%s:%d] VERSION 1\n", __func__, __LINE__);
+ rc = msm_jpeg_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_RESET:
+ rc = msm_jpeg_ioctl_reset(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_STOP:
+ rc = msm_jpeg_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+ pgmn_dev->state = MSM_JPEG_STOPPED;
+ break;
+
+ case MSM_JPEG_IOCTL_START:
+ rc = msm_jpeg_start(pgmn_dev, (void __user *) arg,
+ msm_jpeg_ioctl_hw_cmds);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE:
+ rc = msm_jpeg_input_buf_enqueue(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET:
+ rc = msm_jpeg_input_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_INPUT_GET_UNBLOCK:
+ rc = msm_jpeg_input_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE:
+ rc = msm_jpeg_output_buf_enqueue(pgmn_dev,
+ (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET:
+ rc = msm_jpeg_output_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_OUTPUT_GET_UNBLOCK:
+ rc = msm_jpeg_output_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET:
+ rc = msm_jpeg_evt_get(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_EVT_GET_UNBLOCK:
+ rc = msm_jpeg_evt_get_unblock(pgmn_dev);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMD:
+ rc = msm_jpeg_ioctl_hw_cmd(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_HW_CMDS:
+ rc = msm_jpeg_ioctl_hw_cmds(pgmn_dev, (void __user *) arg);
+ break;
+
+ case MSM_JPEG_IOCTL_TEST_DUMP_REGION:
+ rc = msm_jpeg_ioctl_test_dump_region(pgmn_dev, arg);
+ break;
+
+ case MSM_JPEG_IOCTL_SET_CLK_RATE:
+ rc = msm_jpeg_ioctl_set_clk_rate(pgmn_dev, (void __user *) arg);
+ break;
+ default:
+ pr_err_ratelimited("%s:%d] cmd = %d not supported\n",
+ __func__, __LINE__, _IOC_NR(cmd));
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
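+
+/*
+ * A minimal, hypothetical userspace sequence against the ioctl table above
+ * (device node name and error handling are illustrative; the actual HW
+ * command payloads come from the userspace JPEG library and are not shown):
+ *
+ * fd = open("/dev/jpeg0", O_RDWR);
+ * ioctl(fd, MSM_JPEG_IOCTL_GET_HW_VERSION, &ver_cmd);
+ * ioctl(fd, MSM_JPEG_IOCTL_RESET, &ctrl_cmd);
+ * ioctl(fd, MSM_JPEG_IOCTL_SET_CLK_RATE, &clk_rate);
+ * ioctl(fd, MSM_JPEG_IOCTL_INPUT_BUF_ENQUEUE, &in_buf);
+ * ioctl(fd, MSM_JPEG_IOCTL_OUTPUT_BUF_ENQUEUE, &out_buf);
+ * ioctl(fd, MSM_JPEG_IOCTL_START, &hw_cmds);
+ * ioctl(fd, MSM_JPEG_IOCTL_EVT_GET, &evt_cmd); - blocks until session done
+ * ioctl(fd, MSM_JPEG_IOCTL_OUTPUT_GET, &out_buf);
+ * ioctl(fd, MSM_JPEG_IOCTL_STOP, &hw_cmds);
+ */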
+
+int __msm_jpeg_init(struct msm_jpeg_device *pgmn_dev)
+{
+ int rc = 0;
+ int idx = 0;
+
+ char *iommu_name[JPEG_DEV_CNT] = {"jpeg_enc0", "jpeg_enc1",
+ "jpeg_dec", "jpeg_dma"};
+
+
+ mutex_init(&pgmn_dev->lock);
+
+ pr_err("%s:%d] Jpeg Device id %d", __func__, __LINE__,
+ pgmn_dev->pdev->id);
+ idx = pgmn_dev->pdev->id;
+ pgmn_dev->idx = idx;
+ pgmn_dev->decode_flag = (idx == JPEG_DEC_ID);
+
+ msm_jpeg_q_init("evt_q", &pgmn_dev->evt_q);
+ msm_jpeg_q_init("output_rtn_q", &pgmn_dev->output_rtn_q);
+ msm_jpeg_q_init("output_buf_q", &pgmn_dev->output_buf_q);
+ msm_jpeg_q_init("input_rtn_q", &pgmn_dev->input_rtn_q);
+ msm_jpeg_q_init("input_buf_q", &pgmn_dev->input_buf_q);
+
+ /*get device context for IOMMU*/
+ rc = cam_smmu_get_handle(iommu_name[idx], &pgmn_dev->iommu_hdl);
+ JPEG_DBG("%s:%d] hdl %d", __func__, __LINE__,
+ pgmn_dev->iommu_hdl);
+ if (rc < 0) {
+ JPEG_PR_ERR("%s: No iommu fw context found\n",
+ __func__);
+ goto error;
+ }
+
+ return rc;
+
+error:
+ mutex_destroy(&pgmn_dev->lock);
+ return -EFAULT;
+}
+
+int __msm_jpeg_exit(struct msm_jpeg_device *pgmn_dev)
+{
+ mutex_destroy(&pgmn_dev->lock);
+ kfree(pgmn_dev);
+ return 0;
+}
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.h b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.h
new file mode 100644
index 000000000000..b7f655afe2cd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_10/msm_jpeg_sync.h
@@ -0,0 +1,142 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+
+#ifndef MSM_JPEG_SYNC_H
+#define MSM_JPEG_SYNC_H
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include "msm_camera_io_util.h"
+#include "msm_jpeg_hw.h"
+#include "cam_smmu_api.h"
+
+#define JPEG_8974_V1 0x10000000
+#define JPEG_8974_V2 0x10010000
+#define JPEG_8994 0x10020000
+#define JPEG_CLK_MAX 16
+#define JPEG_REGULATOR_MAX 3
+
+enum msm_jpeg_state {
+ MSM_JPEG_INIT,
+ MSM_JPEG_RESET,
+ MSM_JPEG_EXECUTING,
+ MSM_JPEG_STOPPED,
+ MSM_JPEG_IDLE
+};
+
+enum msm_jpeg_core_type {
+ MSM_JPEG_CORE_CODEC,
+ MSM_JPEG_CORE_DMA
+};
+
+struct msm_jpeg_q {
+ char const *name;
+ struct list_head q;
+ spinlock_t lck;
+ wait_queue_head_t wait;
+ int unblck;
+};
+
+struct msm_jpeg_q_entry {
+ struct list_head list;
+ void *data;
+};
+
+struct msm_jpeg_device {
+ struct platform_device *pdev;
+ struct resource *mem;
+ int irq;
+ void *base;
+ struct clk *jpeg_clk[JPEG_CLK_MAX];
+ struct msm_cam_clk_info jpeg_clk_info[JPEG_CLK_MAX];
+
+ struct regulator *jpeg_fs[JPEG_REGULATOR_MAX];
+ const char *regulator_names[JPEG_REGULATOR_MAX];
+ uint32_t hw_version;
+
+ struct device *device;
+ struct cdev cdev;
+ struct mutex lock;
+ char open_count;
+ uint8_t op_mode;
+
+ /* Flag to store the jpeg bus vote state
+ */
+ int jpeg_bus_vote;
+
+ /* event queue including frame done & err indications
+ */
+ struct msm_jpeg_q evt_q;
+
+ /* output return queue
+ */
+ struct msm_jpeg_q output_rtn_q;
+
+ /* output buf queue
+ */
+ struct msm_jpeg_q output_buf_q;
+
+ /* input return queue
+ */
+ struct msm_jpeg_q input_rtn_q;
+
+ /* input buf queue
+ */
+ struct msm_jpeg_q input_buf_q;
+
+ struct v4l2_subdev subdev;
+
+ struct class *msm_jpeg_class;
+
+ dev_t msm_jpeg_devno;
+
+ /*iommu domain and context*/
+ int idx;
+ int iommu_hdl;
+ int decode_flag;
+ void *jpeg_vbif;
+ int release_buf;
+ struct msm_jpeg_hw_pingpong fe_pingpong_buf;
+ struct msm_jpeg_hw_pingpong we_pingpong_buf;
+ int we_pingpong_index;
+ int reset_done_ack;
+ spinlock_t reset_lock;
+ wait_queue_head_t reset_wait;
+ uint32_t res_size;
+ uint32_t jpeg_bus_client;
+ uint32_t num_clk;
+ uint32_t num_regulator;
+ enum msm_jpeg_state state;
+ enum msm_jpeg_core_type core_type;
+};
+
+int __msm_jpeg_open(struct msm_jpeg_device *pgmn_dev);
+int __msm_jpeg_release(struct msm_jpeg_device *pgmn_dev);
+
+long __msm_jpeg_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_COMPAT
+long __msm_jpeg_compat_ioctl(struct msm_jpeg_device *pgmn_dev,
+ unsigned int cmd, unsigned long arg);
+#endif
+
+int __msm_jpeg_init(struct msm_jpeg_device *pgmn_dev);
+int __msm_jpeg_exit(struct msm_jpeg_device *pgmn_dev);
+
+#endif /* MSM_JPEG_SYNC_H */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/Makefile b/drivers/media/platform/msm/camera_v2/jpeg_dma/Makefile
new file mode 100644
index 000000000000..21cbadbd6425
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/Makefile
@@ -0,0 +1,4 @@
+GCC_VERSION := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CROSS_COMPILE)gcc)
+ccflags-y += -Idrivers/media/video/msm
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+obj-$(CONFIG_MSM_JPEGDMA) += msm_jpeg_dma_dev.o msm_jpeg_dma_hw.o
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
new file mode 100644
index 000000000000..52a132b073d8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.c
@@ -0,0 +1,1340 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <linux/delay.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/msm_jpeg_dma.h>
+
+#include "msm_jpeg_dma_dev.h"
+#include "msm_jpeg_dma_hw.h"
+#include "cam_hw_ops.h"
+
+#define MSM_JPEGDMA_DRV_NAME "msm_jpegdma"
+
+/* Jpeg dma stream off timeout */
+#define MSM_JPEGDMA_STREAM_OFF_TIMEOUT_MS 500
+
+/* Jpeg dma formats lookup table */
+static struct msm_jpegdma_format formats[] = {
+ {
+ .name = "Greyscale",
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .depth = 8,
+ .num_planes = 1,
+ .colplane_h = 1,
+ .colplane_v = 1,
+ .h_align = 1,
+ .v_align = 1,
+ .planes[0] = JPEGDMA_PLANE_TYPE_Y,
+ },
+ {
+ .name = "Y/CbCr 4:2:0",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = 12,
+ .num_planes = 2,
+ .colplane_h = 1,
+ .colplane_v = 2,
+ .h_align = 2,
+ .v_align = 2,
+ .planes[0] = JPEGDMA_PLANE_TYPE_Y,
+ .planes[1] = JPEGDMA_PLANE_TYPE_CBCR,
+ },
+ {
+ .name = "Y/CrCb 4:2:0",
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .depth = 12,
+ .num_planes = 2,
+ .colplane_h = 1,
+ .colplane_v = 2,
+ .h_align = 2,
+ .v_align = 2,
+ .planes[0] = JPEGDMA_PLANE_TYPE_Y,
+ .planes[1] = JPEGDMA_PLANE_TYPE_CBCR,
+ },
+ {
+ .name = "YUV 4:2:0 planar, YCbCr",
+ .fourcc = V4L2_PIX_FMT_YUV420,
+ .depth = 12,
+ .num_planes = 3,
+ .colplane_h = 2,
+ .colplane_v = 2,
+ .h_align = 2,
+ .v_align = 2,
+ .planes[0] = JPEGDMA_PLANE_TYPE_Y,
+ .planes[1] = JPEGDMA_PLANE_TYPE_CR,
+ .planes[2] = JPEGDMA_PLANE_TYPE_CB,
+ },
+};
+
+/*
+ * msm_jpegdma_ctx_from_fh - Get dma context from v4l2 fh.
+ * @fh: Pointer to v4l2 fh.
+ */
+static inline struct jpegdma_ctx *msm_jpegdma_ctx_from_fh(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct jpegdma_ctx, fh);
+}
+
+/*
+ * msm_jpegdma_get_next_config_idx - get next configuration index.
+ * @ctx: Pointer to jpegdma context.
+ */
+static inline int msm_jpegdma_get_next_config_idx(struct jpegdma_ctx *ctx)
+{
+ return (ctx->config_idx + 1) % MSM_JPEGDMA_MAX_CONFIGS;
+}
+
+/*
+ * msm_jpegdma_schedule_next_config - Schedule next configuration.
+ * @ctx: Pointer to jpegdma context.
+ */
+static inline void msm_jpegdma_schedule_next_config(struct jpegdma_ctx *ctx)
+{
+ ctx->config_idx = (ctx->config_idx + 1) % MSM_JPEGDMA_MAX_CONFIGS;
+}
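+
+/*
+ * Hardware configuration is double buffered across MSM_JPEGDMA_MAX_CONFIGS
+ * slots: a new size/crop is prepared in the "next" slot while the current
+ * one may still be in use, then swapped in on the following device run.
+ * A rough sketch of the flow implemented further below:
+ *
+ * idx = msm_jpegdma_get_next_config_idx(ctx);
+ * msm_jpegdma_hw_set_config(dma, &size, &ctx->plane_config[idx]);
+ * ctx->pending_config = 1; - consumed in msm_jpegdma_device_run(),
+ * which calls msm_jpegdma_schedule_next_config()
+ */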
+
+/*
+ * msm_jpegdma_get_format_idx - Get jpeg dma format lookup index.
+ * @ctx: Pointer to dma ctx.
+ * @f: v4l2 format.
+ */
+static int msm_jpegdma_get_format_idx(struct jpegdma_ctx *ctx,
+ struct v4l2_format *f)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++)
+ if (formats[i].fourcc == f->fmt.pix.pixelformat)
+ break;
+
+ if (i == ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ return i;
+}
+
+/*
+ * msm_jpegdma_fill_size_from_ctx - Fill size config from the dma context.
+ * @ctx: Pointer to dma ctx.
+ * @size: Size config.
+ */
+static void msm_jpegdma_fill_size_from_ctx(struct jpegdma_ctx *ctx,
+ struct msm_jpegdma_size_config *size)
+{
+
+ size->in_size.top = ctx->crop.top;
+ size->in_size.left = ctx->crop.left;
+ size->in_size.width = ctx->crop.width;
+ size->in_size.height = ctx->crop.height;
+ size->in_size.scanline = ctx->format_out.fmt.pix.height;
+ size->in_size.stride = ctx->format_out.fmt.pix.bytesperline;
+
+ size->out_size.top = 0;
+ size->out_size.left = 0;
+ size->out_size.width = ctx->format_cap.fmt.pix.width;
+ size->out_size.height = ctx->format_cap.fmt.pix.height;
+ size->out_size.scanline = ctx->format_cap.fmt.pix.height;
+ size->out_size.stride = ctx->format_cap.fmt.pix.bytesperline;
+}
+
+/*
+ * msm_jpegdma_align_format - Align jpeg dma format.
+ * @f: v4l2 format.
+ * @format_idx: format lookup index.
+ */
+static void msm_jpegdma_align_format(struct v4l2_format *f, int format_idx)
+{
+ unsigned int size_image;
+ int i;
+
+ if (f->fmt.pix.width > MSM_JPEGDMA_MAX_WIDTH)
+ f->fmt.pix.width = MSM_JPEGDMA_MAX_WIDTH;
+
+ if (f->fmt.pix.width < MSM_JPEGDMA_MIN_WIDTH)
+ f->fmt.pix.width = MSM_JPEGDMA_MIN_WIDTH;
+
+ if (f->fmt.pix.height > MSM_JPEGDMA_MAX_HEIGHT)
+ f->fmt.pix.height = MSM_JPEGDMA_MAX_HEIGHT;
+
+ if (f->fmt.pix.height < MSM_JPEGDMA_MIN_HEIGHT)
+ f->fmt.pix.height = MSM_JPEGDMA_MIN_HEIGHT;
+
+ if (formats[format_idx].h_align > 1)
+ f->fmt.pix.width &= ~(formats[format_idx].h_align - 1);
+
+ if (formats[format_idx].v_align > 1)
+ f->fmt.pix.height &= ~(formats[format_idx].v_align - 1);
+
+ if (f->fmt.pix.bytesperline < f->fmt.pix.width)
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+
+ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.bytesperline,
+ MSM_JPEGDMA_STRIDE_ALIGN);
+
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+ size_image = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+ if (formats[format_idx].num_planes > 1)
+ for (i = 1; i < formats[format_idx].num_planes; i++)
+ size_image += (f->fmt.pix.bytesperline *
+ (f->fmt.pix.height / formats[format_idx].colplane_v));
+
+ f->fmt.pix.sizeimage = size_image;
+ f->fmt.pix.field = V4L2_FIELD_NONE;
+}
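+
+/*
+ * Worked example of the alignment above (values are illustrative): an NV12
+ * request of 641x481 with h_align = v_align = 2 and colplane_v = 2 becomes
+ *
+ * width = 640, height = 480
+ * bytesperline = ALIGN(640, MSM_JPEGDMA_STRIDE_ALIGN)
+ * sizeimage = bytesperline * 480 (Y plane)
+ * + bytesperline * (480 / 2) (interleaved CbCr plane)
+ */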
+
+/*
+ * msm_jpegdma_config_ok - Check if jpeg dma format is ok for processing.
+ * @ctx: Pointer to dma ctx.
+ */
+static int msm_jpegdma_config_ok(struct jpegdma_ctx *ctx)
+{
+ int ret;
+ int cap_idx;
+ int out_idx;
+ struct msm_jpegdma_size_config size;
+
+ cap_idx = msm_jpegdma_get_format_idx(ctx, &ctx->format_cap);
+ if (cap_idx < 0)
+ return 0;
+
+ out_idx = msm_jpegdma_get_format_idx(ctx, &ctx->format_out);
+ if (out_idx < 0)
+ return 0;
+
+ /* jpeg dma can not convert formats */
+ if (cap_idx != out_idx)
+ return 0;
+
+ msm_jpegdma_fill_size_from_ctx(ctx, &size);
+
+ size.format = formats[ctx->format_idx];
+
+ ret = msm_jpegdma_hw_check_config(ctx->jdma_device, &size);
+ if (ret < 0)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * msm_jpegdma_update_hw_config - Update dma hw configuration.
+ * @ctx: Pointer to dma ctx.
+ */
+static int msm_jpegdma_update_hw_config(struct jpegdma_ctx *ctx)
+{
+ struct msm_jpegdma_size_config size;
+ int idx;
+ int ret = 0;
+
+ if (msm_jpegdma_config_ok(ctx)) {
+ size.fps = ctx->timeperframe.denominator /
+ ctx->timeperframe.numerator;
+
+ size.format = formats[ctx->format_idx];
+
+ msm_jpegdma_fill_size_from_ctx(ctx, &size);
+
+ idx = msm_jpegdma_get_next_config_idx(ctx);
+
+ ret = msm_jpegdma_hw_set_config(ctx->jdma_device,
+ &size, &ctx->plane_config[idx]);
+ if (ret < 0)
+ dev_err(ctx->jdma_device->dev, "Can not get hw cfg\n");
+ else
+ ctx->pending_config = 1;
+ }
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_queue_setup - vb2_ops queue_setup callback.
+ * @q: Pointer to vb2 queue struct.
+ * @fmt: Pointer to v4l2 format struct (NULL is valid argument).
+ * @num_buffers: Pointer of number of buffers requested.
+ * @num_planes: Pointer to number of planes requested.
+ * @sizes: Array containing sizes of planes.
+ * @alloc_ctxs: Array of allocated contexts for each plane.
+ */
+static int msm_jpegdma_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct jpegdma_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (!fmt) {
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ sizes[0] = ctx->format_out.fmt.pix.sizeimage;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ sizes[0] = ctx->format_cap.fmt.pix.sizeimage;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ sizes[0] = fmt->fmt.pix.sizeimage;
+ }
+
+ *num_planes = 1;
+ alloc_ctxs[0] = ctx->jdma_device;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_buf_queue - vb2_ops buf_queue callback.
+ * @vb: Pointer to vb2 buffer struct.
+ */
+static void msm_jpegdma_buf_queue(struct vb2_buffer *vb)
+{
+ struct jpegdma_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+
+ return;
+}
+
+/*
+ * msm_jpegdma_start_streaming - vb2_ops start_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ * @count: Number of buffer queued before stream on call.
+ */
+static int msm_jpegdma_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct jpegdma_ctx *ctx = vb2_get_drv_priv(q);
+ int ret;
+
+ ret = msm_jpegdma_hw_get(ctx->jdma_device);
+ if (ret < 0) {
+ dev_err(ctx->jdma_device->dev, "Fail to get dma hw\n");
+ return ret;
+ }
+ if (!atomic_read(&ctx->active)) {
+ ret = msm_jpegdma_update_hw_config(ctx);
+ if (ret < 0) {
+ dev_err(ctx->jdma_device->dev,
+ "Failed to configure hw\n");
+ msm_jpegdma_hw_put(ctx->jdma_device);
+ return ret;
+ }
+ atomic_set(&ctx->active, 1);
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_stop_streaming - vb2_ops stop_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ */
+static void msm_jpegdma_stop_streaming(struct vb2_queue *q)
+{
+ struct jpegdma_ctx *ctx = vb2_get_drv_priv(q);
+ unsigned long time;
+ int ret = 0;
+
+ atomic_set(&ctx->active, 0);
+
+ time = wait_for_completion_timeout(&ctx->completion,
+ msecs_to_jiffies(MSM_JPEGDMA_STREAM_OFF_TIMEOUT_MS));
+ if (!time) {
+ dev_err(ctx->jdma_device->dev, "Ctx wait timeout\n");
+ ret = -ETIME;
+ }
+ msm_jpegdma_hw_put(ctx->jdma_device);
+}
+
+/* Videobuf2 queue callbacks. */
+static struct vb2_ops msm_jpegdma_vb2_q_ops = {
+ .queue_setup = msm_jpegdma_queue_setup,
+ .buf_queue = msm_jpegdma_buf_queue,
+ .start_streaming = msm_jpegdma_start_streaming,
+ .stop_streaming = msm_jpegdma_stop_streaming,
+};
+
+/*
+ * msm_jpegdma_get_userptr - Map and get buffer handler for user pointer buffer.
+ * @alloc_ctx: Contexts allocated in buf_setup.
+ * @vaddr: Virtual address passed from userspace (in our case an ion fd).
+ * @size: Size of the buffer
+ * @write: True if buffer will be used for writing the data.
+ */
+static void *msm_jpegdma_get_userptr(void *alloc_ctx,
+ unsigned long vaddr, unsigned long size, int write)
+{
+ struct msm_jpegdma_device *dma = alloc_ctx;
+ struct msm_jpegdma_buf_handle *buf;
+ int ret;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = msm_jpegdma_hw_map_buffer(dma, vaddr, buf);
+ if (ret < 0 || buf->size < size)
+ goto error;
+
+ return buf;
+error:
+ kzfree(buf);
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * msm_jpegdma_put_userptr - Unmap and free buffer handler.
+ * @buf_priv: Buffer handler allocated get_userptr callback.
+ */
+static void msm_jpegdma_put_userptr(void *buf_priv)
+{
+ if (IS_ERR_OR_NULL(buf_priv))
+ return;
+
+ msm_jpegdma_hw_unmap_buffer(buf_priv);
+
+ kzfree(buf_priv);
+}
+
+/* Videobuf2 memory callbacks. */
+static struct vb2_mem_ops msm_jpegdma_vb2_mem_ops = {
+ .get_userptr = msm_jpegdma_get_userptr,
+ .put_userptr = msm_jpegdma_put_userptr,
+};
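+
+/*
+ * With these mem_ops the vb2 "userptr" actually carries an ion fd, so a
+ * hypothetical userspace enqueue (simplified, ion_buf_fd and frame_size are
+ * placeholders) would look like:
+ *
+ * struct v4l2_buffer buf = {
+ * .type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
+ * .memory = V4L2_MEMORY_USERPTR,
+ * .index = 0,
+ * };
+ * buf.m.userptr = (unsigned long)ion_buf_fd;
+ * buf.length = frame_size;
+ * ioctl(video_fd, VIDIOC_QBUF, &buf);
+ */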
+
+/*
+ * msm_jpegdma_queue_init - m2m queue init callback.
+ * @priv: Pointer to jpegdma ctx.
+ * @src_vq: vb2 source queue.
+ * @dst_vq: vb2 destination queue.
+ */
+static int msm_jpegdma_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct jpegdma_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->mem_ops = &msm_jpegdma_vb2_mem_ops;
+ src_vq->ops = &msm_jpegdma_vb2_q_ops;
+ src_vq->buf_struct_size = sizeof(struct vb2_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret) {
+ dev_err(ctx->jdma_device->dev, "Can not init src queue\n");
+ return ret;
+ }
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->mem_ops = &msm_jpegdma_vb2_mem_ops;
+ dst_vq->ops = &msm_jpegdma_vb2_q_ops;
+ dst_vq->buf_struct_size = sizeof(struct vb2_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ dev_err(ctx->jdma_device->dev, "Can not init dst queue\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_open - Jpeg dma device open method.
+ * @file: Pointer to file struct.
+ */
+static int msm_jpegdma_open(struct file *file)
+{
+ struct msm_jpegdma_device *device = video_drvdata(file);
+ struct video_device *video = video_devdata(file);
+ struct jpegdma_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_init(&ctx->lock);
+ ctx->jdma_device = device;
+ dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma open\n");
+ /* Set ctx defaults */
+ ctx->timeperframe.numerator = 1;
+ ctx->timeperframe.denominator = MSM_JPEGDMA_DEFAULT_FPS;
+ atomic_set(&ctx->active, 0);
+
+ v4l2_fh_init(&ctx->fh, video);
+
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(device->m2m_dev,
+ ctx, msm_jpegdma_queue_init);
+ if (IS_ERR_OR_NULL(ctx->m2m_ctx)) {
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_m2m_init;
+ }
+ init_completion(&ctx->completion);
+ complete_all(&ctx->completion);
+ dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma open success\n");
+
+ ret = cam_config_ahb_clk(CAM_AHB_CLIENT_JPEG, CAMERA_AHB_SVS_VOTE);
+ if (ret < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto error_m2m_init;
+ }
+
+ return 0;
+
+error_m2m_init:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+/*
+ * msm_jpegdma_release - Jpeg dma device release method.
+ * @file: Pointer to file struct.
+ */
+static int msm_jpegdma_release(struct file *file)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(file->private_data);
+
+ atomic_set(&ctx->active, 0);
+ complete_all(&ctx->completion);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_JPEG,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_poll - Jpeg dma device poll method.
+ * @file: Pointer to file struct.
+ * @wait: Pointer to poll table struct.
+ */
+static unsigned int msm_jpegdma_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(file->private_data);
+
+ return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+}
+
+/* Dma device file operations callbacks */
+static const struct v4l2_file_operations fd_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_jpegdma_open,
+ .release = msm_jpegdma_release,
+ .poll = msm_jpegdma_poll,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+/*
+ * msm_jpegdma_querycap - V4l2 ioctl query capability handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @cap: Pointer to v4l2_capability struct need to be filled.
+ */
+static int msm_jpegdma_querycap(struct file *file,
+ void *fh, struct v4l2_capability *cap)
+{
+ cap->bus_info[0] = 0;
+ strlcpy(cap->driver, MSM_JPEGDMA_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, MSM_JPEGDMA_DRV_NAME, sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_CAPTURE;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_enum_fmt_vid_cap - V4l2 ioctl enumerate capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_fmtdesc struct need to be filled.
+ */
+static int msm_jpegdma_enum_fmt_vid_cap(struct file *file,
+ void *fh, struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].fourcc;
+ strlcpy(f->description, formats[f->index].name,
+ sizeof(f->description));
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_enum_fmt_vid_out - V4l2 ioctl enumerate output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_fmtdesc struct need to be filled.
+ */
+static int msm_jpegdma_enum_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_fmtdesc *f)
+{
+ if (f->index >= ARRAY_SIZE(formats))
+ return -EINVAL;
+
+ f->pixelformat = formats[f->index].fourcc;
+ strlcpy(f->description, formats[f->index].name,
+ sizeof(f->description));
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_g_fmt_cap - V4l2 ioctl get capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct need to be filled.
+ */
+static int msm_jpegdma_g_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ *f = ctx->format_cap;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_g_fmt_out - V4l2 ioctl get output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct need to be filled.
+ */
+static int msm_jpegdma_g_fmt_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ *f = ctx->format_out;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_try_fmt_vid_cap - V4l2 ioctl try capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_jpegdma_try_fmt_vid_cap(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ msm_jpegdma_align_format(f, ctx->format_idx);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_try_fmt_vid_out - V4l2 ioctl try output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_jpegdma_try_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ msm_jpegdma_align_format(f, ctx->format_idx);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_s_fmt_vid_cap - V4l2 ioctl set capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_jpegdma_s_fmt_vid_cap(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ ret = msm_jpegdma_get_format_idx(ctx, f);
+ if (ret < 0)
+ return -EINVAL;
+
+ ctx->format_idx = ret;
+
+ msm_jpegdma_align_format(f, ctx->format_idx);
+
+ /* Initialize crop with output height */
+ ctx->crop.top = 0;
+ ctx->crop.left = 0;
+ ctx->crop.width = ctx->format_out.fmt.pix.width;
+ ctx->crop.height = ctx->format_out.fmt.pix.height;
+
+ ctx->format_cap = *f;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_s_fmt_vid_out - V4l2 ioctl set output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int msm_jpegdma_s_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ ret = msm_jpegdma_get_format_idx(ctx, f);
+ if (ret < 0)
+ return -EINVAL;
+
+ ctx->format_idx = ret;
+
+ msm_jpegdma_align_format(f, ctx->format_idx);
+
+ /* Initialize crop */
+ ctx->crop.top = 0;
+ ctx->crop.left = 0;
+ ctx->crop.width = f->fmt.pix.width;
+ ctx->crop.height = f->fmt.pix.height;
+
+ ctx->format_out = *f;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_reqbufs - V4l2 ioctl request buffers handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @req: Pointer to v4l2_requestbuffer struct.
+ */
+static int msm_jpegdma_reqbufs(struct file *file,
+ void *fh, struct v4l2_requestbuffers *req)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req);
+}
+
+/*
+ * msm_jpegdma_qbuf - V4l2 ioctl queue buffer handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf: Pointer to v4l2_buffer struct.
+ */
+static int msm_jpegdma_qbuf(struct file *file, void *fh,
+ struct v4l2_buffer *buf)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ mutex_lock(&ctx->lock);
+
+ ret = v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
+ if (ret < 0)
+ dev_err(ctx->jdma_device->dev, "QBuf fail\n");
+
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_dqbuf - V4l2 ioctl dequeue buffer handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf: Pointer to v4l2_buffer struct.
+ */
+static int msm_jpegdma_dqbuf(struct file *file,
+ void *fh, struct v4l2_buffer *buf)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+}
+
+/*
+ * msm_jpegdma_streamon - V4l2 ioctl stream on handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int msm_jpegdma_streamon(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ if (!msm_jpegdma_config_ok(ctx))
+ return -EINVAL;
+
+ mutex_lock(&ctx->lock);
+
+ ret = v4l2_m2m_streamon(file, ctx->m2m_ctx, buf_type);
+ if (ret < 0)
+ dev_err(ctx->jdma_device->dev, "Stream on fail\n");
+
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_streamoff - V4l2 ioctl stream off handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int msm_jpegdma_streamoff(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret;
+
+ ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, buf_type);
+ if (ret < 0)
+ dev_err(ctx->jdma_device->dev, "Stream off fails\n");
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_cropcap - V4l2 ioctl crop capabilities.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_cropcap struct need to be set.
+ */
+static int msm_jpegdma_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *a)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ struct v4l2_format *format;
+
+ switch (a->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ format = &ctx->format_out;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ format = &ctx->format_cap;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ a->bounds.top = 0;
+ a->bounds.left = 0;
+ a->bounds.width = format->fmt.pix.width;
+ a->bounds.height = format->fmt.pix.height;
+
+ a->defrect = ctx->crop;
+
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_g_crop - V4l2 ioctl get crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @crop: Pointer to v4l2_crop struct need to be set.
+ */
+static int msm_jpegdma_g_crop(struct file *file, void *fh,
+ struct v4l2_crop *crop)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ switch (crop->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ crop->c = ctx->crop;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ crop->c.left = 0;
+ crop->c.top = 0;
+ crop->c.width = ctx->format_cap.fmt.pix.width;
+ crop->c.height = ctx->format_cap.fmt.pix.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_s_crop - V4l2 ioctl set crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @crop: Pointer to v4l2_crop struct need to be set.
+ */
+static int msm_jpegdma_s_crop(struct file *file, void *fh,
+ const struct v4l2_crop *crop)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ int ret = 0;
+
+ /* Crop is supported only for input buffers */
+ if (crop->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (crop->c.left < 0 || crop->c.top < 0 ||
+ crop->c.height < 0 || crop->c.width < 0)
+ return -EINVAL;
+
+ /* Upscale is not supported */
+ if (crop->c.width < ctx->format_cap.fmt.pix.width)
+ return -EINVAL;
+
+ if (crop->c.height < ctx->format_cap.fmt.pix.height)
+ return -EINVAL;
+
+ if (crop->c.width + crop->c.left > ctx->format_out.fmt.pix.width)
+ return -EINVAL;
+
+ if (crop->c.height + crop->c.top > ctx->format_out.fmt.pix.height)
+ return -EINVAL;
+
+ if (crop->c.width % formats[ctx->format_idx].h_align)
+ return -EINVAL;
+
+ if (crop->c.left % formats[ctx->format_idx].h_align)
+ return -EINVAL;
+
+ if (crop->c.height % formats[ctx->format_idx].v_align)
+ return -EINVAL;
+
+ if (crop->c.top % formats[ctx->format_idx].v_align)
+ return -EINVAL;
+
+ mutex_lock(&ctx->lock);
+
+ ctx->crop = crop->c;
+ if (atomic_read(&ctx->active))
+ ret = msm_jpegdma_update_hw_config(ctx);
+
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_g_parm - V4l2 ioctl get parm.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_streamparm struct need to be filled.
+ */
+static int msm_jpegdma_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+
+ /* Get param is supported only for input buffers */
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ a->parm.output.capability = 0;
+ a->parm.output.extendedmode = 0;
+ a->parm.output.outputmode = 0;
+ a->parm.output.writebuffers = 0;
+ a->parm.output.timeperframe = ctx->timeperframe;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_s_parm - V4l2 ioctl set parm.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_streamparm struct need to be set.
+ */
+static int msm_jpegdma_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh);
+ /* Set param is supported only for input buffers */
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (!a->parm.output.timeperframe.numerator ||
+ !a->parm.output.timeperframe.denominator)
+ return -EINVAL;
+
+ /* Frame rate is not supported during streaming */
+ if (atomic_read(&ctx->active))
+ return -EINVAL;
+
+ ctx->timeperframe = a->parm.output.timeperframe;
+ return 0;
+}
+
+/* V4l2 ioctl handlers */
+static const struct v4l2_ioctl_ops fd_ioctl_ops = {
+ .vidioc_querycap = msm_jpegdma_querycap,
+ .vidioc_enum_fmt_vid_out = msm_jpegdma_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = msm_jpegdma_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_out = msm_jpegdma_g_fmt_out,
+ .vidioc_g_fmt_vid_cap = msm_jpegdma_g_fmt_cap,
+ .vidioc_try_fmt_vid_out = msm_jpegdma_try_fmt_vid_out,
+ .vidioc_try_fmt_vid_cap = msm_jpegdma_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_out = msm_jpegdma_s_fmt_vid_out,
+ .vidioc_s_fmt_vid_cap = msm_jpegdma_s_fmt_vid_cap,
+ .vidioc_reqbufs = msm_jpegdma_reqbufs,
+ .vidioc_qbuf = msm_jpegdma_qbuf,
+ .vidioc_dqbuf = msm_jpegdma_dqbuf,
+ .vidioc_streamon = msm_jpegdma_streamon,
+ .vidioc_streamoff = msm_jpegdma_streamoff,
+ .vidioc_cropcap = msm_jpegdma_cropcap,
+ .vidioc_g_crop = msm_jpegdma_g_crop,
+ .vidioc_s_crop = msm_jpegdma_s_crop,
+ .vidioc_g_parm = msm_jpegdma_g_parm,
+ .vidioc_s_parm = msm_jpegdma_s_parm,
+};
+
+/*
+ * msm_jpegdma_process_buffers - Start dma processing.
+ * @ctx: Pointer dma context.
+ * @src_buf: Pointer to Vb2 source buffer.
+ * @dst_buf: Pointer to Vb2 destination buffer.
+ */
+static void msm_jpegdma_process_buffers(struct jpegdma_ctx *ctx,
+ struct vb2_buffer *src_buf, struct vb2_buffer *dst_buf)
+{
+ struct msm_jpegdma_buf_handle *buf_handle;
+ struct msm_jpegdma_addr addr;
+ int plane_idx;
+ int config_idx;
+
+ buf_handle = dst_buf->planes[0].mem_priv;
+ addr.out_addr = buf_handle->addr;
+
+ buf_handle = src_buf->planes[0].mem_priv;
+ addr.in_addr = buf_handle->addr;
+
+ plane_idx = ctx->plane_idx;
+ config_idx = ctx->config_idx;
+ msm_jpegdma_hw_start(ctx->jdma_device, &addr,
+ &ctx->plane_config[config_idx].plane[plane_idx],
+ &ctx->plane_config[config_idx].speed);
+}
+
+/*
+ * msm_jpegdma_device_run - Dma device run.
+ * @priv: Pointer dma context.
+ */
+static void msm_jpegdma_device_run(void *priv)
+{
+ struct vb2_buffer *src_buf;
+ struct vb2_buffer *dst_buf;
+ struct jpegdma_ctx *ctx = priv;
+
+ dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma device run E\n");
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf == NULL || dst_buf == NULL) {
+ dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ return;
+ }
+
+ if (ctx->pending_config) {
+ msm_jpegdma_schedule_next_config(ctx);
+ ctx->pending_config = 0;
+ }
+
+ msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
+ dev_dbg(ctx->jdma_device->dev, "Jpeg v4l2 dma device run X\n");
+}
+
+/*
+ * msm_jpegdma_job_abort - Dma abort job.
+ * @priv: Pointer dma context.
+ */
+static void msm_jpegdma_job_abort(void *priv)
+{
+ struct jpegdma_ctx *ctx = priv;
+
+ msm_jpegdma_hw_abort(ctx->jdma_device);
+ v4l2_m2m_job_finish(ctx->jdma_device->m2m_dev, ctx->m2m_ctx);
+}
+
+/*
+ * msm_jpegdma_job_ready - Dma check if job is ready
+ * @priv: Pointer dma context.
+ */
+static int msm_jpegdma_job_ready(void *priv)
+{
+ struct jpegdma_ctx *ctx = priv;
+
+ if (atomic_read(&ctx->active)) {
+ init_completion(&ctx->completion);
+ return 1;
+ }
+ return 0;
+}
+
+/* V4l2 mem2mem handlers */
+static struct v4l2_m2m_ops msm_jpegdma_m2m_ops = {
+ .device_run = msm_jpegdma_device_run,
+ .job_abort = msm_jpegdma_job_abort,
+ .job_ready = msm_jpegdma_job_ready,
+};
+
+/*
+ * msm_jpegdma_isr_processing_done - Invoked by dma_hw when processing is done.
+ * @dma: Pointer dma device.
+ */
+void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma)
+{
+ struct vb2_buffer *src_buf;
+ struct vb2_buffer *dst_buf;
+ struct jpegdma_ctx *ctx;
+
+ mutex_lock(&dma->lock);
+ ctx = v4l2_m2m_get_curr_priv(dma->m2m_dev);
+ if (ctx) {
+ mutex_lock(&ctx->lock);
+ ctx->plane_idx++;
+ if (ctx->plane_idx >= formats[ctx->format_idx].num_planes) {
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (src_buf == NULL || dst_buf == NULL) {
+ dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ mutex_unlock(&ctx->lock);
+ mutex_unlock(&dma->lock);
+ return;
+ }
+ complete_all(&ctx->completion);
+ ctx->plane_idx = 0;
+
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(ctx->jdma_device->m2m_dev,
+ ctx->m2m_ctx);
+ } else {
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (src_buf == NULL || dst_buf == NULL) {
+ dev_err(ctx->jdma_device->dev, "Error, buffer list empty\n");
+ mutex_unlock(&ctx->lock);
+ mutex_unlock(&dma->lock);
+ return;
+ }
+ msm_jpegdma_process_buffers(ctx, src_buf, dst_buf);
+ }
+ mutex_unlock(&ctx->lock);
+ }
+ mutex_unlock(&dma->lock);
+}
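+
+/*
+ * Each interrupt handled above completes one plane: plane_idx advances until
+ * it reaches num_planes for the current format, at which point both vb2
+ * buffers are returned and the m2m job finishes; otherwise the same buffer
+ * pair is resubmitted for the next plane. A 3-plane YUV420 frame therefore
+ * takes three msm_jpegdma_hw_start() passes per queued buffer pair.
+ */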
+
+/*
+ * jpegdma_probe - Dma device probe method.
+ * @pdev: Pointer Dma platform device.
+ */
+static int jpegdma_probe(struct platform_device *pdev)
+{
+ struct msm_jpegdma_device *jpegdma;
+ int ret;
+
+ dev_dbg(&pdev->dev, "jpeg v4l2 DMA probed\n");
+ /* Jpeg dma device struct */
+ jpegdma = kzalloc(sizeof(struct msm_jpegdma_device), GFP_KERNEL);
+ if (!jpegdma)
+ return -ENOMEM;
+
+ mutex_init(&jpegdma->lock);
+
+ init_completion(&jpegdma->hw_reset_completion);
+ init_completion(&jpegdma->hw_halt_completion);
+ jpegdma->dev = &pdev->dev;
+
+ /* Get resources */
+ ret = msm_jpegdma_hw_get_mem_resources(pdev, jpegdma);
+ if (ret < 0)
+ goto error_mem_resources;
+
+ ret = msm_jpegdma_hw_get_regulators(jpegdma);
+ if (ret < 0)
+ goto error_get_regulators;
+
+ ret = msm_jpegdma_hw_get_clocks(jpegdma);
+ if (ret < 0)
+ goto error_get_clocks;
+
+ ret = msm_jpegdma_hw_get_qos(jpegdma);
+ if (ret < 0)
+ goto error_qos_get;
+
+ ret = msm_jpegdma_hw_get_vbif(jpegdma);
+ if (ret < 0)
+ goto error_vbif_get;
+
+ ret = msm_jpegdma_hw_get_prefetch(jpegdma);
+ if (ret < 0)
+ goto error_prefetch_get;
+
+ ret = msm_jpegdma_hw_request_irq(pdev, jpegdma);
+ if (ret < 0)
+ goto error_hw_get_request_irq;
+
+ ret = msm_jpegdma_hw_get_capabilities(jpegdma);
+ if (ret < 0)
+ goto error_hw_get_request_irq;
+
+ /* mem2mem device */
+ jpegdma->m2m_dev = v4l2_m2m_init(&msm_jpegdma_m2m_ops);
+ if (IS_ERR(jpegdma->m2m_dev)) {
+ dev_err(&pdev->dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(jpegdma->m2m_dev);
+ goto error_m2m_init;
+ }
+
+ /* v4l2 device */
+ ret = v4l2_device_register(&pdev->dev, &jpegdma->v4l2_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ goto error_v4l2_register;
+ }
+
+ jpegdma->video.fops = &fd_fops;
+ jpegdma->video.ioctl_ops = &fd_ioctl_ops;
+ jpegdma->video.minor = -1;
+ jpegdma->video.release = video_device_release;
+ jpegdma->video.v4l2_dev = &jpegdma->v4l2_dev;
+ jpegdma->video.vfl_dir = VFL_DIR_M2M;
+ jpegdma->video.vfl_type = VFL_TYPE_GRABBER;
+ strlcpy(jpegdma->video.name, MSM_JPEGDMA_DRV_NAME,
+ sizeof(jpegdma->video.name));
+
+ ret = video_register_device(&jpegdma->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register video device\n");
+ goto error_video_register;
+ }
+
+ video_set_drvdata(&jpegdma->video, jpegdma);
+
+ platform_set_drvdata(pdev, jpegdma);
+
+ dev_dbg(&pdev->dev, "jpeg v4l2 DMA probe success\n");
+ return 0;
+
+error_video_register:
+ v4l2_device_unregister(&jpegdma->v4l2_dev);
+error_v4l2_register:
+ v4l2_m2m_release(jpegdma->m2m_dev);
+error_m2m_init:
+ msm_jpegdma_hw_release_irq(jpegdma);
+error_hw_get_request_irq:
+ msm_jpegdma_hw_put_prefetch(jpegdma);
+error_prefetch_get:
+ msm_jpegdma_hw_put_vbif(jpegdma);
+error_vbif_get:
+ msm_jpegdma_hw_put_qos(jpegdma);
+error_qos_get:
+ msm_jpegdma_hw_put_clocks(jpegdma);
+error_get_clocks:
+ msm_jpegdma_hw_put_regulators(jpegdma);
+error_get_regulators:
+ msm_jpegdma_hw_release_mem_resources(jpegdma);
+error_mem_resources:
+ kfree(jpegdma);
+ return ret;
+}
+
+/*
+ * jpegdma_device_remove - Jpegdma device remove method.
+ * @pdev: Pointer to jpegdma platform device.
+ */
+static int jpegdma_device_remove(struct platform_device *pdev)
+{
+ struct msm_jpegdma_device *dma;
+
+ dma = platform_get_drvdata(pdev);
+ if (NULL == dma) {
+ dev_err(&pdev->dev, "Can not get jpeg dma drvdata\n");
+ return 0;
+ }
+ video_unregister_device(&dma->video);
+ v4l2_device_unregister(&dma->v4l2_dev);
+ v4l2_m2m_release(dma->m2m_dev);
+ msm_jpegdma_hw_release_irq(dma);
+ msm_jpegdma_hw_put_clocks(dma);
+ msm_jpegdma_hw_put_regulators(dma);
+ msm_jpegdma_hw_release_mem_resources(dma);
+ kfree(dma);
+
+ return 0;
+}
+
+/* Device tree match struct */
+static const struct of_device_id msm_jpegdma_dt_match[] = {
+ {.compatible = "qcom,jpegdma"},
+ {}
+};
+
+/* Jpeg dma platform driver definition */
+static struct platform_driver jpegdma_driver = {
+ .probe = jpegdma_probe,
+ .remove = jpegdma_device_remove,
+ .driver = {
+ .name = MSM_JPEGDMA_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_jpegdma_dt_match,
+ },
+};
+
+static int __init msm_jpegdma_init_module(void)
+{
+ return platform_driver_register(&jpegdma_driver);
+}
+
+static void __exit msm_jpegdma_exit_module(void)
+{
+ platform_driver_unregister(&jpegdma_driver);
+}
+
+module_init(msm_jpegdma_init_module);
+module_exit(msm_jpegdma_exit_module);
+MODULE_DESCRIPTION("MSM JPEG DMA driver");
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h
new file mode 100644
index 000000000000..d978842c33cb
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_dev.h
@@ -0,0 +1,373 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_JPEG_DMA_DEV_H__
+#define __MSM_JPEG_DMA_DEV_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <linux/msm-bus.h>
+
+/* Max number of clocks defined in device tree */
+#define MSM_JPEGDMA_MAX_CLK 10
+/* Core clock index */
+#define MSM_JPEGDMA_CORE_CLK 0
+/* Max number of regulators defined in device tree */
+#define MSM_JPEGDMA_MAX_REGULATOR_NUM 3
+/* Max number of planes supported */
+#define MSM_JPEGDMA_MAX_PLANES 3
+/* Max number of hw pipes supported */
+#define MSM_JPEGDMA_MAX_PIPES 2
+/* Max number of hw configurations supported */
+#define MSM_JPEGDMA_MAX_CONFIGS 2
+/* Dma default fps */
+#define MSM_JPEGDMA_DEFAULT_FPS 30
+
+/* Dma input output size limitations */
+#define MSM_JPEGDMA_MAX_WIDTH 65536
+#define MSM_JPEGDMA_MIN_WIDTH 32
+#define MSM_JPEGDMA_MAX_HEIGHT 65536
+#define MSM_JPEGDMA_MIN_HEIGHT 32
+#define MSM_JPEGDMA_STRIDE_ALIGN 8
+
+/*
+ * enum msm_jpegdma_plane_type - Dma plane type.
+ * @JPEGDMA_PLANE_TYPE_Y: Y plane type.
+ * @JPEGDMA_PLANE_TYPE_CR: Chroma CR plane.
+ * @JPEGDMA_PLANE_TYPE_CB: Chroma CB plane.
+ * @JPEGDMA_PLANE_TYPE_CBCR: Interleaved CbCr plane.
+ */
+enum msm_jpegdma_plane_type {
+ JPEGDMA_PLANE_TYPE_Y,
+ JPEGDMA_PLANE_TYPE_CR,
+ JPEGDMA_PLANE_TYPE_CB,
+ JPEGDMA_PLANE_TYPE_CBCR,
+};
+
+/*
+ * struct msm_jpegdma_format - Dma format.
+ * @name: Format name.
+ * @fourcc: v4l2 fourcc code.
+ * @depth: Number of bits per pixel.
+ * @num_planes: number of planes.
+ * @colplane_h: Color plane horizontal subsample.
+ * @colplane_v: Color plane vertical subsample.
+ * @h_align: Horizontal align.
+ * @v_align: Vertical align.
+ * @planes: Array with plane types.
+ */
+struct msm_jpegdma_format {
+ char *name;
+ u32 fourcc;
+ int depth;
+ int num_planes;
+ int colplane_h;
+ int colplane_v;
+ int h_align;
+ int v_align;
+ enum msm_jpegdma_plane_type planes[MSM_JPEGDMA_MAX_PLANES];
+};
+
+/*
+ * struct msm_jpegdma_size - Dma size.
+ * @top: Top position.
+ * @left: Left position.
+ * @width: Width.
+ * @height: Height.
+ * @scanline: Number of lines per plane.
+ * @stride: Stride bytes per line.
+ */
+struct msm_jpegdma_size {
+ unsigned int top;
+ unsigned int left;
+ unsigned int width;
+ unsigned int height;
+ unsigned int scanline;
+ unsigned int stride;
+};
+
+/*
+ * struct msm_jpegdma_size_config - Dma engine size configuration.
+ * @in_size: Input size.
+ * @out_size: Output size.
+ * @format: Format.
+ * @fps: Requested frames per second.
+ */
+struct msm_jpegdma_size_config {
+ struct msm_jpegdma_size in_size;
+ struct msm_jpegdma_size out_size;
+ struct msm_jpegdma_format format;
+ unsigned int fps;
+};
+
+/*
+ * struct msm_jpegdma_block - Dma hw block.
+ * @div: Block divider.
+ * @width: Block width.
+ * @reg_val: Block register value.
+ */
+struct msm_jpegdma_block {
+ unsigned int div;
+ unsigned int width;
+ unsigned int reg_val;
+};
+
+/*
+ * struct msm_jpegdma_block_config - Dma hw block configuration.
+ * @block: Block settings.
+ * @blocks_per_row: Blocks per row.
+ * @blocks_per_col: Blocks per column.
+ * @h_step: Horizontal step value
+ * @v_step: Vertical step value
+ * @h_step_last: Last horizontal step.
+ * @v_step_last: Last vertical step.
+ */
+struct msm_jpegdma_block_config {
+ struct msm_jpegdma_block block;
+ unsigned int blocks_per_row;
+ unsigned int blocks_per_col;
+ unsigned int h_step;
+ unsigned int v_step;
+ unsigned int h_step_last;
+ unsigned int v_step_last;
+};
+
+/*
+ * struct msm_jpegdma_scale - Dma hw scale configuration.
+ * @enable: Scale enable.
+ * @hor_scale: Horizontal scale factor in Q21 format.
+ * @ver_scale: Vertical scale factor in Q21 format.
+ */
+struct msm_jpegdma_scale {
+ int enable;
+ unsigned int hor_scale;
+ unsigned int ver_scale;
+};
+
+/*
+ * struct msm_jpegdma_config - Dma hw configuration.
+ * @size_cfg: Size configuration.
+ * @scale_cfg: Scale configuration
+ * @block_cfg: Block configuration.
+ * @phase: Starting phase.
+ * @in_offset: Input offset.
+ * @out_offset: Output offset.
+ */
+struct msm_jpegdma_config {
+ struct msm_jpegdma_size_config size_cfg;
+ struct msm_jpegdma_scale scale_cfg;
+ struct msm_jpegdma_block_config block_cfg;
+ unsigned int phase;
+ unsigned int in_offset;
+ unsigned int out_offset;
+};
+
+/*
+ * struct msm_jpegdma_speed - Dma processing speed (bus bandwidth and core clock).
+ * @bus_ab: Bus average bandwidth.
+ * @bus_ib: Bus instantaneous bandwidth.
+ * @core_clock: Core clock frequency.
+ */
+struct msm_jpegdma_speed {
+ u64 bus_ab;
+ u64 bus_ib;
+ u64 core_clock;
+};
+
+/*
+ * struct msm_jpegdma_plane - Dma per-plane pipeline configuration.
+ * @active_pipes: Number of active pipes.
+ * @config: Plane configurations.
+ * @type: Plane type.
+ */
+struct msm_jpegdma_plane {
+ unsigned int active_pipes;
+ struct msm_jpegdma_config config[MSM_JPEGDMA_MAX_PIPES];
+ enum msm_jpegdma_plane_type type;
+};
+
+/*
+ * struct msm_jpegdma_plane_config - Dma configuration for all planes.
+ * @num_planes: Number of planes.
+ * @plane: Plane configuration.
+ * @speed: Processing speed.
+ */
+struct msm_jpegdma_plane_config {
+ unsigned int num_planes;
+ struct msm_jpegdma_plane plane[MSM_JPEGDMA_MAX_PLANES];
+ struct msm_jpegdma_speed speed;
+};
+
+/*
+ * struct msm_jpegdma_addr - Contain input output address.
+ * @in_addr: Input dma address.
+ * @out_addr: Output dma address.
+ */
+struct msm_jpegdma_addr {
+ u32 in_addr;
+ u32 out_addr;
+};
+
+/*
+ * struct msm_jpegdma_buf_handle - Dma buffer information.
+ * @fd: Ion fd from which this buffer is imported.
+ * @dma: Pointer to jpeg dma device.
+ * @size: Size of the buffer.
+ * @addr: Address of the dma mmu mapped buffer. This address is programmed into the dma hw.
+ */
+struct msm_jpegdma_buf_handle {
+ int fd;
+ struct msm_jpegdma_device *dma;
+ unsigned long size;
+ ion_phys_addr_t addr;
+};
+
+/*
+ * struct jpegdma_ctx - Per open file handle context.
+ * @lock: Lock protecting dma ctx.
+ * @jdma_device: Pointer to dma device.
+ * @active: Set if context is active.
+ * @completion: Context processing completion.
+ * @fh: V4l2 file handle.
+ * @m2m_ctx: Memory to memory context.
+ * @format_cap: Current capture format.
+ * @format_out: Current output format.
+ * @crop: Current crop.
+ * @timeperframe: Time per frame in seconds.
+ * @config_idx: Plane configuration active index.
+ * @plane_config: Array of plane configurations.
+ * @pending_config: Flag set if there is pending plane configuration.
+ * @plane_idx: Processing plane index.
+ * @format_idx: Current format index.
+ */
+struct jpegdma_ctx {
+ struct mutex lock;
+ struct msm_jpegdma_device *jdma_device;
+ atomic_t active;
+ struct completion completion;
+ struct v4l2_fh fh;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_format format_cap;
+ struct v4l2_format format_out;
+ struct v4l2_rect crop;
+ struct v4l2_fract timeperframe;
+
+ unsigned int config_idx;
+ struct msm_jpegdma_plane_config plane_config[MSM_JPEGDMA_MAX_CONFIGS];
+ unsigned int pending_config;
+
+ unsigned int plane_idx;
+ unsigned int format_idx;
+};
+
+/*
+ * struct jpegdma_reg_cfg - Register value configuration.
+ * @reg: Register offset.
+ * @val: Register value.
+ */
+struct jpegdma_reg_cfg {
+ unsigned int reg;
+ unsigned int val;
+};
+
+/*
+ * enum msm_jpegdma_mem_resources - jpegdma device iomem resources.
+ * @MSM_JPEGDMA_IOMEM_CORE: Index of jpegdma core registers.
+ * @MSM_JPEGDMA_IOMEM_VBIF: Index of jpegdma vbif registers.
+ * @MSM_JPEGDMA_IOMEM_LAST: Not valid.
+ */
+enum msm_jpegdma_mem_resources {
+ MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IOMEM_VBIF,
+ MSM_JPEGDMA_IOMEM_LAST
+};
+
+/*
+ * struct msm_jpegdma_device - Jpeg dma device structure.
+ * @lock: Lock protecting dma device.
+ * @ref_count: Device reference count.
+ * @irq_num: Jpeg dma irq number.
+ * @res_mem: Array of memory resources used by Dma device.
+ * @iomem_base: Array of register mappings used by Dma device.
+ * @ioarea: Array of register ioarea used by Dma device.
+ * @vdd: Pointer to vdd regulator.
+ * @regulator_num: Number of regulators attached to the device.
+ * @clk_num: Number of clocks attached to the device.
+ * @clk: Array of clock resources used by dma device.
+ * @clk_rates: Array of clock rates.
+ * @vbif_regs_num: Number of vbif regs.
+ * @vbif_regs: Array of vbif regs that need to be set.
+ * @qos_regs_num: Number of qos regs.
+ * @qos_regs: Array of qos regs that need to be set.
+ * @prefetch_regs_num: Number of mmu prefetch regs.
+ * @prefetch_regs: Array of mmu prefetch regs that need to be set.
+ * @bus_client: Memory access bus client.
+ * @bus_vectors: Bus vector
+ * @bus_paths: Bus path.
+ * @bus_scale_data: Memory access bus scale data.
+ * @iommu_hndl: Dma device iommu handle.
+ * @iommu_attached_cnt: Iommu attached devices reference count.
+ * @iommu_dev: Pointer to Ion iommu device.
+ * @dev: Pointer to device struct.
+ * @v4l2_dev: V4l2 device.
+ * @video: Video device.
+ * @m2m_dev: Memory to memory device.
+ * @hw_num_pipes: Number of dma hw pipes.
+ * @active_clock_rate: Active core clock rate.
+ * @hw_reset_completion: Dma reset completion.
+ * @hw_halt_completion: Dma halt completion.
+ */
+struct msm_jpegdma_device {
+ struct mutex lock;
+ int ref_count;
+
+ int irq_num;
+ struct resource *res_mem[MSM_JPEGDMA_IOMEM_LAST];
+ void __iomem *iomem_base[MSM_JPEGDMA_IOMEM_LAST];
+ struct resource *ioarea[MSM_JPEGDMA_IOMEM_LAST];
+
+ struct regulator *vdd[MSM_JPEGDMA_MAX_REGULATOR_NUM];
+ unsigned int regulator_num;
+
+ unsigned int clk_num;
+ struct clk *clk[MSM_JPEGDMA_MAX_CLK];
+ unsigned int clk_rates[MSM_JPEGDMA_MAX_CLK];
+
+ unsigned int vbif_regs_num;
+ struct jpegdma_reg_cfg *vbif_regs;
+ unsigned int qos_regs_num;
+ struct jpegdma_reg_cfg *qos_regs;
+ unsigned int prefetch_regs_num;
+ struct jpegdma_reg_cfg *prefetch_regs;
+
+ uint32_t bus_client;
+ struct msm_bus_vectors bus_vectors;
+ struct msm_bus_paths bus_paths;
+ struct msm_bus_scale_pdata bus_scale_data;
+
+ int iommu_hndl;
+ unsigned int iommu_attached_cnt;
+
+ struct device *iommu_dev;
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device video;
+ struct v4l2_m2m_dev *m2m_dev;
+
+ int hw_num_pipes;
+ struct completion hw_reset_completion;
+ struct completion hw_halt_completion;
+ u64 active_clock_rate;
+};
+
+void msm_jpegdma_isr_processing_done(struct msm_jpegdma_device *dma);
+
+#endif /* __MSM_JPEG_DMA_DEV_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c
new file mode 100644
index 000000000000..1522ee4b5436
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c
@@ -0,0 +1,2116 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spinlock.h>
+#include <linux/iommu.h>
+#include <linux/msm_ion.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <media/videobuf2-core.h>
+
+#include "msm_camera_io_util.h"
+#include "cam_smmu_api.h"
+#include "msm_jpeg_dma_dev.h"
+#include "msm_jpeg_dma_hw.h"
+#include "msm_jpeg_dma_regs.h"
+
+/* Jpeg dma scale unity */
+#define MSM_JPEGDMA_SCALE_UNI (1 << 21)
+/* Jpeg dma bw numerator */
+#define MSM_JPEGDMA_BW_NUM 38
+/* Jpeg dma bw denominator */
+#define MSM_JPEGDMA_BW_DEN 10
+/* Jpeg bus client name */
+#define MSM_JPEGDMA_BUS_CLIENT_NAME "msm_jpeg_dma"
+/* Jpeg dma engine timeout in ms */
+#define MSM_JPEGDMA_TIMEOUT_MS 500
+/* Jpeg dma smmu name */
+#define MSM_JPEGDMA_SMMU_NAME "jpeg_dma"
+
+static const struct msm_jpegdma_block msm_jpegdma_block_sel[] = {
+ {
+ .div = 0x3C0000,
+ .width = 256,
+ .reg_val = 4,
+ },
+ {
+ .div = 0x7C0000,
+ .width = 128,
+ .reg_val = 3,
+ },
+ {
+ .div = 0xFC0000,
+ .width = 64,
+ .reg_val = 2,
+ },
+ {
+ .div = 0x1FC0000,
+ .width = 32,
+ .reg_val = 1,
+ },
+ {
+ .div = 0x4000000,
+ .width = 16,
+ .reg_val = 0,
+ },
+};
+
+/*
+ * msm_jpegdma_hw_read_reg - dma read from register.
+ * @dma: Pointer to dma device.
+ * @base_idx: dma memory resource index.
+ * @reg: Register address to read from.
+ */
+static inline u32 msm_jpegdma_hw_read_reg(struct msm_jpegdma_device *dma,
+ enum msm_jpegdma_mem_resources base_idx, u32 reg)
+{
+ return msm_camera_io_r(dma->iomem_base[base_idx] + reg);
+}
+
+/*
+ * msm_jpegdma_hw_write_reg - dma write to register.
+ * @dma: Pointer to dma device.
+ * @base_idx: dma memory resource index.
+ * @reg: Register address to write to.
+ * @value: Value to be written.
+ */
+static inline void msm_jpegdma_hw_write_reg(struct msm_jpegdma_device *dma,
+ enum msm_jpegdma_mem_resources base_idx, u32 reg, u32 value)
+{
+ pr_debug("%s:%d]%p %08x\n", __func__, __LINE__,
+ dma->iomem_base[base_idx] + reg,
+ value);
+ msm_camera_io_w(value, dma->iomem_base[base_idx] + reg);
+}
+
+/*
+ * msm_jpegdma_hw_enable_irq - Enable dma interrupts.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_enable_irq(struct msm_jpegdma_device *dma)
+{
+ u32 reg;
+
+ reg = MSM_JPEGDMA_IRQ_MASK_SESSION_DONE |
+ MSM_JPEGDMA_IRQ_MASK_AXI_HALT |
+ MSM_JPEGDMA_IRQ_MASK_RST_DONE;
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IRQ_MASK_ADDR, reg);
+}
+
+/*
+ * msm_jpegdma_hw_disable_irq - Disable dma interrupts.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_disable_irq(struct msm_jpegdma_device *dma)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IRQ_MASK_ADDR, 0);
+}
+
+/*
+ * msm_jpegdma_hw_clear_irq - Clear dma interrupts.
+ * @dma: Pointer to dma device.
+ * @status: Status to clear.
+ */
+static void msm_jpegdma_hw_clear_irq(struct msm_jpegdma_device *dma,
+ u32 status)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IRQ_CLEAR_ADDR, status);
+}
+
+/*
+ * msm_jpegdma_hw_get_irq_status - Get dma irq status
+ * @dma: Pointer to dma device.
+ */
+static u32 msm_jpegdma_hw_get_irq_status(struct msm_jpegdma_device *dma)
+{
+ return msm_jpegdma_hw_read_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_IRQ_STATUS);
+}
+
+/*
+ * msm_jpegdma_hw_get_num_pipes - Get number of dma pipes
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_get_num_pipes(struct msm_jpegdma_device *dma)
+{
+ int num_pipes;
+ u32 reg;
+
+ reg = msm_jpegdma_hw_read_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_HW_CAPABILITY);
+
+ num_pipes = (reg & MSM_JPEGDMA_HW_CAPABILITY_NUM_PIPES_BMSK) >>
+ MSM_JPEGDMA_HW_CAPABILITY_NUM_PIPES_SHFT;
+
+ return num_pipes;
+}
+
+/*
+ * msm_jpegdma_hw_reset - Reset jpeg dma core.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_reset(struct msm_jpegdma_device *dma)
+{
+ unsigned long time;
+
+ init_completion(&dma->hw_reset_completion);
+
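+	/* Trigger the reset; the irq handler completes hw_reset_completion on RST_DONE */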
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_HW_JPEGDMA_RESET, MSM_HW_JPEGDMA_RESET_DEFAULT);
+
+ time = wait_for_completion_timeout(&dma->hw_reset_completion,
+ msecs_to_jiffies(MSM_JPEGDMA_TIMEOUT_MS));
+ if (!time) {
+		dev_err(dma->dev, "Jpeg dma reset timeout\n");
+ return -ETIME;
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_halt - Halt jpeg dma core.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_halt(struct msm_jpegdma_device *dma)
+{
+ unsigned long time;
+
+ init_completion(&dma->hw_halt_completion);
+
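+	/* Request an AXI halt; the irq handler completes hw_halt_completion on AXI_HALT */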
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CMD_ADDR, 0x4);
+
+ time = wait_for_completion_timeout(&dma->hw_halt_completion,
+ msecs_to_jiffies(MSM_JPEGDMA_TIMEOUT_MS));
+ if (!time) {
+		dev_err(dma->dev, "Jpeg dma halt timeout\n");
+ return -ETIME;
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_run - Enable dma processing.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_run(struct msm_jpegdma_device *dma)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CMD_ADDR, 0x1);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_core_config - Set jpeg dma core configuration.
+ * @dma: Pointer to dma device.
+ * @num_pipes: Number of pipes.
+ * @scale_0: Scaler 0 enable.
+ * @scale_1: Scaler 1 enable.
+ */
+static int msm_jpegdma_hw_core_config(struct msm_jpegdma_device *dma,
+ int num_pipes, int scale_0, int scale_1)
+{
+ u32 reg;
+
+ reg = (scale_0 << MSM_JPEGDMA_CORE_CFG_SCALE_0_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_TEST_BUS_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_BRIDGE_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_WE_0_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_FE_0_ENABLE_SHFT);
+
+ /* Enable read write ports for second pipe */
+ if (num_pipes > 1) {
+ reg |= (scale_1 << MSM_JPEGDMA_CORE_CFG_SCALE_1_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_WE_1_ENABLE_SHFT) |
+ (0x1 << MSM_JPEGDMA_CORE_CFG_FE_1_ENABLE_SHFT);
+ }
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CORE_CFG_ADDR, reg);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_0_block - Fetch engine 0 block configuration.
+ * @dma: Pointer to dma device.
+ * @block_config: Pointer to block configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_fe_0_block(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_block_config *block_config,
+ enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ switch (plane_type) {
+ case JPEGDMA_PLANE_TYPE_Y:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_Y <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CB:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CB <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CR:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CR <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CBCR:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CBCR <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ default:
+ dev_err(dma->dev, "Unsupported plane type %d\n", plane_type);
+ return -EINVAL;
+ }
+
+ reg |= (block_config->block.reg_val <<
+ MSM_JPEGDMA_FE_CFG_BLOCK_WIDTH_SHFT) |
+ (0x1 << MSM_JPEGDMA_FE_CFG_MAL_BOUNDARY_SHFT) |
+ (0x1 << MSM_JPEGDMA_FE_CFG_MAL_EN_SHFT) |
+ (0xF << MSM_JPEGDMA_FE_CFG_BURST_LENGTH_MAX_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_0_CFG_ADDR, reg);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_1_block - Fetch engine 1 block configuration.
+ * @dma: Pointer to dma device.
+ * @block_config: Pointer to block configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_fe_1_block(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_block_config *block_config,
+ enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ switch (plane_type) {
+ case JPEGDMA_PLANE_TYPE_Y:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_Y <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CB:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CB <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CR:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CR <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ case JPEGDMA_PLANE_TYPE_CBCR:
+ reg = MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CBCR <<
+ MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT;
+ break;
+ default:
+ dev_err(dma->dev, "Unsupported plane type %d\n", plane_type);
+ return -EINVAL;
+ }
+
+ reg |= (block_config->block.reg_val <<
+ MSM_JPEGDMA_FE_CFG_BLOCK_WIDTH_SHFT) |
+ (0xF << MSM_JPEGDMA_FE_CFG_BURST_LENGTH_MAX_SHFT) |
+ (0x1 << MSM_JPEGDMA_FE_CFG_MAL_EN_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_1_CFG_ADDR, reg);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_0_phase - Fetch engine 0 phase configuration.
+ * @dma: Pointer to dma device.
+ * @phase: Fetch engine 0 phase.
+ */
+static int msm_jpegdma_hw_fe_0_phase(struct msm_jpegdma_device *dma, int phase)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_HINIT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_HINIT_INT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_VINIT_INT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_VINIT_INT_ADDR, phase);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_1_phase - Fetch engine 1 phase configuration.
+ * @dma: Pointer to dma device.
+ * @phase: Fetch engine 1 phase.
+ */
+static int msm_jpegdma_hw_fe_1_phase(struct msm_jpegdma_device *dma, int phase)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_HINIT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_HINIT_INT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_VINIT_INT_ADDR, 0x00);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_VINIT_INT_ADDR, phase);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_0_size - Fetch engine 0 size configuration.
+ * @dma: Pointer to dma device.
+ * @size: Pointer to size configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_fe_0_size(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size *size, enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ reg = (size->width + size->left - 1) |
+ ((size->height + size->top - 1) <<
+ MSM_JPEGDMA_FE_RD_BUFFER_SIZE_HEIGHT_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_BUFFER_SIZE_0_ADDR, reg);
+
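+	/* For interleaved CbCr the horizontal offset is in Cb/Cr pairs, hence left / 2 */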
+ if (size->left && plane_type == JPEGDMA_PLANE_TYPE_CBCR)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_HINIT_INT_ADDR, size->left / 2);
+ else
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_HINIT_INT_ADDR, size->left);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_VINIT_INT_ADDR, size->top);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_STRIDE_ADDR, size->stride);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_1_size - Fetch engine 1 size configuration.
+ * @dma: Pointer to dma device.
+ * @size: Pointer to size configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_fe_1_size(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size *size, enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ reg = (size->width + size->left - 1) |
+ ((size->height + size->top - 1) <<
+ MSM_JPEGDMA_FE_RD_BUFFER_SIZE_HEIGHT_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_BUFFER_SIZE_1_ADDR, reg);
+
+ if (size->left && plane_type == JPEGDMA_PLANE_TYPE_CBCR)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_HINIT_INT_ADDR, size->left / 2);
+ else
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_HINIT_INT_ADDR, size->left);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_VINIT_INT_ADDR, size->top);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_STRIDE_ADDR, size->stride);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_0_addr - Set fetch engine 0 address.
+ * @dma: Pointer to dma device.
+ * @addr: Fetch engine address.
+ */
+static int msm_jpegdma_hw_fe_0_addr(struct msm_jpegdma_device *dma, u32 addr)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CMD_ADDR, MSM_JPEGDMA_CMD_CLEAR_READ_PLN_QUEUES);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_0_PNTR_ADDR, addr);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_fe_1_addr - Set fetch engine 1 address.
+ * @dma: Pointer to dma device.
+ * @addr: Fetch engine address.
+ */
+static int msm_jpegdma_hw_fe_1_addr(struct msm_jpegdma_device *dma, u32 addr)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_FE_RD_1_PNTR_ADDR, addr);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_0_block - Write engine 0 block configuration.
+ * @dma: Pointer to dma device.
+ * @block_config: Pointer to block configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_we_0_block(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_block_config *block,
+ enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ reg = (0xF << MSM_JPEGDMA_WE_CFG_BURST_LENGTH_MAX_SHFT) |
+ (0x1 << MSM_JPEGDMA_WE_CFG_MAL_BOUNDARY_SHFT) |
+ (0x1 << MSM_JPEGDMA_WE_CFG_MAL_EN_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_CFG_ADDR, reg);
+
+ reg = ((block->blocks_per_row - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_0_BLOCKS_PER_ROW_SHFT) |
+ (block->blocks_per_col - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_CFG_0_ADDR, reg);
+
+ reg = ((block->h_step_last - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_1_LAST_H_STEP_SHFT) |
+ (block->h_step - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_CFG_1_ADDR, reg);
+
+ reg = ((block->v_step_last - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_2_LAST_V_STEP_SHFT) |
+ (block->v_step - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_CFG_2_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_CFG_3_ADDR, 0x0);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_1_block - Write engine 1 block configuration.
+ * @dma: Pointer to dma device.
+ * @block_config: Pointer to block configuration.
+ * @plane_type: Plane type.
+ */
+static int msm_jpegdma_hw_we_1_block(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_block_config *block,
+ enum msm_jpegdma_plane_type plane_type)
+{
+ u32 reg;
+
+ reg = ((block->blocks_per_row - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_0_BLOCKS_PER_ROW_SHFT) |
+ (block->blocks_per_col - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_CFG_0_ADDR, reg);
+
+ reg = ((block->h_step_last - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_1_LAST_H_STEP_SHFT) |
+ (block->h_step - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_CFG_1_ADDR, reg);
+
+ reg = ((block->v_step_last - 1) <<
+ MSM_JPEGDMA_WE_PLN_WR_CFG_2_LAST_V_STEP_SHFT) |
+ (block->v_step - 1);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_CFG_2_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_CFG_3_ADDR, 0x0);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_0_size - Write engine 0 size configuration.
+ * @dma: Pointer to dma device.
+ * @size: Pointer to size configuration.
+ */
+static int msm_jpegdma_hw_we_0_size(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size *size)
+{
+ u32 reg;
+
+ reg = (size->width) | ((size->height) <<
+ MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_HEIGHT_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_0_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_STRIDE_ADDR, size->stride);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_1_size - Write engine 1 size configuration.
+ * @dma: Pointer to dma device.
+ * @size: Pointer to size configuration.
+ */
+static int msm_jpegdma_hw_we_1_size(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size *size)
+{
+ u32 reg;
+
+ reg = (size->width) | ((size->height) <<
+ MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_HEIGHT_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_1_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_STRIDE_ADDR, size->stride);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_0_addr - Set write engine 0 address.
+ * @dma: Pointer to dma device.
+ * @addr: Write engine address.
+ */
+static int msm_jpegdma_hw_we_0_addr(struct msm_jpegdma_device *dma, u32 addr)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_CMD_ADDR, MSM_JPEGDMA_CMD_CLEAR_WRITE_PLN_QUEUES);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_0_WR_PNTR_ADDR, addr);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_we_1_addr - Set write engine 1 address.
+ * @dma: Pointer to dma device.
+ * @addr: Write engine address.
+ */
+static int msm_jpegdma_hw_we_1_addr(struct msm_jpegdma_device *dma, u32 addr)
+{
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_WE_PLN_1_WR_PNTR_ADDR, addr);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_scale_0_config - Scale configuration for pipeline 0.
+ * @dma: Pointer to dma device.
+ * @scale: Scale configuration.
+ */
+static int msm_jpegdma_hw_scale_0_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_scale *scale)
+{
+ u32 reg;
+ u32 h_down_en;
+ u32 v_down_en;
+
+ h_down_en = (scale->hor_scale == MSM_JPEGDMA_SCALE_UNI) ? 0 : 1;
+ v_down_en = (scale->ver_scale == MSM_JPEGDMA_SCALE_UNI) ? 0 : 1;
+
+ reg = (h_down_en << MSM_JPEGDMA_PP_SCALE_CFG_HSCALE_ENABLE_SHFT) |
+ (v_down_en << MSM_JPEGDMA_PP_SCALE_CFG_VSCALE_ENABLE_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_0_SCALE_CFG_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_0_SCALE_PHASEH_STEP_ADDR, scale->hor_scale);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_0_SCALE_PHASEV_STEP_ADDR, scale->ver_scale);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_scale_1_config - Scale configuration for pipeline 1.
+ * @dma: Pointer to dma device.
+ * @scale: Scale configuration.
+ */
+static int msm_jpegdma_hw_scale_1_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_scale *scale)
+{
+ u32 reg;
+ u32 h_down_en;
+ u32 v_down_en;
+
+ h_down_en = (scale->hor_scale == MSM_JPEGDMA_SCALE_UNI) ? 0 : 1;
+ v_down_en = (scale->ver_scale == MSM_JPEGDMA_SCALE_UNI) ? 0 : 1;
+
+ reg = (h_down_en << MSM_JPEGDMA_PP_SCALE_CFG_HSCALE_ENABLE_SHFT) |
+ (v_down_en << MSM_JPEGDMA_PP_SCALE_CFG_VSCALE_ENABLE_SHFT);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_1_SCALE_CFG_ADDR, reg);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_1_SCALE_PHASEH_STEP_ADDR, scale->hor_scale);
+
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_PP_1_SCALE_PHASEV_STEP_ADDR, scale->ver_scale);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_config_qos - Configure qos registers.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_config_qos(struct msm_jpegdma_device *dma)
+{
+ int i;
+
+ if (!dma->qos_regs_num)
+ return;
+
+ for (i = 0; i < dma->qos_regs_num; i++)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ dma->qos_regs[i].reg, dma->qos_regs[i].val);
+
+ return;
+}
+
+/*
+ * msm_jpegdma_hw_config_vbif - Configure dma vbif interface.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_config_vbif(struct msm_jpegdma_device *dma)
+{
+ int i;
+
+ if (!dma->vbif_regs_num)
+ return;
+
+ for (i = 0; i < dma->vbif_regs_num; i++)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_VBIF,
+ dma->vbif_regs[i].reg, dma->vbif_regs[i].val);
+
+ return;
+}
+
+/*
+ * msm_jpegdma_hw_config_mmu_prefetch - Configure mmu prefetch registers.
+ * @dma: Pointer to dma device.
+ * @min_addr: Pointer to jpeg dma addr, containing min addrs of the plane.
+ * @max_addr: Pointer to jpeg dma addr, containing max addrs of the plane.
+ */
+static void msm_jpegdma_hw_config_mmu_prefetch(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_addr *min_addr,
+ struct msm_jpegdma_addr *max_addr)
+{
+ int i;
+
+ if (!dma->prefetch_regs_num)
+ return;
+
+ for (i = 0; i < dma->prefetch_regs_num; i++)
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_VBIF,
+ dma->prefetch_regs[i].reg, dma->prefetch_regs[i].val);
+
+ if (min_addr != NULL && max_addr != NULL) {
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN, min_addr->in_addr);
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX, max_addr->in_addr);
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN, min_addr->out_addr);
+ msm_jpegdma_hw_write_reg(dma, MSM_JPEGDMA_IOMEM_CORE,
+ MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX, max_addr->out_addr);
+ }
+}
+
+/*
+ * msm_jpegdma_hw_calc_speed - Calculate speed based on framerate and size.
+ * @dma: Pointer to dma device.
+ * @size: Dma user size configuration.
+ * @speed: Calculated speed.
+ */
+static int msm_jpegdma_hw_calc_speed(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size,
+ struct msm_jpegdma_speed *speed)
+{
+ u64 width;
+ u64 height;
+ u64 real_clock;
+ u64 calc_rate;
+
+ width = size->in_size.width + size->in_size.left;
+ height = size->in_size.height + size->in_size.top;
+
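+	/* Estimate the core clock needed for this resolution, bit depth and frame rate */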
+ calc_rate = (width * height * size->format.depth * size->fps) / 16;
+	real_clock = clk_round_rate(dma->clk[MSM_JPEGDMA_CORE_CLK], calc_rate);
+	/* clk_round_rate() returns a signed long, so check for an error before using the rate */
+	if ((long)real_clock < 0) {
+ dev_err(dma->dev, "Can not round core clock\n");
+ return -EINVAL;
+ }
+
+ speed->bus_ab = calc_rate * 2;
+ speed->bus_ib = (real_clock *
+ (MSM_JPEGDMA_BW_NUM + MSM_JPEGDMA_BW_DEN - 1)) /
+ MSM_JPEGDMA_BW_DEN;
+ speed->core_clock = real_clock;
+ dev_dbg(dma->dev, "Speed core clk %llu ab %llu ib %llu fps %d\n",
+ speed->core_clock, speed->bus_ab, speed->bus_ib, size->fps);
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_set_speed - Configure clock and bus bandwidth based on
+ * requested speed and dma clients.
+ * @dma: Pointer to dma device.
+ * @size: Jpeg dma size configuration.
+ * @speed: Requested dma speed.
+ */
+static int msm_jpegdma_hw_set_speed(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size,
+ struct msm_jpegdma_speed *speed)
+{
+ struct msm_jpegdma_speed new_sp;
+ struct msm_jpegdma_size_config new_size;
+ int ret;
+
+ if (dma->active_clock_rate >= speed->core_clock)
+ return 0;
+
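+	/* When the device is shared by several contexts, recalculate for a proportionally higher fps */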
+ new_sp = *speed;
+ if (dma->ref_count > 2) {
+ new_size = *size;
+ new_size.fps = size->fps * ((dma->ref_count + 1) / 2);
+ ret = msm_jpegdma_hw_calc_speed(dma, &new_size, &new_sp);
+ if (ret < 0)
+ return -EINVAL;
+ }
+
+ ret = clk_set_rate(dma->clk[MSM_JPEGDMA_CORE_CLK], new_sp.core_clock);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail Core clock rate %d\n", ret);
+ return -EINVAL;
+ }
+ dma->active_clock_rate = speed->core_clock;
+
+ dma->bus_vectors.ab = new_sp.bus_ab;
+ dma->bus_vectors.ib = new_sp.bus_ib;
+
+ ret = msm_bus_scale_client_update_request(dma->bus_client, 0);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail bus scale update %d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_add_plane_offset - Add plane offset to all pipelines.
+ * @plane: Jpeg dma plane configuration.
+ * @in_offset: Input plane offset.
+ * @out_offset: Output plane offset.
+ */
+static int msm_jpegdma_hw_add_plane_offset(struct msm_jpegdma_plane *plane,
+ unsigned int in_offset, unsigned int out_offset)
+{
+ int i;
+
+ for (i = 0; i < plane->active_pipes; i++) {
+ plane->config[i].in_offset += in_offset;
+ plane->config[i].out_offset += out_offset;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_calc_config - Calculate plane configuration.
+ * @size_cfg: Size configuration.
+ * @plane: Plane configuration to be calculated.
+ */
+static int msm_jpegdma_hw_calc_config(struct msm_jpegdma_size_config *size_cfg,
+ struct msm_jpegdma_plane *plane)
+{
+ u64 scale_hor, scale_ver, phase;
+ u64 in_width, in_height;
+ u64 out_width, out_height;
+ struct msm_jpegdma_config *config;
+ int i;
+
+ if (!size_cfg->out_size.width || !size_cfg->out_size.height)
+ return -EINVAL;
+
+ config = &plane->config[0];
+ config->scale_cfg.enable = 0;
+
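+	/* Scale factors are in Q21 fixed point; MSM_JPEGDMA_SCALE_UNI represents 1.0 (no scaling) */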
+ in_width = size_cfg->in_size.width;
+ out_width = size_cfg->out_size.width;
+ scale_hor = (in_width * MSM_JPEGDMA_SCALE_UNI) / out_width;
+ if (scale_hor != MSM_JPEGDMA_SCALE_UNI)
+ config->scale_cfg.enable = 1;
+
+ in_height = size_cfg->in_size.height;
+ out_height = size_cfg->out_size.height;
+ scale_ver = (in_height * MSM_JPEGDMA_SCALE_UNI) / out_height;
+ if (scale_ver != MSM_JPEGDMA_SCALE_UNI)
+ config->scale_cfg.enable = 1;
+
+ config->scale_cfg.ver_scale = scale_ver;
+ config->scale_cfg.hor_scale = scale_hor;
+
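+	/* Pick the widest block whose divider covers the horizontal scale factor */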
+	for (i = 0; i < ARRAY_SIZE(msm_jpegdma_block_sel); i++)
+ if (scale_hor <= msm_jpegdma_block_sel[i].div)
+ break;
+
+ if (i == ARRAY_SIZE(msm_jpegdma_block_sel))
+ return -EINVAL;
+
+ config->block_cfg.block = msm_jpegdma_block_sel[i];
+
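+	/* With two active pipes each pipe handles half of the output height; derive the second pipe's starting phase */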
+ if (plane->active_pipes > 1) {
+ phase = (out_height * scale_ver + (plane->active_pipes - 1)) /
+ plane->active_pipes;
+ phase &= (MSM_JPEGDMA_SCALE_UNI - 1);
+ out_height = (out_height + (plane->active_pipes - 1)) /
+ plane->active_pipes;
+ in_height = (out_height * scale_ver) / MSM_JPEGDMA_SCALE_UNI;
+ }
+
+ config->block_cfg.blocks_per_row = out_width /
+ config->block_cfg.block.width;
+
+ config->block_cfg.blocks_per_col = out_height;
+
+ config->block_cfg.h_step = config->block_cfg.block.width;
+
+ config->block_cfg.h_step_last = out_width %
+ config->block_cfg.block.width;
+ if (!config->block_cfg.h_step_last)
+ config->block_cfg.h_step_last = config->block_cfg.h_step;
+ else
+ config->block_cfg.blocks_per_row++;
+
+ config->block_cfg.v_step = 1;
+ config->block_cfg.v_step_last = 1;
+
+ config->size_cfg = *size_cfg;
+ config->size_cfg.in_size.width = in_width;
+ config->size_cfg.in_size.height = in_height;
+ config->size_cfg.out_size.width = out_width;
+ config->size_cfg.out_size.height = out_height;
+ config->in_offset = 0;
+ config->out_offset = 0;
+
+ if (plane->active_pipes > 1) {
+ plane->config[1] = *config;
+ /* Recalculate offset for second pipe */
+ plane->config[1].in_offset =
+ config->size_cfg.in_size.scanline *
+ config->size_cfg.in_size.stride;
+
+ plane->config[1].out_offset =
+ config->size_cfg.out_size.scanline *
+ config->size_cfg.out_size.stride;
+
+ plane->config[1].phase = phase;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_check_config - Check whether a configuration for the given size is possible.
+ * @dma: Pointer to dma device.
+ * @size_cfg: Size configuration.
+ */
+int msm_jpegdma_hw_check_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size_cfg)
+{
+ u64 in_width, in_height;
+ u64 out_width, out_height;
+ u64 scale;
+
+ if (!size_cfg->out_size.width || !size_cfg->out_size.height)
+ return -EINVAL;
+
+ in_width = size_cfg->in_size.width;
+ out_width = size_cfg->out_size.width;
+ scale = ((in_width * MSM_JPEGDMA_SCALE_UNI)) / out_width;
+ if (scale < MSM_JPEGDMA_SCALE_UNI)
+ return -EINVAL;
+
+
+ in_height = size_cfg->in_size.height;
+ out_height = size_cfg->out_size.height;
+ scale = (in_height * MSM_JPEGDMA_SCALE_UNI) / out_height;
+ if (scale < MSM_JPEGDMA_SCALE_UNI)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_set_config - Set dma configuration based on size.
+ * @dma: Pointer to dma device.
+ * @size_cfg: Size configuration.
+ * @plane_cfg: Calculated plane configuration.
+ */
+int msm_jpegdma_hw_set_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size_cfg,
+ struct msm_jpegdma_plane_config *plane_cfg)
+{
+ unsigned int in_offset;
+ unsigned int out_offset;
+ struct msm_jpegdma_size_config plane_size;
+ int ret;
+ int i;
+
+ if (!size_cfg->format.colplane_h || !size_cfg->format.colplane_v)
+ return -EINVAL;
+
+ ret = msm_jpegdma_hw_calc_speed(dma, size_cfg, &plane_cfg->speed);
+ if (ret < 0)
+ return -EINVAL;
+
+ dma->active_clock_rate = 0;
+
+ plane_cfg->plane[0].active_pipes = dma->hw_num_pipes;
+ plane_cfg->plane[0].type = size_cfg->format.planes[0];
+ msm_jpegdma_hw_calc_config(size_cfg, &plane_cfg->plane[0]);
+ if (size_cfg->format.num_planes == 1)
+ return 0;
+
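+	/* Chroma planes follow the luma plane in memory; their sizes and offsets are derived from the luma plane using the colplane subsampling factors */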
+ in_offset = size_cfg->in_size.scanline *
+ size_cfg->in_size.stride;
+ out_offset = size_cfg->out_size.scanline *
+ size_cfg->out_size.stride;
+
+ memset(&plane_size, 0x00, sizeof(plane_size));
+ for (i = 1; i < size_cfg->format.num_planes; i++) {
+ plane_cfg->plane[i].active_pipes = dma->hw_num_pipes;
+ plane_cfg->plane[i].type = size_cfg->format.planes[i];
+
+ if (size_cfg->in_size.top)
+ plane_size.in_size.top = size_cfg->in_size.top /
+ size_cfg->format.colplane_v;
+
+ if (size_cfg->in_size.left)
+ plane_size.in_size.left = size_cfg->in_size.left /
+ size_cfg->format.colplane_h;
+
+ plane_size.in_size.width = size_cfg->in_size.width /
+ size_cfg->format.colplane_h;
+ plane_size.in_size.height = size_cfg->in_size.height /
+ size_cfg->format.colplane_v;
+ plane_size.in_size.scanline = size_cfg->in_size.scanline /
+ size_cfg->format.colplane_v;
+
+ plane_size.in_size.stride = size_cfg->in_size.stride;
+
+ plane_size.out_size.width = size_cfg->out_size.width /
+ size_cfg->format.colplane_h;
+ plane_size.out_size.height = size_cfg->out_size.height /
+ size_cfg->format.colplane_v;
+ plane_size.out_size.scanline = size_cfg->out_size.scanline /
+ size_cfg->format.colplane_v;
+
+ plane_size.out_size.stride = size_cfg->out_size.stride;
+
+ plane_size.format = size_cfg->format;
+ plane_size.fps = size_cfg->fps;
+
+ msm_jpegdma_hw_calc_config(&plane_size,
+ &plane_cfg->plane[i]);
+
+ msm_jpegdma_hw_add_plane_offset(&plane_cfg->plane[i],
+ in_offset, out_offset);
+
+ in_offset += (plane_size.in_size.scanline *
+ plane_size.in_size.stride);
+ out_offset += (plane_size.out_size.scanline *
+ plane_size.out_size.stride);
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_start - Start dma processing.
+ * @dma: Pointer to dma device.
+ * @addr: Input and output addresses.
+ * @plane: Plane configuration.
+ * @speed: Clock and bus bandwidth configuration.
+ */
+int msm_jpegdma_hw_start(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_addr *addr,
+ struct msm_jpegdma_plane *plane,
+ struct msm_jpegdma_speed *speed)
+{
+ struct msm_jpegdma_config *cfg;
+ struct msm_jpegdma_addr prefetch_max_addr;
+ unsigned int prefetch_in_size;
+ unsigned int prefetch_out_size;
+
+ int ret;
+
+ if (!plane->active_pipes)
+ return -EINVAL;
+
+ if (plane->active_pipes > MSM_JPEGDMA_MAX_PIPES)
+ return -EINVAL;
+ ret = msm_jpegdma_hw_set_speed(dma, &plane->config[0].size_cfg, speed);
+ if (ret < 0)
+ return -EINVAL;
+
+ msm_jpegdma_hw_core_config(dma, plane->active_pipes,
+ plane->config[0].scale_cfg.enable,
+ plane->config[1].scale_cfg.enable);
+
+ cfg = &plane->config[0];
+ msm_jpegdma_hw_scale_0_config(dma, &cfg->scale_cfg);
+
+ msm_jpegdma_hw_fe_0_block(dma, &cfg->block_cfg, plane->type);
+ msm_jpegdma_hw_fe_0_phase(dma, cfg->phase);
+ msm_jpegdma_hw_fe_0_size(dma, &cfg->size_cfg.in_size, plane->type);
+ msm_jpegdma_hw_fe_0_addr(dma, addr->in_addr + cfg->in_offset);
+ prefetch_in_size = cfg->size_cfg.in_size.stride *
+ cfg->size_cfg.in_size.scanline;
+
+ msm_jpegdma_hw_we_0_block(dma, &cfg->block_cfg, plane->type);
+ msm_jpegdma_hw_we_0_size(dma, &cfg->size_cfg.out_size);
+ msm_jpegdma_hw_we_0_addr(dma, addr->out_addr + cfg->out_offset);
+ prefetch_out_size = cfg->size_cfg.out_size.stride *
+ cfg->size_cfg.out_size.scanline;
+
+ if (plane->active_pipes > 1) {
+ cfg = &plane->config[1];
+ msm_jpegdma_hw_scale_1_config(dma, &cfg->scale_cfg);
+
+ msm_jpegdma_hw_fe_1_block(dma, &cfg->block_cfg, plane->type);
+ msm_jpegdma_hw_fe_1_phase(dma, cfg->phase);
+ msm_jpegdma_hw_fe_1_size(dma, &cfg->size_cfg.in_size,
+ plane->type);
+ msm_jpegdma_hw_fe_1_addr(dma, addr->in_addr + cfg->in_offset);
+ prefetch_in_size += (cfg->size_cfg.in_size.stride *
+ cfg->size_cfg.in_size.scanline);
+
+ msm_jpegdma_hw_we_1_block(dma, &cfg->block_cfg, plane->type);
+ msm_jpegdma_hw_we_1_size(dma, &cfg->size_cfg.out_size);
+ msm_jpegdma_hw_we_1_addr(dma, addr->out_addr + cfg->out_offset);
+ prefetch_out_size += (cfg->size_cfg.out_size.stride *
+ cfg->size_cfg.out_size.scanline);
+ }
+
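+	/* Program mmu prefetch with the lowest and highest addresses this transfer will touch */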
+ if (prefetch_in_size > 0 && prefetch_out_size > 0) {
+ prefetch_max_addr.in_addr = addr->in_addr +
+ (prefetch_in_size - 1);
+ prefetch_max_addr.out_addr = addr->out_addr +
+ (prefetch_out_size - 1);
+ msm_jpegdma_hw_config_mmu_prefetch(dma, addr,
+ &prefetch_max_addr);
+ }
+
+ msm_jpegdma_hw_run(dma);
+
+ return 1;
+}
+
+/*
+ * msm_jpegdma_hw_abort - Abort dma processing.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_abort(struct msm_jpegdma_device *dma)
+{
+ int ret;
+
+ ret = msm_jpegdma_hw_halt(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to halt hw\n");
+ return ret;
+ }
+
+ ret = msm_jpegdma_hw_reset(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to reset hw\n");
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_irq - Dma irq handler.
+ * @irq: Irq number.
+ * @dev_id: Pointer to dma device.
+ */
+static irqreturn_t msm_jpegdma_hw_irq(int irq, void *dev_id)
+{
+ struct msm_jpegdma_device *dma = dev_id;
+
+ u32 irq_status;
+
+ irq_status = msm_jpegdma_hw_get_irq_status(dma);
+ msm_jpegdma_hw_clear_irq(dma, irq_status);
+
+ if (irq_status & MSM_JPEGDMA_IRQ_STATUS_RST_DONE) {
+ dev_dbg(dma->dev, "Jpeg v4l2 dma IRQ reset done\n");
+ complete_all(&dma->hw_reset_completion);
+ }
+
+ if (irq_status & MSM_JPEGDMA_IRQ_STATUS_AXI_HALT) {
+ dev_dbg(dma->dev, "Jpeg v4l2 dma IRQ AXI halt\n");
+ complete_all(&dma->hw_halt_completion);
+ }
+
+ if (irq_status & MSM_JPEGDMA_IRQ_STATUS_SESSION_DONE) {
+ dev_dbg(dma->dev, "Jpeg v4l2 dma IRQ session done\n");
+ msm_jpegdma_isr_processing_done(dma);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * msm_jpegdma_hw_request_irq - Request dma irq.
+ * @pdev: Pointer to platform device.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_request_irq(struct platform_device *pdev,
+ struct msm_jpegdma_device *dma)
+{
+ int ret;
+
+ dma->irq_num = platform_get_irq(pdev, 0);
+ if (dma->irq_num < 0) {
+ dev_err(dma->dev, "Can not get dma core irq resource\n");
+ ret = -ENODEV;
+ goto error_irq;
+ }
+
+ ret = request_threaded_irq(dma->irq_num, NULL,
+ msm_jpegdma_hw_irq, IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ dev_name(&pdev->dev), dma);
+ if (ret) {
+ dev_err(dma->dev, "Can not claim wrapper IRQ %d\n",
+ dma->irq_num);
+ goto error_irq;
+ }
+
+ return 0;
+
+error_irq:
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_release_irq - Free dma irq.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_release_irq(struct msm_jpegdma_device *dma)
+{
+ if (dma->irq_num >= 0) {
+ free_irq(dma->irq_num, dma);
+ dma->irq_num = -1;
+ }
+}
+
+/*
+ * msm_jpegdma_hw_release_mem_resources - Releases memory resources.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_release_mem_resources(struct msm_jpegdma_device *dma)
+{
+ int i;
+
+	/* Release memory resources */
+ for (i = 0; i < MSM_JPEGDMA_IOMEM_LAST; i++) {
+ if (dma->iomem_base[i]) {
+ iounmap(dma->iomem_base[i]);
+ dma->iomem_base[i] = NULL;
+ }
+ if (dma->ioarea[i]) {
+ release_mem_region(dma->res_mem[i]->start,
+ resource_size(dma->res_mem[i]));
+ dma->ioarea[i] = NULL;
+ }
+ dma->res_mem[i] = NULL;
+ }
+}
+
+/*
+ * msm_jpegdma_hw_get_mem_resources - Get memory resources.
+ * @pdev: Pointer to dma platform device.
+ * @dma: Pointer to dma device.
+ *
+ * Get and ioremap platform memory resources.
+ */
+int msm_jpegdma_hw_get_mem_resources(struct platform_device *pdev,
+ struct msm_jpegdma_device *dma)
+{
+ int i;
+ int ret = 0;
+
+ /* Prepare memory resources */
+ for (i = 0; i < MSM_JPEGDMA_IOMEM_LAST; i++) {
+ /* Get resources */
+ dma->res_mem[i] = platform_get_resource(pdev,
+ IORESOURCE_MEM, i);
+ if (!dma->res_mem[i]) {
+ dev_err(dma->dev, "Fail get resource idx %d\n", i);
+ ret = -ENODEV;
+ break;
+ }
+
+ dma->ioarea[i] = request_mem_region(dma->res_mem[i]->start,
+ resource_size(dma->res_mem[i]), dma->res_mem[i]->name);
+ if (!dma->ioarea[i]) {
+ dev_err(dma->dev, "%s can not request mem\n",
+ dma->res_mem[i]->name);
+ ret = -ENODEV;
+ break;
+ }
+
+ dma->iomem_base[i] = ioremap(dma->res_mem[i]->start,
+ resource_size(dma->res_mem[i]));
+ if (!dma->iomem_base[i]) {
+ dev_err(dma->dev, "%s can not remap region\n",
+ dma->res_mem[i]->name);
+ ret = -ENODEV;
+ break;
+ }
+ }
+
+ if (ret < 0)
+ msm_jpegdma_hw_release_mem_resources(dma);
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_get_regulators - Get jpeg dma regulators.
+ * @dma: Pointer to dma device.
+ *
+ * Read regulator information from the device tree and get the regulators.
+ */
+int msm_jpegdma_hw_get_regulators(struct msm_jpegdma_device *dma)
+{
+ const char *name;
+ uint32_t cnt;
+ int i;
+ int ret;
+
+ if (of_get_property(dma->dev->of_node, "qcom,vdd-names", NULL)) {
+ cnt = of_property_count_strings(dma->dev->of_node,
+ "qcom,vdd-names");
+
+ if ((cnt == 0) || (cnt == -EINVAL)) {
+ dev_err(dma->dev, "no regulators found %d\n", cnt);
+ return -EINVAL;
+ }
+
+ if (cnt > MSM_JPEGDMA_MAX_REGULATOR_NUM) {
+ dev_err(dma->dev, "Exceed max regulators %d\n", cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_string_index(dma->dev->of_node,
+ "qcom,vdd-names", i, &name);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail regulator idx %d\n", i);
+ goto regulator_get_error;
+ }
+
+ dma->vdd[i] = devm_regulator_get(dma->dev, name);
+ if (IS_ERR(dma->vdd[i])) {
+ ret = PTR_ERR(dma->vdd[i]);
+ dma->vdd[i] = NULL;
+ dev_err(dma->dev, "Error regulator get %s\n",
+ name);
+ goto regulator_get_error;
+ }
+ dev_dbg(dma->dev, "Regulator %s idx %d\n", name, i);
+ }
+ dma->regulator_num = cnt;
+ } else {
+ dma->regulator_num = 1;
+ dma->vdd[0] = devm_regulator_get(dma->dev, "vdd");
+ if (IS_ERR(dma->vdd[0])) {
+ dev_err(dma->dev, "Fail to get vdd regulator\n");
+ ret = PTR_ERR(dma->vdd[0]);
+ dma->vdd[0] = NULL;
+ return ret;
+ }
+ }
+ return 0;
+
+regulator_get_error:
+ for (; i > 0; i--) {
+ if (!IS_ERR_OR_NULL(dma->vdd[i - 1]))
+ devm_regulator_put(dma->vdd[i - 1]);
+ }
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put_regulators - Put jpeg dma regulators.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_put_regulators(struct msm_jpegdma_device *dma)
+{
+ int i;
+
+ for (i = dma->regulator_num - 1; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL(dma->vdd[i]))
+ devm_regulator_put(dma->vdd[i]);
+
+ dma->vdd[i] = NULL;
+ }
+}
+
+/*
+ * msm_jpegdma_hw_enable_regulators - Enable jpeg dma regulators.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_enable_regulators(struct msm_jpegdma_device *dma)
+{
+ int i;
+ int ret;
+
+	for (i = 0; i < dma->regulator_num; i++) {
+		ret = regulator_enable(dma->vdd[i]);
+		if (ret < 0) {
+			dev_err(dma->dev, "regulator enable failed %d\n", i);
+			goto error;
+		}
+	}
+ }
+
+ return 0;
+error:
+	/* Regulators are devm managed; only disable the ones already enabled */
+	for (; i > 0; i--) {
+		if (!IS_ERR_OR_NULL(dma->vdd[i - 1]))
+			regulator_disable(dma->vdd[i - 1]);
+	}
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_disable_regulators - Disable jpeg dma regulator.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_disable_regulators(struct msm_jpegdma_device *dma)
+{
+ int i;
+
+ for (i = dma->regulator_num - 1; i >= 0; i--) {
+ if (!IS_ERR_OR_NULL(dma->vdd[i]))
+ regulator_disable(dma->vdd[i]);
+ }
+}
+
+/*
+ * msm_jpegdma_hw_get_clocks - Get dma clocks.
+ * @dma: Pointer to dma device.
+ *
+ * Read clock information from the device tree and get the clocks.
+ */
+int msm_jpegdma_hw_get_clocks(struct msm_jpegdma_device *dma)
+{
+ const char *clk_name;
+ size_t cnt;
+ int i;
+ int ret;
+
+ cnt = of_property_count_strings(dma->dev->of_node, "clock-names");
+ if (cnt > MSM_JPEGDMA_MAX_CLK) {
+ dev_err(dma->dev, "Exceed max number of clocks %zu\n", cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_string_index(dma->dev->of_node,
+ "clock-names", i, &clk_name);
+ if (ret < 0) {
+ dev_err(dma->dev, "Can not read clock name %d\n", i);
+ goto error;
+ }
+
+ dma->clk[i] = clk_get(dma->dev, clk_name);
+ if (IS_ERR(dma->clk[i])) {
+ ret = -ENOENT;
+ dev_err(dma->dev, "Error clock get %s\n", clk_name);
+ goto error;
+ }
+ dev_dbg(dma->dev, "Clock name idx %d %s\n", i, clk_name);
+
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,clock-rates", i, &dma->clk_rates[i]);
+		if (ret < 0) {
+			dev_err(dma->dev, "Get clock rate fail %s\n", clk_name);
+			clk_put(dma->clk[i]);
+			goto error;
+		}
+ dev_dbg(dma->dev, "Clock rate idx %d value %d\n", i,
+ dma->clk_rates[i]);
+ }
+ dma->clk_num = cnt;
+
+ return 0;
+error:
+ for (; i > 0; i--)
+ clk_put(dma->clk[i - 1]);
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put_clocks - Put dma clocks.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_put_clocks(struct msm_jpegdma_device *dma)
+{
+ int i;
+
+	for (i = 0; i < dma->clk_num; i++) {
+		if (!IS_ERR_OR_NULL(dma->clk[i]))
+			clk_put(dma->clk[i]);
+	}
+	dma->clk_num = 0;
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_get_qos - Get dma qos settings from device-tree.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_get_qos(struct msm_jpegdma_device *dma)
+{
+ int i;
+ int ret;
+ unsigned int cnt;
+ const void *property;
+
+ property = of_get_property(dma->dev->of_node, "qcom,qos-regs", &cnt);
+ if (!property || !cnt) {
+ dev_dbg(dma->dev, "Missing qos settings\n");
+ return 0;
+ }
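+	/* of_get_property() returned the length in bytes; each register
+	 * offset in "qcom,qos-regs" is a 32-bit cell */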
+ cnt /= 4;
+
+ dma->qos_regs = kzalloc((sizeof(*dma->qos_regs) * cnt), GFP_KERNEL);
+ if (!dma->qos_regs)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,qos-regs", i,
+ &dma->qos_regs[i].reg);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read qos reg %d\n", i);
+ goto error;
+ }
+
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,qos-settings", i,
+ &dma->qos_regs[i].val);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read qos setting %d\n", i);
+ goto error;
+ }
+ dev_dbg(dma->dev, "Qos idx %d, reg %x val %x\n", i,
+ dma->qos_regs[i].reg, dma->qos_regs[i].val);
+ }
+ dma->qos_regs_num = cnt;
+
+ return 0;
+error:
+ kfree(dma->qos_regs);
+ dma->qos_regs = NULL;
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put_qos - Free dma qos settings.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_put_qos(struct msm_jpegdma_device *dma)
+{
+ kfree(dma->qos_regs);
+ dma->qos_regs = NULL;
+}
+
+/*
+ * msm_jpegdma_hw_get_vbif - Get dma vbif settings from device-tree.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_get_vbif(struct msm_jpegdma_device *dma)
+{
+ int i;
+ int ret;
+ unsigned int cnt;
+ const void *property;
+
+ property = of_get_property(dma->dev->of_node, "qcom,vbif-regs", &cnt);
+ if (!property || !cnt) {
+ dev_dbg(dma->dev, "Missing vbif settings\n");
+ return 0;
+ }
+ cnt /= 4;
+
+ dma->vbif_regs = kzalloc((sizeof(*dma->vbif_regs) * cnt), GFP_KERNEL);
+ if (!dma->vbif_regs)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,vbif-regs", i,
+ &dma->vbif_regs[i].reg);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read vbif reg %d\n", i);
+ goto error;
+ }
+
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,vbif-settings", i,
+ &dma->vbif_regs[i].val);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read vbif setting %d\n", i);
+ goto error;
+ }
+
+ dev_dbg(dma->dev, "Vbif idx %d, reg %x val %x\n", i,
+ dma->vbif_regs[i].reg, dma->vbif_regs[i].val);
+ }
+ dma->vbif_regs_num = cnt;
+
+ return 0;
+error:
+ kfree(dma->vbif_regs);
+ dma->vbif_regs = NULL;
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put_vbif - Free dma vbif settings.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_put_vbif(struct msm_jpegdma_device *dma)
+{
+ kfree(dma->vbif_regs);
+ dma->vbif_regs = NULL;
+}
+
+/*
+ * msm_jpegdma_hw_get_prefetch - Get dma prefetch settings from device-tree.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_get_prefetch(struct msm_jpegdma_device *dma)
+{
+ int i;
+ int ret;
+ unsigned int cnt;
+ const void *property;
+
+ property = of_get_property(dma->dev->of_node, "qcom,prefetch-regs",
+ &cnt);
+ if (!property || !cnt) {
+ dev_dbg(dma->dev, "Missing prefetch settings\n");
+ return 0;
+ }
+ cnt /= 4;
+
+ dma->prefetch_regs = kcalloc(cnt, sizeof(*dma->prefetch_regs),
+ GFP_KERNEL);
+ if (!dma->prefetch_regs)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,prefetch-regs", i,
+ &dma->prefetch_regs[i].reg);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read prefetch reg %d\n", i);
+ goto error;
+ }
+
+ ret = of_property_read_u32_index(dma->dev->of_node,
+ "qcom,prefetch-settings", i,
+ &dma->prefetch_regs[i].val);
+ if (ret < 0) {
+ dev_err(dma->dev, "can not read prefetch setting %d\n",
+ i);
+ goto error;
+ }
+
+ dev_dbg(dma->dev, "Prefetch idx %d, reg %x val %x\n", i,
+ dma->prefetch_regs[i].reg, dma->prefetch_regs[i].val);
+ }
+ dma->prefetch_regs_num = cnt;
+
+ return 0;
+error:
+ kfree(dma->prefetch_regs);
+ dma->prefetch_regs = NULL;
+
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put_prefetch - free prefetch settings.
+ * @dma: Pointer to dma device.
+ */
+void msm_jpegdma_hw_put_prefetch(struct msm_jpegdma_device *dma)
+{
+ kfree(dma->prefetch_regs);
+ dma->prefetch_regs = NULL;
+}
+
+/*
+ * msm_jpegdma_hw_set_clock_rate - Set clock rates described in device tree.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_set_clock_rate(struct msm_jpegdma_device *dma)
+{
+ int ret;
+ long clk_rate;
+ int i;
+
+ for (i = 0; i < dma->clk_num; i++) {
+
+ clk_rate = clk_round_rate(dma->clk[i], dma->clk_rates[i]);
+ if (clk_rate < 0) {
+ dev_dbg(dma->dev, "Clk round rate fail skip %d\n", i);
+ continue;
+ }
+
+ ret = clk_set_rate(dma->clk[i], clk_rate);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail clock rate %ld\n", clk_rate);
+ return -EINVAL;
+ }
+ dev_dbg(dma->dev, "Clk rate %d-%ld\n", i, clk_rate);
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_enable_clocks - Prepare and enable dma clocks.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_enable_clocks(struct msm_jpegdma_device *dma)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < dma->clk_num; i++) {
+ ret = clk_prepare(dma->clk[i]);
+ if (ret < 0) {
+ dev_err(dma->dev, "clock prepare failed %d\n", i);
+ goto error;
+ }
+
+ ret = clk_enable(dma->clk[i]);
+ if (ret < 0) {
+			dev_err(dma->dev, "clock enable failed %d\n", i);
+ clk_unprepare(dma->clk[i]);
+ goto error;
+ }
+ }
+
+ return 0;
+error:
+ for (; i > 0; i--) {
+ clk_disable(dma->clk[i - 1]);
+ clk_unprepare(dma->clk[i - 1]);
+ }
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_disable_clocks - Disable dma clock.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_disable_clocks(struct msm_jpegdma_device *dma)
+{
+ int i;
+
+ for (i = 0; i < dma->clk_num; i++) {
+ clk_disable(dma->clk[i]);
+ clk_unprepare(dma->clk[i]);
+ }
+}
+
+/*
+ * msm_jpegdma_hw_bus_request - Request bus for memory access.
+ * @dma: Pointer to dma device.
+ */
+static int msm_jpegdma_hw_bus_request(struct msm_jpegdma_device *dma)
+{
+ int ret;
+
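+	/* Initial ab/ib votes are derived from the core clock rate; they are
+	 * updated at runtime via msm_jpegdma_hw_update_bus_data() */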
+ dma->bus_vectors.src = MSM_BUS_MASTER_JPEG;
+ dma->bus_vectors.dst = MSM_BUS_SLAVE_EBI_CH0;
+ dma->bus_vectors.ab = dma->clk_rates[MSM_JPEGDMA_CORE_CLK] * 2;
+ dma->bus_vectors.ib = dma->clk_rates[MSM_JPEGDMA_CORE_CLK] * 2;
+
+ dma->bus_paths.num_paths = 1;
+ dma->bus_paths.vectors = &dma->bus_vectors;
+
+ dma->bus_scale_data.usecase = &dma->bus_paths;
+ dma->bus_scale_data.num_usecases = 1;
+ dma->bus_scale_data.name = MSM_JPEGDMA_BUS_CLIENT_NAME;
+
+ dma->bus_client = msm_bus_scale_register_client(&dma->bus_scale_data);
+ if (!dma->bus_client) {
+ dev_err(dma->dev, "Fail to register bus client\n");
+ return -ENOENT;
+ }
+
+ ret = msm_bus_scale_client_update_request(dma->bus_client, 0);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail bus scale update %d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_bus_release - Release memory access bus.
+ * @dma: Pointer to dma device.
+ */
+static void msm_jpegdma_hw_bus_release(struct msm_jpegdma_device *dma)
+{
+ if (dma->bus_client) {
+ msm_bus_scale_unregister_client(dma->bus_client);
+ dma->bus_client = 0;
+ }
+}
+
+/*
+ * msm_jpegdma_hw_update_bus_data - Update the bus bandwidth request.
+ * @dma: Pointer to dma device.
+ * @ab: Average bandwidth to vote for.
+ * @ib: Instantaneous bandwidth to vote for.
+ */
+int msm_jpegdma_hw_update_bus_data(struct msm_jpegdma_device *dma,
+ u64 ab, u64 ib)
+{
+ int ret;
+
+ dma->bus_vectors.ab = ab;
+ dma->bus_vectors.ib = ib;
+
+ ret = msm_bus_scale_client_update_request(dma->bus_client, 0);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail bus scale update %d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * msm_jpegdma_hw_get_capabilities - Query dma hw capabilities.
+ * @dma: Pointer to dma device.
+ */
+int msm_jpegdma_hw_get_capabilities(struct msm_jpegdma_device *dma)
+{
+ int ret = 0;
+
+ mutex_lock(&dma->lock);
+
+ ret = msm_jpegdma_hw_enable_regulators(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to enable regulators\n");
+ goto error_regulators_get;
+ }
+
+ ret = msm_jpegdma_hw_set_clock_rate(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to set clock rate\n");
+ goto error_fail_clock;
+ }
+
+ ret = msm_jpegdma_hw_enable_clocks(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to enable clocks\n");
+ goto error_fail_clock;
+ }
+ dma->hw_num_pipes = msm_jpegdma_hw_get_num_pipes(dma);
+
+ msm_jpegdma_hw_disable_clocks(dma);
+ msm_jpegdma_hw_disable_regulators(dma);
+
+ mutex_unlock(&dma->lock);
+
+ return 0;
+
+error_fail_clock:
+ msm_jpegdma_hw_disable_regulators(dma);
+error_regulators_get:
+ mutex_unlock(&dma->lock);
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_get - Get dma hw for performing any hw operation.
+ * @dma: Pointer to dma device.
+ *
+ * Prepare dma hw for operation. The reference count is protected by
+ * the dma device mutex.
+ */
+int msm_jpegdma_hw_get(struct msm_jpegdma_device *dma)
+{
+ int ret;
+
+ mutex_lock(&dma->lock);
+ if (dma->ref_count == 0) {
+
+ dev_dbg(dma->dev, "msm_jpegdma_hw_get E\n");
+ ret = msm_jpegdma_hw_set_clock_rate(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to set clock rates\n");
+ goto error;
+ }
+
+ ret = msm_jpegdma_hw_enable_regulators(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to enable regulators\n");
+ goto error;
+ }
+
+ ret = msm_jpegdma_hw_enable_clocks(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to enable clocks\n");
+ goto error_clocks;
+ }
+
+ ret = msm_jpegdma_hw_bus_request(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail bus request\n");
+ goto error_bus_request;
+ }
+ msm_jpegdma_hw_config_qos(dma);
+ msm_jpegdma_hw_config_vbif(dma);
+
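+		/* The irq is enabled before the reset below, which is
+		 * expected to complete via the reset done (RST_DONE)
+		 * interrupt */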
+ msm_jpegdma_hw_enable_irq(dma);
+
+ ret = msm_jpegdma_hw_reset(dma);
+ if (ret < 0) {
+ dev_err(dma->dev, "Fail to reset hw\n");
+ goto error_hw_reset;
+ }
+ msm_jpegdma_hw_config_qos(dma);
+ msm_jpegdma_hw_config_mmu_prefetch(dma, NULL, NULL);
+ msm_jpegdma_hw_enable_irq(dma);
+ }
+ dma->ref_count++;
+ dev_dbg(dma->dev, "msm_jpegdma_hw_get X\n");
+ mutex_unlock(&dma->lock);
+
+ return 0;
+
+error_hw_reset:
+ msm_jpegdma_hw_disable_irq(dma);
+error_bus_request:
+ msm_jpegdma_hw_disable_clocks(dma);
+error_clocks:
+ msm_jpegdma_hw_disable_regulators(dma);
+error:
+ mutex_unlock(&dma->lock);
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_put - Put dma hw.
+ * @dma: Pointer to dma device.
+ *
+ * Release dma hw. The reference count is protected by
+ * the dma device mutex.
+ */
+void msm_jpegdma_hw_put(struct msm_jpegdma_device *dma)
+{
+ mutex_lock(&dma->lock);
+ BUG_ON(dma->ref_count == 0);
+
+ if (--dma->ref_count == 0) {
+ msm_jpegdma_hw_halt(dma);
+ msm_jpegdma_hw_disable_irq(dma);
+ msm_jpegdma_hw_bus_release(dma);
+ msm_jpegdma_hw_disable_clocks(dma);
+ msm_jpegdma_hw_disable_regulators(dma);
+ }
+ /* Reset clock rate, need to be updated on next processing */
+ dma->active_clock_rate = -1;
+ mutex_unlock(&dma->lock);
+}
+
+/*
+ * msm_jpegdma_hw_attach_iommu - Attach iommu to jpeg dma engine.
+ * @dma: Pointer to dma device.
+ *
+ * The iommu attach reference count is protected by
+ * the dma device mutex.
+ */
+static int msm_jpegdma_hw_attach_iommu(struct msm_jpegdma_device *dma)
+{
+ int ret;
+
+ mutex_lock(&dma->lock);
+
+	if (dma->iommu_attached_cnt == UINT_MAX) {
+		dev_err(dma->dev, "Max count reached! can not attach iommu\n");
+		ret = -EINVAL;
+		goto error;
+	}
+
+ if (dma->iommu_attached_cnt == 0) {
+ ret = cam_smmu_get_handle(MSM_JPEGDMA_SMMU_NAME,
+ &dma->iommu_hndl);
+ if (ret < 0) {
+ dev_err(dma->dev, "Smmu get handle failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+ ret = cam_smmu_ops(dma->iommu_hndl, CAM_SMMU_ATTACH);
+ if (ret < 0) {
+ dev_err(dma->dev, "Can not attach smmu.\n");
+ goto error_attach;
+ }
+ }
+ dma->iommu_attached_cnt++;
+ mutex_unlock(&dma->lock);
+
+ return 0;
+error_attach:
+ cam_smmu_destroy_handle(dma->iommu_hndl);
+error:
+ mutex_unlock(&dma->lock);
+ return ret;
+}
+
+/*
+ * msm_jpegdma_hw_detach_iommu - Detach iommu from jpeg dma engine.
+ * @dma: Pointer to dma device.
+ *
+ * The iommu detach reference count is protected by
+ * the dma device mutex.
+ */
+static void msm_jpegdma_hw_detach_iommu(struct msm_jpegdma_device *dma)
+{
+ mutex_lock(&dma->lock);
+ if (dma->iommu_attached_cnt == 0) {
+ dev_err(dma->dev, "There is no attached device\n");
+ mutex_unlock(&dma->lock);
+ return;
+ }
+
+ if (--dma->iommu_attached_cnt == 0) {
+ cam_smmu_ops(dma->iommu_hndl, CAM_SMMU_DETACH);
+ cam_smmu_destroy_handle(dma->iommu_hndl);
+ }
+ mutex_unlock(&dma->lock);
+}
+
+/*
+ * msm_jpegdma_hw_map_buffer - Map buffer to dma hw mmu.
+ * @dma: Pointer to dma device.
+ * @fd: Ion fd.
+ * @buf: dma buffer handle, for storing mapped buffer information.
+ *
+ * It will map ion fd to dma hw smmu.
+ */
+int msm_jpegdma_hw_map_buffer(struct msm_jpegdma_device *dma, int fd,
+ struct msm_jpegdma_buf_handle *buf)
+{
+ int ret;
+
+ if (!dma || fd < 0)
+ return -EINVAL;
+
+ ret = msm_jpegdma_hw_attach_iommu(dma);
+ if (ret < 0)
+ goto error;
+
+ buf->dma = dma;
+ buf->fd = fd;
+
+ ret = cam_smmu_get_phy_addr(dma->iommu_hndl, buf->fd,
+ CAM_SMMU_MAP_RW, &buf->addr, &buf->size);
+ if (ret < 0) {
+ dev_err(dma->dev, "Can not get physical address\n");
+ goto error_get_phy;
+ }
+
+ return buf->size;
+
+error_get_phy:
+ msm_jpegdma_hw_detach_iommu(dma);
+error:
+ return -ENOMEM;
+}
+
+/*
+ * msm_jpegdma_hw_unmap_buffer - Unmap buffer from dma hw mmu.
+ * @buf: dma buffer handle, for storing mapped buffer information.
+ */
+void msm_jpegdma_hw_unmap_buffer(struct msm_jpegdma_buf_handle *buf)
+{
+ if (buf->size && buf->dma) {
+ cam_smmu_put_phy_addr(buf->dma->iommu_hndl,
+ buf->fd);
+ msm_jpegdma_hw_detach_iommu(buf->dma);
+ buf->size = 0;
+ }
+ buf->fd = -1;
+ buf->dma = NULL;
+}
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.h b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.h
new file mode 100644
index 000000000000..b7ff3b53aeb0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_JPEG_DMA_HW_H__
+#define __MSM_JPEG_DMA_HW_H__
+
+#include "msm_jpeg_dma_dev.h"
+
+int msm_jpegdma_hw_check_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size_cfg);
+
+int msm_jpegdma_hw_set_config(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_size_config *size_cfg,
+ struct msm_jpegdma_plane_config *plane_cfg);
+
+int msm_jpegdma_hw_start(struct msm_jpegdma_device *dma,
+ struct msm_jpegdma_addr *addr,
+ struct msm_jpegdma_plane *plane,
+ struct msm_jpegdma_speed *speed);
+
+int msm_jpegdma_hw_abort(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_update_bus_data(struct msm_jpegdma_device *dma,
+ u64 ab, u64 ib);
+
+int msm_jpegdma_hw_handle_irq(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_request_irq(struct platform_device *pdev,
+ struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_release_irq(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_release_mem_resources(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_mem_resources(struct platform_device *pdev,
+ struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_regulators(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put_regulators(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_clocks(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_put_clocks(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_qos(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put_qos(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_vbif(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put_vbif(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_prefetch(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put_prefetch(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get_capabilities(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_get(struct msm_jpegdma_device *dma);
+
+void msm_jpegdma_hw_put(struct msm_jpegdma_device *dma);
+
+int msm_jpegdma_hw_map_buffer(struct msm_jpegdma_device *dma, int fd,
+ struct msm_jpegdma_buf_handle *buf);
+
+void msm_jpegdma_hw_unmap_buffer(struct msm_jpegdma_buf_handle *buf);
+
+#endif /* __MSM_JPEG_DMA_HW_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_regs.h b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_regs.h
new file mode 100644
index 000000000000..31bdfbfda49e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_regs.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_JPEGDMA_REGS_H__
+#define __MSM_JPEGDMA_REGS_H__
+
+#define MSM_JPEGDMA_HW_REVISION 0x00
+#define MSM_JPEGDMA_HW_CAPABILITY 0x04
+#define MSM_JPEGDMA_HW_CAPABILITY_NUM_PIPES_BMSK 0x06
+#define MSM_JPEGDMA_HW_CAPABILITY_NUM_PIPES_SHFT 0x01
+
+#define MSM_JPEGDMA_IRQ_MASK_ADDR 0x0C
+#define MSM_JPEGDMA_IRQ_MASK_SESSION_DONE (1 << 0)
+#define MSM_JPEGDMA_IRQ_MASK_RD_BUF_DONE (1 << 1)
+#define MSM_JPEGDMA_IRQ_MASK_WR_BUF_DONE (1 << 5)
+#define MSM_JPEGDMA_IRQ_MASK_AXI_HALT (1 << 9)
+#define MSM_JPEGDMA_IRQ_MASK_RST_DONE (1 << 10)
+
+#define MSM_JPEGDMA_IRQ_STATUS 0x10
+#define MSM_JPEGDMA_IRQ_STATUS_SESSION_DONE (1 << 0)
+#define MSM_JPEGDMA_IRQ_STATUS_RD_BUF_DONE (1 << 1)
+#define MSM_JPEGDMA_IRQ_STATUS_WR_BUF_DONE (1 << 5)
+#define MSM_JPEGDMA_IRQ_STATUS_AXI_HALT (1 << 9)
+#define MSM_JPEGDMA_IRQ_STATUS_RST_DONE (1 << 10)
+
+#define MSM_JPEGDMA_IRQ_CLEAR_ADDR 0x14
+#define MSM_JPEGDMA_IRQ_CLEAR_BMSK 0xFFFFFFFF
+
+#define MSM_JPEGDMA_CORE_CFG_ADDR 0x18
+#define MSM_JPEGDMA_CMD_ADDR 0x1C
+
+#define MSM_JPEGDMA_CORE_CFG_TEST_BUS_ENABLE_SHFT 19
+#define MSM_JPEGDMA_CORE_CFG_BRIDGE_ENABLE_SHFT 6
+#define MSM_JPEGDMA_CORE_CFG_SCALE_1_ENABLE_SHFT 5
+#define MSM_JPEGDMA_CORE_CFG_SCALE_0_ENABLE_SHFT 4
+
+#define MSM_JPEGDMA_CORE_CFG_WE_1_ENABLE_SHFT 0x03
+#define MSM_JPEGDMA_CORE_CFG_WE_0_ENABLE_SHFT 0x02
+#define MSM_JPEGDMA_CORE_CFG_FE_1_ENABLE_SHFT 0x01
+#define MSM_JPEGDMA_CORE_CFG_FE_0_ENABLE_SHFT 0x00
+
+#define MSM_JPEGDMA_FE_0_CFG_ADDR 0x2C
+#define MSM_JPEGDMA_FE_1_CFG_ADDR 0x70
+#define MSM_JPEGDMA_FE_CFG_MAL_BOUNDARY_SHFT 25
+#define MSM_JPEGDMA_FE_CFG_MAL_EN_SHFT 21
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CBCR 0x03
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CR 0x02
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_CB 0x01
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_Y 0x00
+#define MSM_JPEGDMA_FE_CFG_PLN_BLOCK_TYPE_SHFT 19
+#define MSM_JPEGDMA_FE_CFG_BLOCK_WIDTH_SHFT 0x04
+#define MSM_JPEGDMA_FE_CFG_BURST_LENGTH_MAX_SHFT 0x00
+
+#define MSM_JPEGDMA_FE_RD_0_PNTR_ADDR 0x34
+#define MSM_JPEGDMA_FE_RD_1_PNTR_ADDR 0x78
+#define MSM_JPEGDMA_FE_RD_BUFFER_SIZE_0_ADDR 0x44
+#define MSM_JPEGDMA_FE_RD_BUFFER_SIZE_1_ADDR 0x88
+#define MSM_JPEGDMA_FE_RD_BUFFER_SIZE_HEIGHT_SHFT 16
+#define MSM_JPEGDMA_FE_RD_0_STRIDE_ADDR 0x48
+#define MSM_JPEGDMA_FE_RD_1_STRIDE_ADDR 0x8C
+#define MSM_JPEGDMA_FE_RD_0_HINIT_ADDR 0x4C
+#define MSM_JPEGDMA_FE_RD_1_HINIT_ADDR 0x90
+#define MSM_JPEGDMA_FE_RD_0_HINIT_INT_ADDR 0x50
+#define MSM_JPEGDMA_FE_RD_1_HINIT_INT_ADDR 0x94
+#define MSM_JPEGDMA_FE_RD_0_VINIT_INT_ADDR 0x58
+#define MSM_JPEGDMA_FE_RD_1_VINIT_INT_ADDR 0x9C
+
+#define MSM_JPEGDMA_WE_CFG_ADDR 0xB8
+#define MSM_JPEGDMA_WE_CFG_MAL_BOUNDARY_SHFT 0x08
+#define MSM_JPEGDMA_WE_CFG_MAL_EN_SHFT 0x07
+#define MSM_JPEGDMA_WE_CFG_BURST_LENGTH_MAX_SHFT 0x00
+#define MSM_JPEGDMA_WE_PLN_0_WR_PNTR_ADDR 0xBC
+#define MSM_JPEGDMA_WE_PLN_1_WR_PNTR_ADDR 0xEC
+#define MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_0_ADDR 0xC4
+#define MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_1_ADDR 0xF4
+#define MSM_JPEGDMA_WE_PLN_WR_BUFFER_SIZE_HEIGHT_SHFT 16
+#define MSM_JPEGDMA_WE_PLN_0_WR_STRIDE_ADDR 0xC8
+#define MSM_JPEGDMA_WE_PLN_1_WR_STRIDE_ADDR 0xF8
+#define MSM_JPEGDMA_WE_PLN_0_WR_CFG_0_ADDR 0xCC
+#define MSM_JPEGDMA_WE_PLN_1_WR_CFG_0_ADDR 0xFC
+#define MSM_JPEGDMA_WE_PLN_WR_CFG_0_BLOCKS_PER_ROW_SHFT 16
+#define MSM_JPEGDMA_WE_PLN_0_WR_CFG_1_ADDR 0xD0
+#define MSM_JPEGDMA_WE_PLN_1_WR_CFG_1_ADDR 0x100
+#define MSM_JPEGDMA_WE_PLN_WR_CFG_1_LAST_H_STEP_SHFT 16
+#define MSM_JPEGDMA_WE_PLN_0_WR_CFG_2_ADDR 0xD4
+#define MSM_JPEGDMA_WE_PLN_1_WR_CFG_2_ADDR 0x104
+#define MSM_JPEGDMA_WE_PLN_WR_CFG_2_LAST_V_STEP_SHFT 16
+#define MSM_JPEGDMA_WE_PLN_0_WR_CFG_3_ADDR 0xD8
+#define MSM_JPEGDMA_WE_PLN_1_WR_CFG_3_ADDR 0x108
+
+#define MSM_JPEGDMA_PP_0_SCALE_PHASEV_STEP_ADDR 0x19C
+#define MSM_JPEGDMA_PP_1_SCALE_PHASEV_STEP_ADDR 0x1BC
+#define MSM_JPEGDMA_PP_0_SCALE_PHASEH_STEP_ADDR 0x194
+#define MSM_JPEGDMA_PP_1_SCALE_PHASEH_STEP_ADDR 0x1B4
+#define MSM_JPEGDMA_PP_0_SCALE_CFG_ADDR 0x188
+#define MSM_JPEGDMA_PP_1_SCALE_CFG_ADDR 0x1A8
+#define MSM_JPEGDMA_PP_SCALE_CFG_VSCALE_ENABLE_SHFT 0x05
+#define MSM_JPEGDMA_PP_SCALE_CFG_HSCALE_ENABLE_SHFT 0x04
+
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MIN 0x190
+#define MSM_JPEGDMA_S0_MMU_PF_ADDR_MAX 0x198
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MIN 0x1A4
+#define MSM_JPEGDMA_S1_MMU_PF_ADDR_MAX 0x1AC
+
+#define MSM_JPEGDMA_CMD_CLEAR_READ_PLN_QUEUES 0x030
+#define MSM_JPEGDMA_CMD_CLEAR_WRITE_PLN_QUEUES 0x300
+
+#define MSM_HW_JPEGDMA_RESET 0x08
+#define MSM_HW_JPEGDMA_RESET_DEFAULT 0x32083
+
+#define MSM_JPEGDMA_RESET_CMD_BMSK 0xFFFFFFFF
+
+#endif /* __MSM_JPEGDMA_REGS_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
new file mode 100644
index 000000000000..7187e694f2bd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -0,0 +1,1236 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/ioctl.h>
+#include <linux/spinlock.h>
+#include <linux/proc_fs.h>
+#include <linux/atomic.h>
+#include <linux/videodev2.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <media/v4l2-fh.h>
+#include "msm.h"
+#include "msm_vb2.h"
+#include "msm_sd.h"
+#include "cam_hw_ops.h"
+#include <media/msmb_generic_buf_mgr.h>
+
+
+static struct v4l2_device *msm_v4l2_dev;
+static struct list_head ordered_sd_list;
+
+static struct msm_queue_head *msm_session_q;
+
+/* config node event queue */
+static struct v4l2_fh *msm_eventq;
+spinlock_t msm_eventq_lock;
+
+static struct pid *msm_pid;
+spinlock_t msm_pid_lock;
+
+/*
+ * It takes 20 bytes plus a NUL terminator to write the
+ * largest decimal value of a uint64_t
+ */
+#define LOGSYNC_PACKET_SIZE 21
+
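+/*
+ * The queue helpers below operate on a struct msm_queue_head under its
+ * spinlock: msm_dequeue() evaluates to the removed node (or NULL), while
+ * the find/traverse/drain helpers walk the list with a caller supplied
+ * callback or free each node.
+ */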
+#define msm_dequeue(queue, type, member) ({ \
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ __q->len--; \
+ node = list_first_entry(&__q->list, \
+ type, member); \
+ if ((node) && (&node->member) && (&node->member.next)) \
+ list_del_init(&node->member); \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+ node; \
+})
+
+#define msm_delete_sd_entry(queue, type, member, q_node) ({ \
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ list_for_each_entry(node, &__q->list, member) \
+ if (node->sd == q_node) { \
+ __q->len--; \
+ list_del_init(&node->member); \
+ kzfree(node); \
+ break; \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+})
+
+#define msm_delete_entry(queue, type, member, q_node) ({ \
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ list_for_each_entry(node, &__q->list, member) \
+ if (node == q_node) { \
+ __q->len--; \
+ list_del_init(&node->member); \
+ kzfree(node); \
+ break; \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+})
+
+#define msm_queue_drain(queue, type, member) do { \
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ while (!list_empty(&__q->list)) { \
+ __q->len--; \
+ node = list_first_entry(&__q->list, \
+ type, member); \
+ if (node) { \
+ if (&node->member) \
+ list_del_init(&node->member); \
+ kzfree(node); \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+} while (0)
+
+typedef int (*msm_queue_func)(void *d1, void *d2);
+#define msm_queue_traverse_action(queue, type, member, func, data) do {\
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ msm_queue_func __f = (func); \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ list_for_each_entry(node, &__q->list, member) \
+ if (node && __f) { \
+ __f(node, data); \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+} while (0)
+
+typedef int (*msm_queue_find_func)(void *d1, void *d2);
+#define msm_queue_find(queue, type, member, func, data) ({\
+ unsigned long flags; \
+ struct msm_queue_head *__q = (queue); \
+ type *node = 0; \
+ typeof(node) __ret = NULL; \
+ msm_queue_find_func __f = (func); \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ list_for_each_entry(node, &__q->list, member) \
+ if ((__f) && __f(node, data)) { \
+ __ret = node; \
+ break; \
+ } \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+ __ret; \
+})
+
+static void msm_init_queue(struct msm_queue_head *qhead)
+{
+ BUG_ON(!qhead);
+
+ INIT_LIST_HEAD(&qhead->list);
+ spin_lock_init(&qhead->lock);
+ qhead->len = 0;
+ qhead->max = 0;
+}
+
+static void msm_enqueue(struct msm_queue_head *qhead,
+ struct list_head *entry)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&qhead->lock, flags);
+ qhead->len++;
+ if (qhead->len > qhead->max)
+ qhead->max = qhead->len;
+ list_add_tail(entry, &qhead->list);
+ spin_unlock_irqrestore(&qhead->lock, flags);
+}
+
+void msm_cam_copy_v4l2_subdev_fops(struct v4l2_file_operations *d1)
+{
+ *d1 = v4l2_subdev_fops;
+}
+EXPORT_SYMBOL(msm_cam_copy_v4l2_subdev_fops);
+
+static const struct v4l2_file_operations *msm_cam_get_v4l2_subdev_fops_ptr(
+ void)
+{
+ return &v4l2_subdev_fops;
+}
+
+/* index = session id */
+static inline int __msm_queue_find_session(void *d1, void *d2)
+{
+ struct msm_session *session = d1;
+ return (session->session_id == *(unsigned int *)d2) ? 1 : 0;
+}
+
+static inline int __msm_queue_find_stream(void *d1, void *d2)
+{
+ struct msm_stream *stream = d1;
+ return (stream->stream_id == *(unsigned int *)d2) ? 1 : 0;
+}
+
+static inline int __msm_queue_find_command_ack_q(void *d1, void *d2)
+{
+ struct msm_command_ack *ack = d1;
+ return (ack->stream_id == *(unsigned int *)d2) ? 1 : 0;
+}
+
+
+struct msm_session *msm_session_find(unsigned int session_id)
+{
+ struct msm_session *session;
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (WARN_ON(!session))
+ return NULL;
+ return session;
+}
+EXPORT_SYMBOL(msm_session_find);
+
+int msm_create_stream(unsigned int session_id,
+ unsigned int stream_id, struct vb2_queue *q)
+{
+ struct msm_session *session;
+ struct msm_stream *stream;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return -EINVAL;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (!stream)
+ return -ENOMEM;
+
+ stream->stream_id = stream_id;
+ stream->vb2_q = q;
+ spin_lock_init(&stream->stream_lock);
+ msm_enqueue(&session->stream_q, &stream->list);
+
+ INIT_LIST_HEAD(&stream->queued_list);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_create_stream);
+
+void msm_delete_stream(unsigned int session_id, unsigned int stream_id)
+{
+ struct msm_session *session = NULL;
+ struct msm_stream *stream = NULL;
+ unsigned long flags;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return;
+
+ stream = msm_queue_find(&session->stream_q, struct msm_stream,
+ list, __msm_queue_find_stream, &stream_id);
+ if (!stream)
+ return;
+ spin_lock_irqsave(&(session->stream_q.lock), flags);
+ list_del_init(&stream->list);
+ session->stream_q.len--;
+ kfree(stream);
+ stream = NULL;
+ spin_unlock_irqrestore(&(session->stream_q.lock), flags);
+}
+EXPORT_SYMBOL(msm_delete_stream);
+
+static void msm_sd_unregister_subdev(struct video_device *vdev)
+{
+ struct v4l2_subdev *sd = video_get_drvdata(vdev);
+ sd->devnode = NULL;
+ kzfree(vdev);
+}
+
+static inline int __msm_sd_register_subdev(struct v4l2_subdev *sd)
+{
+ int rc = 0;
+ struct video_device *vdev;
+
+ if (!msm_v4l2_dev || !sd || !sd->name[0])
+ return -EINVAL;
+
+ rc = v4l2_device_register_subdev(msm_v4l2_dev, sd);
+ if (rc < 0)
+ return rc;
+
+ /* Register a device node for every subdev marked with the
+ * V4L2_SUBDEV_FL_HAS_DEVNODE flag.
+ */
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+ return rc;
+
+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
+ if (!vdev) {
+ rc = -ENOMEM;
+ goto clean_up;
+ }
+
+ video_set_drvdata(vdev, sd);
+ strlcpy(vdev->name, sd->name, sizeof(vdev->name));
+ vdev->v4l2_dev = msm_v4l2_dev;
+ vdev->fops = msm_cam_get_v4l2_subdev_fops_ptr();
+ vdev->release = msm_sd_unregister_subdev;
+ rc = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1,
+ sd->owner);
+ if (rc < 0) {
+ kzfree(vdev);
+ goto clean_up;
+ }
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ sd->entity.info.v4l.major = VIDEO_MAJOR;
+ sd->entity.info.v4l.minor = vdev->minor;
+ sd->entity.name = video_device_node_name(vdev);
+#endif
+ sd->devnode = vdev;
+ return 0;
+
+clean_up:
+ if (sd->devnode)
+ video_unregister_device(sd->devnode);
+ return rc;
+}
+
+static void msm_add_sd_in_position(struct msm_sd_subdev *msm_subdev,
+ struct list_head *sd_list)
+{
+ struct msm_sd_subdev *temp_sd;
+
+ list_for_each_entry(temp_sd, sd_list, list) {
+ if (msm_subdev->close_seq < temp_sd->close_seq) {
+ list_add_tail(&msm_subdev->list, &temp_sd->list);
+ return;
+ }
+ }
+ list_add_tail(&msm_subdev->list, sd_list);
+}
+
+int msm_sd_register(struct msm_sd_subdev *msm_subdev)
+{
+ if (WARN_ON(!msm_subdev))
+ return -EINVAL;
+
+ if (WARN_ON(!msm_v4l2_dev) || WARN_ON(!msm_v4l2_dev->dev))
+ return -EIO;
+
+ msm_add_sd_in_position(msm_subdev, &ordered_sd_list);
+ return __msm_sd_register_subdev(&msm_subdev->sd);
+}
+EXPORT_SYMBOL(msm_sd_register);
+
+int msm_sd_unregister(struct msm_sd_subdev *msm_subdev)
+{
+ if (WARN_ON(!msm_subdev))
+ return -EINVAL;
+
+ v4l2_device_unregister_subdev(&msm_subdev->sd);
+ return 0;
+}
+EXPORT_SYMBOL(msm_sd_unregister);
+
+static struct v4l2_subdev *msm_sd_find(const char *name)
+{
+ unsigned long flags;
+ struct v4l2_subdev *subdev = NULL;
+ struct v4l2_subdev *subdev_out = NULL;
+
+ spin_lock_irqsave(&msm_v4l2_dev->lock, flags);
+ if (!list_empty(&msm_v4l2_dev->subdevs)) {
+ list_for_each_entry(subdev, &msm_v4l2_dev->subdevs, list)
+ if (!strcmp(name, subdev->name)) {
+ subdev_out = subdev;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&msm_v4l2_dev->lock, flags);
+
+ return subdev_out;
+}
+
+int msm_create_session(unsigned int session_id, struct video_device *vdev)
+{
+ struct msm_session *session = NULL;
+
+ if (!msm_session_q) {
+ pr_err("%s : session queue not available Line %d\n",
+ __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (session) {
+		pr_err("%s : Session already exists Line %d\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session) {
+ pr_err("%s : Memory not available Line %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ session->session_id = session_id;
+ session->event_q.vdev = vdev;
+ msm_init_queue(&session->command_ack_q);
+ msm_init_queue(&session->stream_q);
+ msm_enqueue(msm_session_q, &session->list);
+ mutex_init(&session->lock);
+ mutex_init(&session->lock_q);
+ return 0;
+}
+EXPORT_SYMBOL(msm_create_session);
+
+int msm_create_command_ack_q(unsigned int session_id, unsigned int stream_id)
+{
+ struct msm_session *session;
+ struct msm_command_ack *cmd_ack;
+
+ if (!msm_session_q) {
+ pr_err("%s : Session queue not available Line %d\n",
+ __func__, __LINE__);
+ return -ENODEV;
+ }
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session) {
+ pr_err("%s : Session not found Line %d\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ mutex_lock(&session->lock);
+ cmd_ack = kzalloc(sizeof(*cmd_ack), GFP_KERNEL);
+ if (!cmd_ack) {
+ mutex_unlock(&session->lock);
+ pr_err("%s : memory not available Line %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ msm_init_queue(&cmd_ack->command_q);
+ INIT_LIST_HEAD(&cmd_ack->list);
+ init_completion(&cmd_ack->wait_complete);
+ cmd_ack->stream_id = stream_id;
+
+ msm_enqueue(&session->command_ack_q, &cmd_ack->list);
+ mutex_unlock(&session->lock);
+ return 0;
+}
+EXPORT_SYMBOL(msm_create_command_ack_q);
+
+void msm_delete_command_ack_q(unsigned int session_id, unsigned int stream_id)
+{
+ struct msm_session *session;
+ struct msm_command_ack *cmd_ack;
+ unsigned long flags;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return;
+ mutex_lock(&session->lock);
+
+ cmd_ack = msm_queue_find(&session->command_ack_q,
+ struct msm_command_ack, list, __msm_queue_find_command_ack_q,
+ &stream_id);
+ if (!cmd_ack) {
+ mutex_unlock(&session->lock);
+ return;
+ }
+
+ msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
+
+ spin_lock_irqsave(&(session->command_ack_q.lock), flags);
+ list_del_init(&cmd_ack->list);
+ kzfree(cmd_ack);
+ session->command_ack_q.len--;
+ spin_unlock_irqrestore(&(session->command_ack_q.lock), flags);
+ mutex_unlock(&session->lock);
+}
+EXPORT_SYMBOL(msm_delete_command_ack_q);
+
+static inline int __msm_sd_close_subdevs(struct msm_sd_subdev *msm_sd,
+ struct msm_sd_close_ioctl *sd_close)
+{
+ struct v4l2_subdev *sd;
+ sd = &msm_sd->sd;
+	pr_debug("%s: Shutting down subdev %s\n", __func__, sd->name);
+
+ v4l2_subdev_call(sd, core, ioctl, MSM_SD_SHUTDOWN, sd_close);
+ v4l2_subdev_call(sd, core, s_power, 0);
+
+ return 0;
+}
+
+static inline int __msm_sd_notify_freeze_subdevs(struct msm_sd_subdev *msm_sd)
+{
+ struct v4l2_subdev *sd;
+ sd = &msm_sd->sd;
+
+ v4l2_subdev_call(sd, core, ioctl, MSM_SD_NOTIFY_FREEZE, NULL);
+
+ return 0;
+}
+
+static inline int __msm_destroy_session_streams(void *d1, void *d2)
+{
+ struct msm_stream *stream = d1;
+ unsigned long flags;
+
+ pr_err("%s: Error: Destroyed list is not empty\n", __func__);
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ INIT_LIST_HEAD(&stream->queued_list);
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return 0;
+}
+
+static void msm_destroy_session_streams(struct msm_session *session)
+{
+
+ if (!session)
+ return;
+
+ msm_queue_traverse_action(&session->stream_q, struct msm_stream, list,
+ __msm_destroy_session_streams, NULL);
+
+ msm_queue_drain(&session->stream_q, struct msm_stream, list);
+}
+
+static inline int __msm_remove_session_cmd_ack_q(void *d1, void *d2)
+{
+ struct msm_command_ack *cmd_ack = d1;
+
+ if (!(&cmd_ack->command_q))
+ return 0;
+
+ msm_queue_drain(&cmd_ack->command_q, struct msm_command, list);
+
+ return 0;
+}
+
+static void msm_remove_session_cmd_ack_q(struct msm_session *session)
+{
+ if ((!session) || !(&session->command_ack_q))
+ return;
+
+ mutex_lock(&session->lock);
+	/* For error handling, all subdevs that are still connected
+	 * to streams need to be detached */
+ msm_queue_traverse_action(&session->command_ack_q,
+ struct msm_command_ack, list,
+ __msm_remove_session_cmd_ack_q, NULL);
+
+ msm_queue_drain(&session->command_ack_q, struct msm_command_ack, list);
+
+ mutex_unlock(&session->lock);
+}
+
+int msm_destroy_session(unsigned int session_id)
+{
+ struct msm_session *session;
+ struct v4l2_subdev *buf_mgr_subdev;
+ struct msm_sd_close_ioctl session_info;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return -EINVAL;
+
+ msm_destroy_session_streams(session);
+ msm_remove_session_cmd_ack_q(session);
+ mutex_destroy(&session->lock);
+ mutex_destroy(&session->lock_q);
+ msm_delete_entry(msm_session_q, struct msm_session,
+ list, session);
+ buf_mgr_subdev = msm_sd_find("msm_buf_mngr");
+ if (buf_mgr_subdev) {
+ session_info.session = session_id;
+ session_info.stream = 0;
+ v4l2_subdev_call(buf_mgr_subdev, core, ioctl,
+ MSM_SD_SHUTDOWN, &session_info);
+ } else {
+		pr_err("%s: Buffer manager device node is NULL\n", __func__);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_destroy_session);
+
+static int __msm_close_destry_session_notify_apps(void *d1, void *d2)
+{
+ struct v4l2_event event;
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event.u.data[0];
+ struct msm_session *session = d1;
+
+ event.type = MSM_CAMERA_V4L2_EVENT_TYPE;
+ event.id = MSM_CAMERA_MSM_NOTIFY;
+ event_data->command = MSM_CAMERA_PRIV_SHUTDOWN;
+
+ v4l2_event_queue(session->event_q.vdev, &event);
+
+ return 0;
+}
+
+static long msm_private_ioctl(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct msm_v4l2_event_data *event_data = arg;
+ struct v4l2_event event;
+ struct msm_session *session;
+ unsigned int session_id;
+ unsigned int stream_id;
+ unsigned long spin_flags = 0;
+ struct msm_sd_subdev *msm_sd;
+
+ session_id = event_data->session_id;
+ stream_id = event_data->stream_id;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+
+ if (!session)
+ return -EINVAL;
+
+ switch (cmd) {
+ case MSM_CAM_V4L2_IOCTL_NOTIFY: {
+ if (WARN_ON(!session->event_q.vdev)) {
+ rc = -EFAULT;
+ break;
+ }
+ event.type = event_data->v4l2_event_type;
+ event.id = event_data->v4l2_event_id;
+ memcpy(&event.u.data, event_data,
+ sizeof(struct msm_v4l2_event_data));
+ v4l2_event_queue(session->event_q.vdev,
+ &event);
+ }
+ break;
+
+ case MSM_CAM_V4L2_IOCTL_CMD_ACK: {
+ struct msm_command_ack *cmd_ack;
+ struct msm_command *ret_cmd;
+
+ ret_cmd = kzalloc(sizeof(*ret_cmd), GFP_KERNEL);
+ if (!ret_cmd) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ cmd_ack = msm_queue_find(&session->command_ack_q,
+ struct msm_command_ack, list,
+ __msm_queue_find_command_ack_q,
+ &stream_id);
+ if (WARN_ON(!cmd_ack)) {
+ kzfree(ret_cmd);
+ rc = -EFAULT;
+ break;
+ }
+
+ spin_lock_irqsave(&(session->command_ack_q.lock),
+ spin_flags);
+ event.type = event_data->v4l2_event_type;
+ event.id = event_data->v4l2_event_id;
+ memcpy(&event.u.data, event_data,
+ sizeof(struct msm_v4l2_event_data));
+ memcpy(&ret_cmd->event, &event, sizeof(struct v4l2_event));
+ msm_enqueue(&cmd_ack->command_q, &ret_cmd->list);
+ complete(&cmd_ack->wait_complete);
+ spin_unlock_irqrestore(&(session->command_ack_q.lock),
+ spin_flags);
+ }
+ break;
+
+ case MSM_CAM_V4L2_IOCTL_NOTIFY_DEBUG: {
+ pr_err("Notifying subdevs about potential sof freeze\n");
+ if (!list_empty(&msm_v4l2_dev->subdevs)) {
+ list_for_each_entry(msm_sd, &ordered_sd_list, list)
+ __msm_sd_notify_freeze_subdevs(msm_sd);
+ }
+ }
+ break;
+
+ case MSM_CAM_V4L2_IOCTL_NOTIFY_ERROR:
+ /* send v4l2_event to HAL next*/
+ msm_queue_traverse_action(msm_session_q,
+ struct msm_session, list,
+ __msm_close_destry_session_notify_apps, NULL);
+ break;
+
+ default:
+ rc = -ENOTTY;
+ break;
+ }
+
+ return rc;
+}
+
+static int msm_unsubscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static int msm_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_subscribe(fh, sub, 5, NULL);
+}
+
+static const struct v4l2_ioctl_ops g_msm_ioctl_ops = {
+ .vidioc_subscribe_event = msm_subscribe_event,
+ .vidioc_unsubscribe_event = msm_unsubscribe_event,
+ .vidioc_default = msm_private_ioctl,
+};
+
+static unsigned int msm_poll(struct file *f,
+ struct poll_table_struct *pll_table)
+{
+ int rc = 0;
+ struct v4l2_fh *eventq = f->private_data;
+
+ BUG_ON(!eventq);
+
+ poll_wait(f, &eventq->wait, pll_table);
+
+ if (v4l2_event_pending(eventq))
+ rc = POLLIN | POLLRDNORM;
+
+ return rc;
+}
+
+static void msm_print_event_error(struct v4l2_event *event)
+{
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event->u.data[0];
+
+ pr_err("Evt_type=%x Evt_id=%d Evt_cmd=%x\n", event->type,
+ event->id, event_data->command);
+ pr_err("Evt_session_id=%d Evt_stream_id=%d Evt_arg=%d\n",
+ event_data->session_id, event_data->stream_id,
+ event_data->arg_value);
+}
+
+/* something seriously wrong if msm_close is triggered
+ * !!! user space imaging server is shutdown !!!
+ */
+int msm_post_event(struct v4l2_event *event, int timeout)
+{
+ int rc = 0;
+ struct video_device *vdev;
+ struct msm_session *session;
+ struct msm_v4l2_event_data *event_data =
+ (struct msm_v4l2_event_data *)&event->u.data[0];
+ struct msm_command_ack *cmd_ack;
+ struct msm_command *cmd;
+ int session_id, stream_id;
+ unsigned long flags = 0;
+
+ session_id = event_data->session_id;
+ stream_id = event_data->stream_id;
+
+ spin_lock_irqsave(&msm_eventq_lock, flags);
+ if (!msm_eventq) {
+ spin_unlock_irqrestore(&msm_eventq_lock, flags);
+ pr_err("%s : msm event queue not available Line %d\n",
+ __func__, __LINE__);
+ return -ENODEV;
+ }
+ spin_unlock_irqrestore(&msm_eventq_lock, flags);
+
+ vdev = msm_eventq->vdev;
+
+ /* send to imaging server and wait for ACK */
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (WARN_ON(!session)) {
+ pr_err("%s : session not found Line %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ mutex_lock(&session->lock);
+ cmd_ack = msm_queue_find(&session->command_ack_q,
+ struct msm_command_ack, list,
+ __msm_queue_find_command_ack_q, &stream_id);
+ if (WARN_ON(!cmd_ack)) {
+ mutex_unlock(&session->lock);
+ pr_err("%s : cmd_ack not found Line %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+
+ /*re-init wait_complete */
+ reinit_completion(&cmd_ack->wait_complete);
+
+ v4l2_event_queue(vdev, event);
+
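+	/* A negative timeout (MSM_POST_EVT_NOTIMEOUT) posts the event
+	 * without waiting for an ACK from the imaging server */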
+ if (timeout < 0) {
+ mutex_unlock(&session->lock);
+ pr_debug("%s : timeout cannot be negative Line %d\n",
+ __func__, __LINE__);
+ return rc;
+ }
+
+ /* should wait on session based condition */
+ rc = wait_for_completion_timeout(&cmd_ack->wait_complete,
+ msecs_to_jiffies(timeout));
+
+
+ if (list_empty_careful(&cmd_ack->command_q.list)) {
+ if (!rc) {
+ pr_err("%s: Timed out\n", __func__);
+ msm_print_event_error(event);
+ mutex_unlock(&session->lock);
+ return -ETIMEDOUT;
+ } else {
+			pr_err("%s: Error: No timeout but list empty!\n",
+ __func__);
+ msm_print_event_error(event);
+ mutex_unlock(&session->lock);
+ return -EINVAL;
+ }
+ }
+
+ cmd = msm_dequeue(&cmd_ack->command_q,
+ struct msm_command, list);
+ if (!cmd) {
+ mutex_unlock(&session->lock);
+ pr_err("%s : cmd dequeue failed Line %d\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ event_data = (struct msm_v4l2_event_data *)cmd->event.u.data;
+
+ /* compare cmd_ret and event */
+ if (WARN_ON(event->type != cmd->event.type) ||
+ WARN_ON(event->id != cmd->event.id)) {
+		pr_err("%s : Either event type or id did not match Line %d\n",
+ __func__, __LINE__);
+ pr_err("%s : event->type %d event->id %d\n", __func__,
+ event->type, event->id);
+ pr_err("%s : cmd->event.type %d cmd->event.id %d\n", __func__,
+ cmd->event.type, cmd->event.id);
+ rc = -EINVAL;
+ }
+
+ *event = cmd->event;
+
+ kzfree(cmd);
+ mutex_unlock(&session->lock);
+ return rc;
+}
+EXPORT_SYMBOL(msm_post_event);
+
+static int msm_close(struct file *filep)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ struct msm_sd_close_ioctl sd_close;
+ struct msm_sd_subdev *msm_sd;
+
+ /*stop all hardware blocks immediately*/
+ if (!list_empty(&msm_v4l2_dev->subdevs))
+ list_for_each_entry(msm_sd, &ordered_sd_list, list)
+ __msm_sd_close_subdevs(msm_sd, &sd_close);
+
+ /* send v4l2_event to HAL next*/
+ msm_queue_traverse_action(msm_session_q, struct msm_session, list,
+ __msm_close_destry_session_notify_apps, NULL);
+
+ spin_lock_irqsave(&msm_eventq_lock, flags);
+ msm_eventq = NULL;
+ spin_unlock_irqrestore(&msm_eventq_lock, flags);
+ v4l2_fh_release(filep);
+
+ spin_lock_irqsave(&msm_pid_lock, flags);
+ put_pid(msm_pid);
+ msm_pid = NULL;
+ spin_unlock_irqrestore(&msm_pid_lock, flags);
+
+ atomic_set(&pvdev->opened, 0);
+
+ return rc;
+}
+
+static inline void msm_list_switch(struct list_head *l1,
+ struct list_head *l2)
+{
+ l1->next = l2->next;
+ l2->prev = l1->prev;
+ l1->prev->next = l2;
+ l2->next->prev = l1;
+ l1->prev = l2;
+ l2->next = l1;
+}
+
+static int msm_open(struct file *filep)
+{
+ int rc;
+ unsigned long flags;
+ struct msm_video_device *pvdev = video_drvdata(filep);
+ BUG_ON(!pvdev);
+
+ /* !!! only ONE open is allowed !!! */
+ if (atomic_read(&pvdev->opened))
+ return -EBUSY;
+
+ atomic_set(&pvdev->opened, 1);
+
+ spin_lock_irqsave(&msm_pid_lock, flags);
+ msm_pid = get_pid(task_pid(current));
+ spin_unlock_irqrestore(&msm_pid_lock, flags);
+
+ /* create event queue */
+ rc = v4l2_fh_open(filep);
+ if (rc < 0)
+ return rc;
+
+ spin_lock_irqsave(&msm_eventq_lock, flags);
+ msm_eventq = filep->private_data;
+ spin_unlock_irqrestore(&msm_eventq_lock, flags);
+
+ return rc;
+}
+
+static struct v4l2_file_operations msm_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_open,
+ .poll = msm_poll,
+ .release = msm_close,
+ .ioctl = video_ioctl2,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = video_ioctl2,
+#endif
+};
+
+struct msm_stream *msm_get_stream(unsigned int session_id,
+ unsigned int stream_id)
+{
+ struct msm_session *session;
+ struct msm_stream *stream;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return ERR_PTR(-EINVAL);
+
+ stream = msm_queue_find(&session->stream_q, struct msm_stream,
+ list, __msm_queue_find_stream, &stream_id);
+
+ if (!stream)
+ return ERR_PTR(-EINVAL);
+
+ return stream;
+}
+EXPORT_SYMBOL(msm_get_stream);
+
+struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id,
+ unsigned int stream_id)
+{
+ struct msm_session *session;
+ struct msm_stream *stream;
+
+ session = msm_queue_find(msm_session_q, struct msm_session,
+ list, __msm_queue_find_session, &session_id);
+ if (!session)
+ return NULL;
+
+ stream = msm_queue_find(&session->stream_q, struct msm_stream,
+ list, __msm_queue_find_stream, &stream_id);
+ if (!stream)
+ return NULL;
+
+ return stream->vb2_q;
+}
+EXPORT_SYMBOL(msm_get_stream_vb2q);
+
+struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q)
+{
+ struct msm_session *session;
+ struct msm_stream *stream;
+ unsigned long flags1;
+ unsigned long flags2;
+ spin_lock_irqsave(&msm_session_q->lock, flags1);
+ list_for_each_entry(session, &(msm_session_q->list), list) {
+ spin_lock_irqsave(&(session->stream_q.lock), flags2);
+ list_for_each_entry(
+ stream, &(session->stream_q.list), list) {
+ if (stream->vb2_q == q) {
+ spin_unlock_irqrestore
+ (&(session->stream_q.lock), flags2);
+ spin_unlock_irqrestore
+ (&msm_session_q->lock, flags1);
+ return stream;
+ }
+ }
+ spin_unlock_irqrestore(&(session->stream_q.lock), flags2);
+ }
+ spin_unlock_irqrestore(&msm_session_q->lock, flags1);
+ return NULL;
+}
+EXPORT_SYMBOL(msm_get_stream_from_vb2q);
+
+static void msm_sd_notify(struct v4l2_subdev *sd,
+ unsigned int notification, void *arg)
+{
+ int rc = 0;
+ struct v4l2_subdev *subdev = NULL;
+
+ BUG_ON(!sd);
+ BUG_ON(!arg);
+
+ /* Check if subdev exists before processing*/
+ if (!msm_sd_find(sd->name))
+ return;
+
+ switch (notification) {
+ case MSM_SD_NOTIFY_GET_SD: {
+ struct msm_sd_req_sd *get_sd = arg;
+
+ get_sd->subdev = msm_sd_find(get_sd->name);
+ /* TODO: might need to add ref count on ret_sd */
+ }
+ break;
+
+ case MSM_SD_NOTIFY_PUT_SD: {
+ struct msm_sd_req_sd *put_sd = arg;
+ subdev = msm_sd_find(put_sd->name);
+ }
+ break;
+
+ case MSM_SD_NOTIFY_REQ_CB: {
+ struct msm_sd_req_vb2_q *req_sd = arg;
+ rc = msm_vb2_request_cb(req_sd);
+ if (rc < 0)
+ return;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
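+/*
+ * debugfs write handler: userspace writes a sequence number that is
+ * echoed to the kernel log so user space and kernel camera logs can
+ * be correlated.
+ */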
+static ssize_t write_logsync(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char lbuf[LOGSYNC_PACKET_SIZE] = {0};
+ uint64_t seq_num = 0;
+ int ret;
+
+	if (copy_from_user(lbuf, buf, min(count, sizeof(lbuf) - 1)))
+ return -EFAULT;
+
+ ret = sscanf(lbuf, "%llu", &seq_num);
+ if (ret != 1)
+ pr_err("LOGSYNC (Kernel): Bad or malformed sequence number\n");
+ else
+ pr_debug("LOGSYNC (Kernel): seq_num = %llu\n", seq_num);
+
+ return count;
+}
+
+static const struct file_operations logsync_fops = {
+ .write = write_logsync,
+};
+
+static int msm_probe(struct platform_device *pdev)
+{
+ struct msm_video_device *pvdev = NULL;
+ static struct dentry *cam_debugfs_root;
+ int rc = 0;
+
+ msm_v4l2_dev = kzalloc(sizeof(*msm_v4l2_dev),
+ GFP_KERNEL);
+ if (WARN_ON(!msm_v4l2_dev)) {
+ rc = -ENOMEM;
+ goto probe_end;
+ }
+
+ pvdev = kzalloc(sizeof(struct msm_video_device),
+ GFP_KERNEL);
+ if (WARN_ON(!pvdev)) {
+ rc = -ENOMEM;
+ goto pvdev_fail;
+ }
+
+ pvdev->vdev = video_device_alloc();
+ if (WARN_ON(!pvdev->vdev)) {
+ rc = -ENOMEM;
+ goto video_fail;
+ }
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ msm_v4l2_dev->mdev = kzalloc(sizeof(struct media_device),
+ GFP_KERNEL);
+ if (!msm_v4l2_dev->mdev) {
+ rc = -ENOMEM;
+ goto mdev_fail;
+ }
+ strlcpy(msm_v4l2_dev->mdev->model, MSM_CONFIGURATION_NAME,
+ sizeof(msm_v4l2_dev->mdev->model));
+ msm_v4l2_dev->mdev->dev = &(pdev->dev);
+
+ rc = media_device_register(msm_v4l2_dev->mdev);
+ if (WARN_ON(rc < 0))
+ goto media_fail;
+
+	rc = media_entity_init(&pvdev->vdev->entity, 0, NULL, 0);
+	if (WARN_ON(rc < 0))
+		goto entity_fail;
+
+ pvdev->vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L;
+ pvdev->vdev->entity.group_id = QCAMERA_VNODE_GROUP_ID;
+#endif
+
+ msm_v4l2_dev->notify = msm_sd_notify;
+
+ pvdev->vdev->v4l2_dev = msm_v4l2_dev;
+
+ rc = v4l2_device_register(&(pdev->dev), pvdev->vdev->v4l2_dev);
+ if (WARN_ON(rc < 0))
+ goto register_fail;
+
+ strlcpy(pvdev->vdev->name, "msm-config", sizeof(pvdev->vdev->name));
+ pvdev->vdev->release = video_device_release;
+ pvdev->vdev->fops = &msm_fops;
+ pvdev->vdev->ioctl_ops = &g_msm_ioctl_ops;
+ pvdev->vdev->minor = -1;
+ pvdev->vdev->vfl_type = VFL_TYPE_GRABBER;
+ rc = video_register_device(pvdev->vdev,
+ VFL_TYPE_GRABBER, -1);
+ if (WARN_ON(rc < 0))
+ goto v4l2_fail;
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+	/* FIXME: How to get rid of this mess? */
+ pvdev->vdev->entity.name = video_device_node_name(pvdev->vdev);
+#endif
+
+ atomic_set(&pvdev->opened, 0);
+ video_set_drvdata(pvdev->vdev, pvdev);
+
+ msm_session_q = kzalloc(sizeof(*msm_session_q), GFP_KERNEL);
+	if (WARN_ON(!msm_session_q)) {
+		rc = -ENOMEM;
+		goto v4l2_fail;
+	}
+
+ msm_init_queue(msm_session_q);
+ spin_lock_init(&msm_eventq_lock);
+ spin_lock_init(&msm_pid_lock);
+ INIT_LIST_HEAD(&ordered_sd_list);
+
+ cam_debugfs_root = debugfs_create_dir(MSM_CAM_LOGSYNC_FILE_BASEDIR,
+ NULL);
+ if (!cam_debugfs_root) {
+ pr_warn("NON-FATAL: failed to create logsync base directory\n");
+ } else {
+ if (!debugfs_create_file(MSM_CAM_LOGSYNC_FILE_NAME,
+ 0666,
+ cam_debugfs_root,
+ NULL,
+ &logsync_fops))
+ pr_warn("NON-FATAL: failed to create logsync debugfs file\n");
+ }
+
+ rc = cam_ahb_clk_init(pdev);
+ if (rc < 0) {
+ pr_err("%s: failed to register ahb clocks\n", __func__);
+ goto v4l2_fail;
+ }
+
+ goto probe_end;
+
+v4l2_fail:
+ v4l2_device_unregister(pvdev->vdev->v4l2_dev);
+register_fail:
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&pvdev->vdev->entity);
+entity_fail:
+ media_device_unregister(msm_v4l2_dev->mdev);
+media_fail:
+ kzfree(msm_v4l2_dev->mdev);
+mdev_fail:
+#endif
+ video_device_release(pvdev->vdev);
+video_fail:
+ kzfree(pvdev);
+pvdev_fail:
+ kzfree(msm_v4l2_dev);
+probe_end:
+ return rc;
+}
+
+static const struct of_device_id msm_dt_match[] = {
+ {.compatible = "qcom,msm-cam"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_dt_match);
+
+static struct platform_driver msm_driver = {
+ .probe = msm_probe,
+ .driver = {
+ .name = "msm",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_dt_match,
+ },
+};
+
+static int __init msm_init(void)
+{
+ return platform_driver_register(&msm_driver);
+}
+
+static void __exit msm_exit(void)
+{
+ platform_driver_unregister(&msm_driver);
+}
+
+module_init(msm_init);
+module_exit(msm_exit);
+MODULE_DESCRIPTION("MSM V4L2 Camera");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/msm.h b/drivers/media/platform/msm/camera_v2/msm.h
new file mode 100644
index 000000000000..fed7dd041ead
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/msm.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_H
+#define _MSM_H
+
+#include <linux/version.h>
+#include <linux/completion.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/pm_qos.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/msmb_camera.h>
+
+/* Set the MAX timeout to 6.5 seconds: the backend can operate at
+ * 0.6 fps in certain use cases, such as long exposure, and the isp
+ * needs at most 2 frames (around 3 seconds) to stop the hardware */
+#define MSM_POST_EVT_TIMEOUT 6500
+#define MSM_POST_EVT_NOTIMEOUT 0xFFFFFFFF
+#define MSM_CAMERA_STREAM_CNT_BITS 32
+
+struct msm_video_device {
+ struct video_device *vdev;
+ atomic_t opened;
+};
+
+struct msm_queue_head {
+ struct list_head list;
+ spinlock_t lock;
+ int len;
+ int max;
+};
+
+/** msm_event:
+ *
+ * event sent by imaging server
+ **/
+struct msm_event {
+ struct video_device *vdev;
+ atomic_t on_heap;
+};
+
+struct msm_command {
+ struct list_head list;
+ struct v4l2_event event;
+ atomic_t on_heap;
+};
+
+/** struct msm_command_ack
+ *
+ * Object of command_ack_q, which is
+ * created per open operation
+ *
+ * contains struct msm_command
+ **/
+struct msm_command_ack {
+ struct list_head list;
+ struct msm_queue_head command_q;
+ struct completion wait_complete;
+ int stream_id;
+};
+
+struct msm_v4l2_subdev {
+ /* FIXME: for session close and error handling such
+ * as daemon shutdown */
+ int close_sequence;
+};
+
+struct msm_session {
+ struct list_head list;
+
+ /* session index */
+ unsigned int session_id;
+
+ /* event queue sent by imaging server */
+ struct msm_event event_q;
+
+	/* ACKs from the imaging server: one struct msm_command_ack
+	 * per open, on the assumption that the application can send
+	 * a command on every opened video node */
+ struct msm_queue_head command_ack_q;
+
+	/* real streams (either data or metadata) owned by one
+	 * session; entries are struct msm_stream */
+ struct msm_queue_head stream_q;
+ struct mutex lock;
+ struct mutex lock_q;
+};
+
+int msm_post_event(struct v4l2_event *event, int timeout);
+int msm_create_session(unsigned int session, struct video_device *vdev);
+int msm_destroy_session(unsigned int session_id);
+
+int msm_create_stream(unsigned int session_id,
+ unsigned int stream_id, struct vb2_queue *q);
+void msm_delete_stream(unsigned int session_id, unsigned int stream_id);
+int msm_create_command_ack_q(unsigned int session_id, unsigned int stream_id);
+void msm_delete_command_ack_q(unsigned int session_id, unsigned int stream_id);
+struct msm_stream *msm_get_stream(unsigned int session_id,
+ unsigned int stream_id);
+struct vb2_queue *msm_get_stream_vb2q(unsigned int session_id,
+ unsigned int stream_id);
+struct msm_stream *msm_get_stream_from_vb2q(struct vb2_queue *q);
+struct msm_session *msm_session_find(unsigned int session_id);
+#endif /*_MSM_H */
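The header above only declares the session/stream bookkeeping API; a minimal
caller sketch (hypothetical helper name, assuming the usual create/teardown
ordering) might look like this:

static int example_open_session(struct video_device *vdev,
				struct vb2_queue *q,
				unsigned int session_id,
				unsigned int stream_id)
{
	int rc;

	rc = msm_create_session(session_id, vdev);
	if (rc < 0)
		return rc;

	rc = msm_create_command_ack_q(session_id, stream_id);
	if (rc < 0)
		goto destroy_session;

	rc = msm_create_stream(session_id, stream_id, q);
	if (rc < 0)
		goto delete_ack_q;

	return 0;

delete_ack_q:
	msm_delete_command_ack_q(session_id, stream_id);
destroy_session:
	msm_destroy_session(session_id);
	return rc;
}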
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/Makefile b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/Makefile
new file mode 100644
index 000000000000..8832457f4cf5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/Makefile
@@ -0,0 +1,2 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+obj-$(CONFIG_MSMB_CAMERA) += msm_generic_buf_mgr.o
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
new file mode 100644
index 000000000000..cb2048fa1454
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
@@ -0,0 +1,667 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "CAM-BUFMGR %s:%d " fmt, __func__, __LINE__
+
+#include "msm_generic_buf_mgr.h"
+
+static struct msm_buf_mngr_device *msm_buf_mngr_dev;
+
+struct v4l2_subdev *msm_buf_mngr_get_subdev(void)
+{
+ return &msm_buf_mngr_dev->subdev.sd;
+}
+
+static int32_t msm_buf_mngr_hdl_cont_get_buf(struct msm_buf_mngr_device *dev,
+ struct msm_buf_mngr_info *buf_info)
+{
+ unsigned int i;
+ struct msm_buf_mngr_user_buf_cont_info *cbuf, *cont_save;
+
+ list_for_each_entry_safe(cbuf, cont_save, &dev->cont_qhead, entry) {
+ if ((cbuf->sessid == buf_info->session_id) &&
+ (cbuf->index == buf_info->index) &&
+ (cbuf->strid == buf_info->stream_id)) {
+ buf_info->user_buf.buf_cnt = cbuf->paddr->buf_cnt;
+ if (buf_info->user_buf.buf_cnt >
+ MSM_CAMERA_MAX_USER_BUFF_CNT) {
+ pr_err("Invalid cnt%d,%d,%d\n",
+ cbuf->paddr->buf_cnt,
+ buf_info->session_id,
+ buf_info->stream_id);
+ return -EINVAL;
+ }
+			for (i = 0; i < buf_info->user_buf.buf_cnt; i++) {
+ buf_info->user_buf.buf_idx[i] =
+ cbuf->paddr->buf_idx[i];
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
+static int32_t msm_buf_mngr_get_buf(struct msm_buf_mngr_device *dev,
+ void __user *argp)
+{
+ unsigned long flags;
+ int32_t rc = 0;
+ struct msm_buf_mngr_info *buf_info =
+ (struct msm_buf_mngr_info *)argp;
+ struct msm_get_bufs *new_entry =
+ kzalloc(sizeof(struct msm_get_bufs), GFP_KERNEL);
+
+ if (!new_entry) {
+ pr_err("%s:No mem\n", __func__);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&new_entry->entry);
+ new_entry->vb2_buf = dev->vb2_ops.get_buf(buf_info->session_id,
+ buf_info->stream_id);
+ if (!new_entry->vb2_buf) {
+ pr_debug("%s:Get buf is null\n", __func__);
+ kfree(new_entry);
+ return -EINVAL;
+ }
+ new_entry->session_id = buf_info->session_id;
+ new_entry->stream_id = buf_info->stream_id;
+ spin_lock_irqsave(&dev->buf_q_spinlock, flags);
+ list_add_tail(&new_entry->entry, &dev->buf_qhead);
+ spin_unlock_irqrestore(&dev->buf_q_spinlock, flags);
+ buf_info->index = new_entry->vb2_buf->v4l2_buf.index;
+ if (buf_info->type == MSM_CAMERA_BUF_MNGR_BUF_USER) {
+ mutex_lock(&dev->cont_mutex);
+ if (!list_empty(&dev->cont_qhead)) {
+ rc = msm_buf_mngr_hdl_cont_get_buf(dev, buf_info);
+ } else {
+ pr_err("Nothing mapped in user buf for %d,%d\n",
+ buf_info->session_id, buf_info->stream_id);
+ rc = -EINVAL;
+ }
+ mutex_unlock(&dev->cont_mutex);
+ }
+ return rc;
+}
+
+static int32_t msm_buf_mngr_buf_done(struct msm_buf_mngr_device *buf_mngr_dev,
+ struct msm_buf_mngr_info *buf_info)
+{
+ unsigned long flags;
+ struct msm_get_bufs *bufs, *save;
+ int32_t ret = -EINVAL;
+
+ spin_lock_irqsave(&buf_mngr_dev->buf_q_spinlock, flags);
+ list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
+ if ((bufs->session_id == buf_info->session_id) &&
+ (bufs->stream_id == buf_info->stream_id) &&
+ (bufs->vb2_buf->v4l2_buf.index == buf_info->index)) {
+ bufs->vb2_buf->v4l2_buf.sequence = buf_info->frame_id;
+ bufs->vb2_buf->v4l2_buf.timestamp = buf_info->timestamp;
+ bufs->vb2_buf->v4l2_buf.reserved = buf_info->reserved;
+ ret = buf_mngr_dev->vb2_ops.buf_done
+ (bufs->vb2_buf,
+ buf_info->session_id,
+ buf_info->stream_id);
+ list_del_init(&bufs->entry);
+ kfree(bufs);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&buf_mngr_dev->buf_q_spinlock, flags);
+ return ret;
+}
+
+
+static int32_t msm_buf_mngr_put_buf(struct msm_buf_mngr_device *buf_mngr_dev,
+ struct msm_buf_mngr_info *buf_info)
+{
+ unsigned long flags;
+ struct msm_get_bufs *bufs, *save;
+ int32_t ret = -EINVAL;
+
+ spin_lock_irqsave(&buf_mngr_dev->buf_q_spinlock, flags);
+ list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
+ if ((bufs->session_id == buf_info->session_id) &&
+ (bufs->stream_id == buf_info->stream_id) &&
+ (bufs->vb2_buf->v4l2_buf.index == buf_info->index)) {
+ ret = buf_mngr_dev->vb2_ops.put_buf(bufs->vb2_buf,
+ buf_info->session_id, buf_info->stream_id);
+ list_del_init(&bufs->entry);
+ kfree(bufs);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&buf_mngr_dev->buf_q_spinlock, flags);
+ return ret;
+}
+
+static int32_t msm_generic_buf_mngr_flush(
+ struct msm_buf_mngr_device *buf_mngr_dev,
+ struct msm_buf_mngr_info *buf_info)
+{
+ unsigned long flags;
+ struct msm_get_bufs *bufs, *save;
+ int32_t ret = -EINVAL;
+
+ spin_lock_irqsave(&buf_mngr_dev->buf_q_spinlock, flags);
+ /*
+	 * Sanity check the client buffer list and remove any
+	 * remaining buf mgr queue entries.
+ */
+ list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
+ if ((bufs->session_id == buf_info->session_id) &&
+ (bufs->stream_id == buf_info->stream_id)) {
+ ret = buf_mngr_dev->vb2_ops.buf_done(bufs->vb2_buf,
+ buf_info->session_id,
+ buf_info->stream_id);
+ pr_err("Bufs not flushed: str_id = %d buf_index = %d ret = %d\n",
+ buf_info->stream_id, bufs->vb2_buf->v4l2_buf.index,
+ ret);
+ list_del_init(&bufs->entry);
+ kfree(bufs);
+ }
+ }
+ spin_unlock_irqrestore(&buf_mngr_dev->buf_q_spinlock, flags);
+ /* Flush the remaining vb2 buffers in stream list */
+ ret = buf_mngr_dev->vb2_ops.flush_buf(buf_info->session_id,
+ buf_info->stream_id);
+ return ret;
+}
+
+static int32_t msm_buf_mngr_find_cont_stream(struct msm_buf_mngr_device *dev,
+ uint32_t *cnt, uint32_t *tstream,
+ struct msm_sd_close_ioctl *session)
+{
+ struct msm_buf_mngr_user_buf_cont_info *cont_bufs, *cont_save;
+ int32_t ret = -1;
+
+ list_for_each_entry_safe(cont_bufs,
+ cont_save, &dev->cont_qhead, entry) {
+ if (cont_bufs->sessid == session->session) {
+ *cnt = cont_bufs->cnt;
+ *tstream = cont_bufs->strid;
+ return 0;
+ }
+ }
+ return ret;
+}
+
+static void msm_buf_mngr_contq_listdel(struct msm_buf_mngr_device *dev,
+ uint32_t session, int32_t stream,
+ bool unmap, uint32_t cnt)
+{
+ struct msm_buf_mngr_user_buf_cont_info *cont_bufs, *cont_save;
+
+ list_for_each_entry_safe(cont_bufs,
+ cont_save, &dev->cont_qhead, entry) {
+ if ((cont_bufs->sessid == session) &&
+ (cont_bufs->strid == stream)) {
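+			/* The ion handle is shared by all entries of this
+			 * container; unmap and free it only when the last
+			 * remaining entry is being released. */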
+ if (cnt == 1 && unmap == 1) {
+ ion_unmap_kernel(dev->ion_client,
+ cont_bufs->ion_handle);
+ ion_free(dev->ion_client,
+ cont_bufs->ion_handle);
+ }
+ list_del_init(&cont_bufs->entry);
+ kfree(cont_bufs);
+ cnt--;
+ }
+ }
+ if (cnt != 0)
+ pr_err("Buffers pending cnt = %d\n", cnt);
+}
+
+static void msm_buf_mngr_contq_cleanup(struct msm_buf_mngr_device *dev,
+ struct msm_sd_close_ioctl *session)
+{
+ int32_t stream = -1, found = -1;
+ uint32_t cnt = 0;
+
+ do {
+ found = msm_buf_mngr_find_cont_stream(dev, &cnt,
+ &stream, session);
+ if (found == -1)
+ break;
+ msm_buf_mngr_contq_listdel(dev, session->session,
+ stream, 1, cnt);
+ } while (found == 0);
+}
+
+static void msm_buf_mngr_sd_shutdown(struct msm_buf_mngr_device *dev,
+ struct msm_sd_close_ioctl *session)
+{
+ unsigned long flags;
+ struct msm_get_bufs *bufs, *save;
+
+ BUG_ON(!dev);
+ BUG_ON(!session);
+
+ spin_lock_irqsave(&dev->buf_q_spinlock, flags);
+ if (!list_empty(&dev->buf_qhead)) {
+ list_for_each_entry_safe(bufs,
+ save, &dev->buf_qhead, entry) {
+ pr_info("%s: Delete invalid bufs =%lx, session_id=%u, bufs->ses_id=%d, str_id=%d, idx=%d\n",
+ __func__, (unsigned long)bufs, session->session,
+ bufs->session_id, bufs->stream_id,
+ bufs->vb2_buf->v4l2_buf.index);
+ if (session->session == bufs->session_id) {
+ list_del_init(&bufs->entry);
+ kfree(bufs);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&dev->buf_q_spinlock, flags);
+ mutex_lock(&dev->cont_mutex);
+ if (!list_empty(&dev->cont_qhead))
+ msm_buf_mngr_contq_cleanup(dev, session);
+ mutex_unlock(&dev->cont_mutex);
+}
+
+static int msm_buf_mngr_handle_cont_cmd(struct msm_buf_mngr_device *dev,
+ struct msm_buf_mngr_main_cont_info
+ *cont_cmd)
+{
+ int rc = 0, i = 0;
+ struct ion_handle *ion_handle = NULL;
+ struct msm_camera_user_buf_cont_t *iaddr, *temp_addr;
+ struct msm_buf_mngr_user_buf_cont_info *new_entry, *bufs, *save;
+ size_t size;
+
+ if ((cont_cmd->cmd >= MSM_CAMERA_BUF_MNGR_CONT_MAX) ||
+ (cont_cmd->cmd < 0) ||
+ (cont_cmd->cnt > VB2_MAX_FRAME) ||
+ (cont_cmd->cont_fd < 0)) {
+ pr_debug("Invalid arg passed Cmd:%d, cnt:%d, fd:%d\n",
+ cont_cmd->cmd, cont_cmd->cnt,
+ cont_cmd->cont_fd);
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->cont_mutex);
+
+ if (cont_cmd->cmd == MSM_CAMERA_BUF_MNGR_CONT_MAP) {
+ if (!list_empty(&dev->cont_qhead)) {
+ list_for_each_entry_safe(bufs,
+ save, &dev->cont_qhead, entry) {
+ if ((bufs->sessid == cont_cmd->session_id) &&
+ (bufs->strid == cont_cmd->stream_id)) {
+ pr_err("Map exist %d,%d unmap first\n",
+ cont_cmd->session_id,
+ cont_cmd->stream_id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+ }
+ ion_handle = ion_import_dma_buf(dev->ion_client,
+ cont_cmd->cont_fd);
+ if (IS_ERR_OR_NULL(ion_handle)) {
+ pr_err("Failed to create ion handle for fd %d\n",
+ cont_cmd->cont_fd);
+ rc = -EINVAL;
+ goto end;
+ }
+ if (ion_handle_get_size(dev->ion_client,
+ ion_handle, &size) < 0) {
+ pr_err("Get ion size failed\n");
+ rc = -EINVAL;
+ goto free_ion_handle;
+ }
+ if ((size == 0) || (size <
+ (sizeof(struct msm_camera_user_buf_cont_t) *
+ cont_cmd->cnt))) {
+ pr_err("Invalid or zero size ION buffer %zu\n", size);
+ rc = -EINVAL;
+ goto free_ion_handle;
+ }
+ iaddr = ion_map_kernel(dev->ion_client, ion_handle);
+ if (IS_ERR_OR_NULL(iaddr)) {
+ pr_err("Mapping cont buff failed\n");
+ rc = -EINVAL;
+ goto free_ion_handle;
+ }
+ for (i = 0; i < cont_cmd->cnt; i++) {
+ temp_addr = iaddr + i;
+ if (temp_addr->buf_cnt >
+ MSM_CAMERA_MAX_USER_BUFF_CNT) {
+ pr_err("%s:Invalid buf_cnt:%d for cont:%d\n",
+ __func__, temp_addr->buf_cnt, i);
+ rc = -EINVAL;
+ goto free_list;
+ }
+ new_entry = kzalloc(sizeof(
+ struct msm_buf_mngr_user_buf_cont_info),
+ GFP_KERNEL);
+ if (!new_entry) {
+ pr_err("%s:No mem\n", __func__);
+ rc = -ENOMEM;
+ goto free_list;
+ }
+ INIT_LIST_HEAD(&new_entry->entry);
+ new_entry->sessid = cont_cmd->session_id;
+ new_entry->strid = cont_cmd->stream_id;
+ new_entry->index = i;
+ new_entry->main_fd = cont_cmd->cont_fd;
+ new_entry->ion_handle = ion_handle;
+ new_entry->cnt = cont_cmd->cnt;
+ new_entry->paddr = temp_addr;
+ list_add_tail(&new_entry->entry, &dev->cont_qhead);
+ }
+ goto end;
+ } else if (cont_cmd->cmd == MSM_CAMERA_BUF_MNGR_CONT_UNMAP) {
+ if (!list_empty(&dev->cont_qhead)) {
+ msm_buf_mngr_contq_listdel(dev, cont_cmd->session_id,
+ cont_cmd->stream_id, 1, cont_cmd->cnt);
+ } else {
+ pr_err("Nothing mapped for %d,%d\n",
+ cont_cmd->session_id, cont_cmd->stream_id);
+ rc = -EINVAL;
+ }
+ goto end;
+ }
+
+free_list:
+ if (i != 0) {
+ if (!list_empty(&dev->cont_qhead)) {
+ msm_buf_mngr_contq_listdel(dev, cont_cmd->session_id,
+ cont_cmd->stream_id, 0, i);
+ }
+ }
+ ion_unmap_kernel(dev->ion_client, ion_handle);
+free_ion_handle:
+ ion_free(dev->ion_client, ion_handle);
+end:
+ mutex_unlock(&dev->cont_mutex);
+ return rc;
+}
+
+static int msm_generic_buf_mngr_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_buf_mngr_device *buf_mngr_dev = v4l2_get_subdevdata(sd);
+ if (!buf_mngr_dev) {
+ pr_err("%s buf manager device NULL\n", __func__);
+ rc = -ENODEV;
+ return rc;
+ }
+ return rc;
+}
+
+static int msm_generic_buf_mngr_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_buf_mngr_device *buf_mngr_dev = v4l2_get_subdevdata(sd);
+ if (!buf_mngr_dev) {
+ pr_err("%s buf manager device NULL\n", __func__);
+ rc = -ENODEV;
+ return rc;
+ }
+ return rc;
+}
+
+static long msm_buf_mngr_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+ struct msm_buf_mngr_device *buf_mngr_dev = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+
+ if (!buf_mngr_dev) {
+ pr_err("%s buf manager device NULL\n", __func__);
+		rc = -ENODEV;
+ return rc;
+ }
+
+ switch (cmd) {
+ case VIDIOC_MSM_BUF_MNGR_GET_BUF:
+ rc = msm_buf_mngr_get_buf(buf_mngr_dev, argp);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
+ rc = msm_buf_mngr_buf_done(buf_mngr_dev, argp);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_PUT_BUF:
+ rc = msm_buf_mngr_put_buf(buf_mngr_dev, argp);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_INIT:
+ rc = msm_generic_buf_mngr_open(sd, NULL);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_DEINIT:
+ rc = msm_generic_buf_mngr_close(sd, NULL);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ case VIDIOC_MSM_BUF_MNGR_FLUSH:
+ rc = msm_generic_buf_mngr_flush(buf_mngr_dev, argp);
+ break;
+ case MSM_SD_SHUTDOWN:
+ msm_buf_mngr_sd_shutdown(buf_mngr_dev, argp);
+ break;
+ case VIDIOC_MSM_BUF_MNGR_CONT_CMD:
+ rc = msm_buf_mngr_handle_cont_cmd(buf_mngr_dev, argp);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_bmgr_subdev_fops_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ int32_t rc = 0;
+
+ void __user *up = (void __user *)arg;
+
+	/* Convert the 32-bit ioctl IDs to their native
+	 * counterparts before dispatching to the subdev
+ */
+ switch (cmd) {
+ case VIDIOC_MSM_BUF_MNGR_GET_BUF32:
+ cmd = VIDIOC_MSM_BUF_MNGR_GET_BUF;
+ break;
+ case VIDIOC_MSM_BUF_MNGR_BUF_DONE32:
+ cmd = VIDIOC_MSM_BUF_MNGR_BUF_DONE;
+ break;
+ case VIDIOC_MSM_BUF_MNGR_PUT_BUF32:
+ cmd = VIDIOC_MSM_BUF_MNGR_PUT_BUF;
+ break;
+ case VIDIOC_MSM_BUF_MNGR_CONT_CMD:
+ cmd = VIDIOC_MSM_BUF_MNGR_CONT_CMD;
+ break;
+ case VIDIOC_MSM_BUF_MNGR_FLUSH32:
+ cmd = VIDIOC_MSM_BUF_MNGR_FLUSH;
+ break;
+ default:
+		pr_debug("%s: unsupported compat type\n", __func__);
+ return -ENOIOCTLCMD;
+ }
+
+ switch (cmd) {
+ case VIDIOC_MSM_BUF_MNGR_GET_BUF:
+ case VIDIOC_MSM_BUF_MNGR_BUF_DONE:
+ case VIDIOC_MSM_BUF_MNGR_FLUSH:
+ case VIDIOC_MSM_BUF_MNGR_PUT_BUF: {
+ struct msm_buf_mngr_info32_t buf_info32;
+ struct msm_buf_mngr_info buf_info;
+
+ if (copy_from_user(&buf_info32, (void __user *)up,
+ sizeof(struct msm_buf_mngr_info32_t)))
+ return -EFAULT;
+
+ buf_info.session_id = buf_info32.session_id;
+ buf_info.stream_id = buf_info32.stream_id;
+ buf_info.frame_id = buf_info32.frame_id;
+ buf_info.index = buf_info32.index;
+ buf_info.timestamp.tv_sec = (long) buf_info32.timestamp.tv_sec;
+ buf_info.timestamp.tv_usec = (long) buf_info32.
+ timestamp.tv_usec;
+ buf_info.reserved = buf_info32.reserved;
+ buf_info.type = buf_info32.type;
+
+ rc = v4l2_subdev_call(sd, core, ioctl, cmd, &buf_info);
+ if (rc < 0) {
+ pr_debug("%s : Subdev cmd %d fail", __func__, cmd);
+ return rc;
+ }
+
+ buf_info32.session_id = buf_info.session_id;
+ buf_info32.stream_id = buf_info.stream_id;
+ buf_info32.index = buf_info.index;
+ buf_info32.timestamp.tv_sec = (int32_t) buf_info.
+ timestamp.tv_sec;
+ buf_info32.timestamp.tv_usec = (int32_t) buf_info.timestamp.
+ tv_usec;
+ buf_info32.reserved = buf_info.reserved;
+ buf_info32.type = buf_info.type;
+ buf_info32.user_buf.buf_cnt = buf_info.user_buf.buf_cnt;
+ memcpy(&buf_info32.user_buf.buf_idx,
+ &buf_info.user_buf.buf_idx,
+ sizeof(buf_info.user_buf.buf_idx));
+ if (copy_to_user((void __user *)up, &buf_info32,
+ sizeof(struct msm_buf_mngr_info32_t)))
+ return -EFAULT;
+ }
+ break;
+ case VIDIOC_MSM_BUF_MNGR_CONT_CMD: {
+ struct msm_buf_mngr_main_cont_info cont_cmd;
+
+ if (copy_from_user(&cont_cmd, (void __user *)up,
+ sizeof(struct msm_buf_mngr_main_cont_info)))
+ return -EFAULT;
+ rc = v4l2_subdev_call(sd, core, ioctl, cmd, &cont_cmd);
+ if (rc < 0) {
+ pr_debug("%s : Subdev cmd %d fail", __func__, cmd);
+ return rc;
+ }
+ }
+ break;
+ default:
+		pr_debug("%s: unsupported compat type\n", __func__);
+		return -ENOIOCTLCMD;
+ }
+
+ return 0;
+}
+#endif
+
+static struct v4l2_subdev_core_ops msm_buf_mngr_subdev_core_ops = {
+ .ioctl = msm_buf_mngr_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_internal_ops
+ msm_generic_buf_mngr_subdev_internal_ops = {
+ .open = msm_generic_buf_mngr_open,
+ .close = msm_generic_buf_mngr_close,
+};
+
+static const struct v4l2_subdev_ops msm_buf_mngr_subdev_ops = {
+ .core = &msm_buf_mngr_subdev_core_ops,
+};
+
+static const struct of_device_id msm_buf_mngr_dt_match[] = {
+ {.compatible = "qcom,msm_buf_mngr"},
+ {}
+};
+
+static struct v4l2_file_operations msm_buf_v4l2_subdev_fops;
+
+static long msm_bmgr_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+}
+
+
+static long msm_buf_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_bmgr_subdev_do_ioctl);
+}
+
+static int32_t __init msm_buf_mngr_init(void)
+{
+ int32_t rc = 0;
+ msm_buf_mngr_dev = kzalloc(sizeof(*msm_buf_mngr_dev),
+ GFP_KERNEL);
+ if (WARN_ON(!msm_buf_mngr_dev)) {
+ pr_err("%s: not enough memory", __func__);
+ return -ENOMEM;
+ }
+ /* Sub-dev */
+ v4l2_subdev_init(&msm_buf_mngr_dev->subdev.sd,
+ &msm_buf_mngr_subdev_ops);
+ msm_cam_copy_v4l2_subdev_fops(&msm_buf_v4l2_subdev_fops);
+ msm_buf_v4l2_subdev_fops.unlocked_ioctl = msm_buf_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+ msm_buf_v4l2_subdev_fops.compat_ioctl32 =
+ msm_bmgr_subdev_fops_compat_ioctl;
+#endif
+ snprintf(msm_buf_mngr_dev->subdev.sd.name,
+ ARRAY_SIZE(msm_buf_mngr_dev->subdev.sd.name), "msm_buf_mngr");
+ msm_buf_mngr_dev->subdev.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ v4l2_set_subdevdata(&msm_buf_mngr_dev->subdev.sd, msm_buf_mngr_dev);
+
+ media_entity_init(&msm_buf_mngr_dev->subdev.sd.entity, 0, NULL, 0);
+ msm_buf_mngr_dev->subdev.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ msm_buf_mngr_dev->subdev.sd.entity.group_id =
+ MSM_CAMERA_SUBDEV_BUF_MNGR;
+ msm_buf_mngr_dev->subdev.sd.internal_ops =
+ &msm_generic_buf_mngr_subdev_internal_ops;
+ msm_buf_mngr_dev->subdev.close_seq = MSM_SD_CLOSE_4TH_CATEGORY;
+ rc = msm_sd_register(&msm_buf_mngr_dev->subdev);
+ if (rc != 0) {
+ pr_err("%s: msm_sd_register error = %d\n", __func__, rc);
+ goto end;
+ }
+
+ msm_buf_mngr_dev->subdev.sd.devnode->fops = &msm_buf_v4l2_subdev_fops;
+
+ v4l2_subdev_notify(&msm_buf_mngr_dev->subdev.sd, MSM_SD_NOTIFY_REQ_CB,
+ &msm_buf_mngr_dev->vb2_ops);
+
+ INIT_LIST_HEAD(&msm_buf_mngr_dev->buf_qhead);
+ spin_lock_init(&msm_buf_mngr_dev->buf_q_spinlock);
+
+ mutex_init(&msm_buf_mngr_dev->cont_mutex);
+ INIT_LIST_HEAD(&msm_buf_mngr_dev->cont_qhead);
+ msm_buf_mngr_dev->ion_client =
+ msm_ion_client_create("msm_cam_generic_buf_mgr");
+ if (!msm_buf_mngr_dev->ion_client) {
+ pr_err("%s: Failed to create ion client\n", __func__);
+ rc = -EBADFD;
+ }
+
+end:
+ return rc;
+}
+
+static void __exit msm_buf_mngr_exit(void)
+{
+ msm_sd_unregister(&msm_buf_mngr_dev->subdev);
+ mutex_destroy(&msm_buf_mngr_dev->cont_mutex);
+ kfree(msm_buf_mngr_dev);
+}
+
+module_init(msm_buf_mngr_init);
+module_exit(msm_buf_mngr_exit);
+MODULE_DESCRIPTION("MSM Buffer Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h
new file mode 100644
index 000000000000..af9f927cb3b4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUF_GENERIC_MNGR_H__
+#define __MSM_BUF_GENERIC_MNGR_H__
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_camera.h>
+#include <media/msmb_generic_buf_mgr.h>
+
+#include "msm.h"
+#include "msm_sd.h"
+
+struct msm_get_bufs {
+ struct list_head entry;
+ struct vb2_buffer *vb2_buf;
+ uint32_t session_id;
+ uint32_t stream_id;
+};
+
+struct msm_buf_mngr_device {
+ struct list_head buf_qhead;
+ spinlock_t buf_q_spinlock;
+ struct ion_client *ion_client;
+ struct msm_sd_subdev subdev;
+ struct msm_sd_req_vb2_q vb2_ops;
+ struct list_head cont_qhead;
+ struct mutex cont_mutex;
+};
+
+struct msm_buf_mngr_user_buf_cont_info {
+ struct list_head entry;
+ uint32_t sessid;
+ uint32_t strid;
+ uint32_t index;
+ int32_t main_fd;
+ struct msm_camera_user_buf_cont_t *paddr;
+ uint32_t cnt;
+ struct ion_handle *ion_handle;
+};
+#endif
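A hypothetical client sketch for the subdev interface above (names are
illustrative; the real clients are the post-processing and ISP drivers):
fetch a free buffer for a (session, stream) pair, then return it to the
vb2 queue once the hardware is done with it.

static int example_process_one_buffer(struct v4l2_subdev *buf_mngr_sd,
				      uint32_t session_id,
				      uint32_t stream_id)
{
	struct msm_buf_mngr_info buf_info;
	int rc;

	memset(&buf_info, 0, sizeof(buf_info));
	buf_info.session_id = session_id;
	buf_info.stream_id = stream_id;

	rc = v4l2_subdev_call(buf_mngr_sd, core, ioctl,
			      VIDIOC_MSM_BUF_MNGR_GET_BUF, &buf_info);
	if (rc < 0)
		return rc;

	/* ... program the hardware with buf_info.index ... */

	return v4l2_subdev_call(buf_mngr_sd, core, ioctl,
				VIDIOC_MSM_BUF_MNGR_BUF_DONE, &buf_info);
}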
diff --git a/drivers/media/platform/msm/camera_v2/msm_sd.h b/drivers/media/platform/msm/camera_v2/msm_sd.h
new file mode 100644
index 000000000000..25fcb75b0c87
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/msm_sd.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_SD_H
+#define _MSM_SD_H
+
+#include <media/v4l2-subdev.h>
+#include <media/msmb_camera.h>
+
+/* NOTE: this header file should ONLY be included by subdev drivers */
+
+struct msm_sd_close_ioctl {
+ unsigned int session;
+ unsigned int stream;
+};
+
+#define MSM_SD_CLOSE_STREAM \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 26, struct msm_sd_close_ioctl)
+
+#define MSM_SD_CLOSE_SESSION \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 27, struct msm_sd_close_ioctl)
+
+#define MSM_SD_CLOSE_SESSION_AND_STREAM \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 28, struct msm_sd_close_ioctl)
+
+#define MSM_SD_SHUTDOWN \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 29, struct msm_sd_close_ioctl)
+
+#define MSM_SD_NOTIFY_FREEZE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 30, struct msm_sd_close_ioctl)
+/*
+ * The close sequence is installed via msm_sd_register(); during
+ * msm_close(), subdevs are torn down according to this sequence.
+ * For example:
+ *
+ * close_sequence = 0x00010001 (ISP)
+ * close_sequence = 0x00010002 (ISP)
+ * close_sequence = 0x00010003 (ISP)
+ * close_sequence = 0x00020001 (sensor)
+ * close_sequence = 0x00020002 (sensor)
+ * close_sequence = 0x00020003 (sensor)
+ */
+#define MSM_SD_CLOSE_1ST_CATEGORY 0x00010000
+#define MSM_SD_CLOSE_2ND_CATEGORY 0x00020000
+#define MSM_SD_CLOSE_3RD_CATEGORY 0x00030000
+#define MSM_SD_CLOSE_4TH_CATEGORY 0x00040000
+
+struct msm_sd_subdev {
+ struct v4l2_subdev sd;
+ int close_seq;
+ struct list_head list;
+};
+
+struct msm_sd_req_sd {
+ char *name;
+ struct v4l2_subdev *subdev;
+};
+
+struct msm_sd_req_vb2_q {
+ struct vb2_buffer * (*get_buf)(int session_id, unsigned int stream_id);
+ struct vb2_queue * (*get_vb2_queue)(int session_id,
+ unsigned int stream_id);
+ int (*put_buf)(struct vb2_buffer *vb2_buf, int session_id,
+ unsigned int stream_id);
+ int (*buf_done)(struct vb2_buffer *vb2_buf, int session_id,
+ unsigned int stream_id);
+ int (*flush_buf)(int session_id, unsigned int stream_id);
+};
+
+#define MSM_SD_NOTIFY_GET_SD 0x00000001
+#define MSM_SD_NOTIFY_PUT_SD 0x00000002
+#define MSM_SD_NOTIFY_REQ_CB 0x00000003
+
+int msm_sd_register(struct msm_sd_subdev *msm_subdev);
+int msm_sd_unregister(struct msm_sd_subdev *sd);
+struct v4l2_subdev *msm_sd_get_subdev(struct v4l2_subdev *sd,
+ const char *get_name);
+void msm_sd_put_subdev(struct v4l2_subdev *sd, struct v4l2_subdev *put);
+void msm_cam_copy_v4l2_subdev_fops(struct v4l2_file_operations *d1);
+
+#endif /*_MSM_SD_H */
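A hypothetical subdev sketch for the registration API above: install a close
sequence so msm_close() tears this subdev down in the 2nd category, after all
1st-category subdevs (see the ordering example in the header comment).

static struct msm_sd_subdev example_sd;

static int example_sd_init(const struct v4l2_subdev_ops *ops)
{
	v4l2_subdev_init(&example_sd.sd, ops);
	example_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	example_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1;
	return msm_sd_register(&example_sd);
}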
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/Makefile b/drivers/media/platform/msm/camera_v2/msm_vb2/Makefile
new file mode 100644
index 000000000000..2673bdd3eeb6
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_vb2
+obj-$(CONFIG_MSMB_CAMERA) += msm_vb2.o
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
new file mode 100644
index 000000000000..b2993d0bb033
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c
@@ -0,0 +1,351 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_vb2.h"
+
+static int msm_vb2_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ int i;
+ struct msm_v4l2_format_data *data = q->drv_priv;
+
+ if (!data) {
+ pr_err("%s: drv_priv NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (data->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (WARN_ON(data->num_planes > VIDEO_MAX_PLANES))
+ return -EINVAL;
+
+ *num_planes = data->num_planes;
+
+ for (i = 0; i < data->num_planes; i++)
+ sizes[i] = data->plane_sizes[i];
+ } else {
+ pr_err("%s: Unsupported buf type :%d\n", __func__,
+ data->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int msm_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct msm_stream *stream;
+ struct msm_vb2_buffer *msm_vb2_buf;
+
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
+ if (!stream) {
+ pr_err("%s: Couldn't find stream\n", __func__);
+ return -EINVAL;
+ }
+ msm_vb2_buf = container_of(vb, struct msm_vb2_buffer, vb2_buf);
+ msm_vb2_buf->in_freeq = 0;
+
+ return 0;
+}
+
+static void msm_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct msm_vb2_buffer *msm_vb2;
+ struct msm_stream *stream;
+ unsigned long flags;
+
+ msm_vb2 = container_of(vb, struct msm_vb2_buffer, vb2_buf);
+
+ if (!msm_vb2) {
+ pr_err("%s:%d] vb2_buf NULL", __func__, __LINE__);
+ return;
+ }
+
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
+ if (!stream) {
+ pr_err("%s:%d] NULL stream", __func__, __LINE__);
+ return;
+ }
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ list_add_tail(&msm_vb2->list, &stream->queued_list);
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+}
+
+static void msm_vb2_buf_finish(struct vb2_buffer *vb)
+{
+ struct msm_vb2_buffer *msm_vb2;
+ struct msm_stream *stream;
+ unsigned long flags;
+ struct msm_vb2_buffer *msm_vb2_entry, *temp;
+
+ msm_vb2 = container_of(vb, struct msm_vb2_buffer, vb2_buf);
+
+ if (!msm_vb2) {
+ pr_err("%s:%d] vb2_buf NULL", __func__, __LINE__);
+ return;
+ }
+
+ stream = msm_get_stream_from_vb2q(vb->vb2_queue);
+ if (!stream) {
+ pr_err("%s:%d] NULL stream", __func__, __LINE__);
+ return;
+ }
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ list_for_each_entry_safe(msm_vb2_entry, temp, &(stream->queued_list),
+ list) {
+ if (msm_vb2_entry == msm_vb2) {
+ list_del_init(&msm_vb2_entry->list);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+}
+
+static void msm_vb2_stop_stream(struct vb2_queue *q)
+{
+ struct msm_vb2_buffer *msm_vb2, *temp;
+ struct msm_stream *stream;
+ unsigned long flags;
+ struct vb2_buffer *vb2_buf;
+
+ stream = msm_get_stream_from_vb2q(q);
+ if (!stream) {
+ pr_err("%s:%d] NULL stream", __func__, __LINE__);
+ return;
+ }
+
+ /*
+ * Release all the buffers enqueued to driver
+ * when streamoff is issued
+ */
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ list_for_each_entry_safe(msm_vb2, temp, &(stream->queued_list),
+ list) {
+ vb2_buf = &(msm_vb2->vb2_buf);
+ if (vb2_buf->state == VB2_BUF_STATE_DONE)
+ continue;
+ vb2_buffer_done(vb2_buf, VB2_BUF_STATE_DONE);
+ msm_vb2->in_freeq = 0;
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+}
+
+static struct vb2_ops msm_vb2_get_q_op = {
+ .queue_setup = msm_vb2_queue_setup,
+ .buf_init = msm_vb2_buf_init,
+ .buf_queue = msm_vb2_buf_queue,
+ .buf_finish = msm_vb2_buf_finish,
+ .stop_streaming = msm_vb2_stop_stream,
+};
+
+
+struct vb2_ops *msm_vb2_get_q_ops(void)
+{
+ return &msm_vb2_get_q_op;
+}
+
+static void *msm_vb2_dma_contig_get_userptr(void *alloc_ctx,
+ unsigned long vaddr, unsigned long size, int write)
+{
+ struct msm_vb2_private_data *priv;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ priv->vaddr = (void *)vaddr;
+ priv->size = size;
+ priv->alloc_ctx = alloc_ctx;
+ return priv;
+}
+
+static void msm_vb2_dma_contig_put_userptr(void *buf_priv)
+{
+ kzfree(buf_priv);
+}
+
+static struct vb2_mem_ops msm_vb2_get_q_mem_op = {
+ .get_userptr = msm_vb2_dma_contig_get_userptr,
+ .put_userptr = msm_vb2_dma_contig_put_userptr,
+};
+
+struct vb2_mem_ops *msm_vb2_get_q_mem_ops(void)
+{
+ return &msm_vb2_get_q_mem_op;
+}
+
+static struct vb2_queue *msm_vb2_get_queue(int session_id,
+ unsigned int stream_id)
+{
+ return msm_get_stream_vb2q(session_id, stream_id);
+}
+
+static struct vb2_buffer *msm_vb2_get_buf(int session_id,
+ unsigned int stream_id)
+{
+ struct msm_stream *stream;
+ struct vb2_buffer *vb2_buf = NULL;
+ struct msm_vb2_buffer *msm_vb2 = NULL;
+ unsigned long flags;
+
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return NULL;
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+
+ if (!stream->vb2_q) {
+ pr_err("%s: stream q not available\n", __func__);
+ goto end;
+ }
+
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_buf = &(msm_vb2->vb2_buf);
+ if (vb2_buf->state != VB2_BUF_STATE_ACTIVE)
+ continue;
+
+ if (msm_vb2->in_freeq)
+ continue;
+
+ msm_vb2->in_freeq = 1;
+ goto end;
+ }
+ msm_vb2 = NULL;
+ vb2_buf = NULL;
+end:
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return vb2_buf;
+}
+
+static int msm_vb2_put_buf(struct vb2_buffer *vb, int session_id,
+ unsigned int stream_id)
+{
+ struct msm_stream *stream;
+ struct msm_vb2_buffer *msm_vb2;
+ struct vb2_buffer *vb2_buf = NULL;
+ int rc = 0;
+ unsigned long flags;
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return -EINVAL;
+
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ if (vb) {
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_buf = &(msm_vb2->vb2_buf);
+ if (vb2_buf == vb)
+ break;
+ }
+ if (vb2_buf != vb) {
+ pr_err("VB buffer is INVALID vb=%p, ses_id=%d, str_id=%d\n",
+ vb, session_id, stream_id);
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return -EINVAL;
+ }
+ msm_vb2 =
+ container_of(vb, struct msm_vb2_buffer, vb2_buf);
+ if (msm_vb2->in_freeq) {
+ msm_vb2->in_freeq = 0;
+ rc = 0;
+ } else
+ rc = -EINVAL;
+ } else {
+ pr_err(" VB buffer is null for ses_id=%d, str_id=%d\n",
+ session_id, stream_id);
+ rc = -EINVAL;
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return rc;
+}
+
+static int msm_vb2_buf_done(struct vb2_buffer *vb, int session_id,
+ unsigned int stream_id)
+{
+ unsigned long flags;
+ struct msm_vb2_buffer *msm_vb2;
+ struct msm_stream *stream;
+ struct vb2_buffer *vb2_buf = NULL;
+ int rc = 0;
+
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return -EINVAL;
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ if (vb) {
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_buf = &(msm_vb2->vb2_buf);
+ if (vb2_buf == vb)
+ break;
+ }
+ if (vb2_buf != vb) {
+ pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%p\n",
+ session_id, stream_id, vb);
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return -EINVAL;
+ }
+ msm_vb2 =
+ container_of(vb, struct msm_vb2_buffer, vb2_buf);
+ /* put buf before buf done */
+ if (msm_vb2->in_freeq) {
+ vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
+ msm_vb2->in_freeq = 0;
+ rc = 0;
+ } else
+ rc = -EINVAL;
+ } else {
+ pr_err(" VB buffer is NULL for ses_id=%d, str_id=%d\n",
+ session_id, stream_id);
+ rc = -EINVAL;
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return rc;
+}
+
+static int msm_vb2_flush_buf(int session_id, unsigned int stream_id)
+{
+ unsigned long flags;
+ struct msm_vb2_buffer *msm_vb2;
+ struct msm_stream *stream;
+ struct vb2_buffer *vb2_buf = NULL;
+
+ stream = msm_get_stream(session_id, stream_id);
+ if (IS_ERR_OR_NULL(stream))
+ return -EINVAL;
+ spin_lock_irqsave(&stream->stream_lock, flags);
+ list_for_each_entry(msm_vb2, &(stream->queued_list), list) {
+ vb2_buf = &(msm_vb2->vb2_buf);
+ /* Do buf done for all buffers*/
+ vb2_buffer_done(vb2_buf, VB2_BUF_STATE_DONE);
+ msm_vb2->in_freeq = 0;
+ }
+ spin_unlock_irqrestore(&stream->stream_lock, flags);
+ return 0;
+}
+
+
+int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req)
+{
+ if (!req) {
+		pr_err("%s: subdev is null\n", __func__);
+ return -EINVAL;
+ }
+
+ req->get_buf = msm_vb2_get_buf;
+ req->get_vb2_queue = msm_vb2_get_queue;
+ req->put_buf = msm_vb2_put_buf;
+ req->buf_done = msm_vb2_buf_done;
+ req->flush_buf = msm_vb2_flush_buf;
+
+ return 0;
+}
+
diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h
new file mode 100644
index 000000000000..4f6be3985bb8
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_VB_H
+#define _MSM_VB_H
+
+#include <linux/version.h>
+#include <linux/i2c.h>
+#include <linux/videodev2.h>
+#include <linux/pm_qos.h>
+#include <linux/wakelock.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mediabus.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/msmb_camera.h>
+#include <media/videobuf2-core.h>
+#include "msm.h"
+#include "msm_sd.h"
+
+struct msm_vb2_buffer {
+ /*
+	 * The vb2 buffer must be the first member of the structure
+	 * because both the v4l2 framework and the driver cast
+	 * struct msm_vb2_buffer directly to struct vb2_buffer.
+ */
+ struct vb2_buffer vb2_buf;
+ struct list_head list;
+ int in_freeq;
+};
+
+struct msm_vb2_private_data {
+ void *vaddr;
+ unsigned long size;
+	/* allocation context passed in through get_userptr */
+ void *alloc_ctx;
+};
+
+struct msm_stream {
+ struct list_head list;
+
+	/* stream index within a session; same as
+	 * stream_id, but set through s_parm */
+ unsigned int stream_id;
+ /* vb2 buffer handling */
+ struct vb2_queue *vb2_q;
+ spinlock_t stream_lock;
+ struct list_head queued_list;
+};
+
+struct vb2_ops *msm_vb2_get_q_ops(void);
+struct vb2_mem_ops *msm_vb2_get_q_mem_ops(void);
+int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req_sd);
+
+#endif /*_MSM_VB_H */
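A hypothetical sketch for the callback table above: request the vb2 callbacks
and hand a filled buffer back to videobuf2 for a given session/stream.

static struct msm_sd_req_vb2_q example_vb2_ops;

static int example_return_one_buf(int session_id, unsigned int stream_id)
{
	struct vb2_buffer *vb;
	int rc;

	rc = msm_vb2_request_cb(&example_vb2_ops);
	if (rc < 0)
		return rc;

	vb = example_vb2_ops.get_buf(session_id, stream_id);
	if (!vb)
		return -EAGAIN;

	/* ... fill the buffer ... */

	return example_vb2_ops.buf_done(vb, session_id, stream_id);
}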
diff --git a/drivers/media/platform/msm/camera_v2/pproc/Makefile b/drivers/media/platform/msm/camera_v2/pproc/Makefile
new file mode 100644
index 000000000000..854e4e72173a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/pproc/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MSMB_CAMERA) += cpp/
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile b/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile
new file mode 100644
index 000000000000..17fb6dfd8dcd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/isp/
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+obj-$(CONFIG_MSM_CPP) += msm_cpp.o
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
new file mode 100644
index 000000000000..b529812f13b9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -0,0 +1,4403 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "MSM-CPP %s:%d " fmt, __func__, __LINE__
+
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/ion.h>
+#include <linux/proc_fs.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/clk/msm-clk.h>
+#include <media/msm_isp.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/msmb_camera.h>
+#include <media/msmb_generic_buf_mgr.h>
+#include <media/msmb_pproc.h>
+#include <linux/clk/msm-clk-provider.h>
+#include "msm_cpp.h"
+#include "msm_isp_util.h"
+#include "msm_camera_io_util.h"
+#include <linux/debugfs.h>
+#include "cam_smmu_api.h"
+#include "cam_hw_ops.h"
+
+#define MSM_CPP_DRV_NAME "msm_cpp"
+
+#define MSM_CPP_MAX_BUFF_QUEUE 16
+
+#define CONFIG_MSM_CPP_DBG 0
+
+#define ENABLE_CPP_LOW 0
+
+#define CPP_CMD_TIMEOUT_MS 300
+#define MSM_CPP_INVALID_OFFSET 0x00000000
+#define MSM_CPP_NOMINAL_CLOCK 266670000
+#define MSM_CPP_TURBO_CLOCK 320000000
+
+#define CPP_FW_VERSION_1_2_0 0x10020000
+#define CPP_FW_VERSION_1_4_0 0x10040000
+#define CPP_FW_VERSION_1_6_0 0x10060000
+#define CPP_FW_VERSION_1_8_0 0x10080000
+#define CPP_FW_VERSION_1_10_0 0x10100000
+
+/* dump the frame command before writing to the hardware */
+#define MSM_CPP_DUMP_FRM_CMD 0
+
+#define CPP_CLK_INFO_MAX 16
+
+#define MSM_CPP_IRQ_MASK_VAL 0x7c8
+
+#define CPP_GDSCR_SW_COLLAPSE_ENABLE 0xFFFFFFFE
+#define CPP_GDSCR_SW_COLLAPSE_DISABLE 0xFFFFFFFD
+#define CPP_GDSCR_HW_CONTROL_ENABLE 0x2
+#define CPP_GDSCR_HW_CONTROL_DISABLE 0x1
+#define PAYLOAD_NUM_PLANES 3
+#define TNR_MASK 0x4
+#define UBWC_MASK 0x20
+#define CDS_MASK 0x40
+#define MMU_PF_MASK 0x80
+#define POP_FRONT 1
+#define POP_BACK 0
+#define BATCH_DUP_MASK 0x100
+
+#define CPP_DT_READ_U32_ERR(_dev, _key, _str, _ret, _out) { \
+ _key = _str; \
+ _ret = of_property_read_u32(_dev, _key, &_out); \
+ if (_ret) \
+ break; \
+ }
+
+#define CPP_DT_READ_U32(_dev, _str, _out) { \
+ of_property_read_u32(_dev, _str, &_out); \
+ }
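A hypothetical usage sketch for the device-tree helper macros above (the
property names here are made up): the _ERR variant breaks out of the
enclosing loop on the first read failure, leaving the failing key behind
for the error message.

static int example_read_cpp_dt(struct device_node *of_node)
{
	int rc = 0;
	const char *key = NULL;
	uint32_t min_clk = 0, max_clk = 0;

	do {
		CPP_DT_READ_U32_ERR(of_node, key, "qcom,example-min-clk",
			rc, min_clk);
		CPP_DT_READ_U32_ERR(of_node, key, "qcom,example-max-clk",
			rc, max_clk);
	} while (0);

	if (rc)
		pr_err("failed to read %s\n", key);
	return rc;
}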
+
+static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
+ uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info);
+static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
+ struct msm_queue_cmd *frame_qcmd);
+static int msm_cpp_send_command_to_hardware(struct cpp_device *cpp_dev,
+ uint32_t *cmd_msg, uint32_t payload_size);
+
+static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
+ bool status);
+#if CONFIG_MSM_CPP_DBG
+#define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CPP_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define CPP_LOW(fmt, args...) do { \
+ if (ENABLE_CPP_LOW) \
+ pr_info(fmt, ##args); \
+ } while (0)
+
+#define ERR_USER_COPY(to) pr_err("copy %s user\n", \
+ ((to) ? "to" : "from"))
+#define ERR_COPY_FROM_USER() ERR_USER_COPY(0)
+
+/* CPP bus bandwidth definitions */
+static struct msm_bus_vectors msm_cpp_init_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_CPP,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors msm_cpp_ping_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_CPP,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+static struct msm_bus_vectors msm_cpp_pong_vectors[] = {
+ {
+ .src = MSM_BUS_MASTER_CPP,
+ .dst = MSM_BUS_SLAVE_EBI_CH0,
+ .ab = 0,
+ .ib = 0,
+ },
+};
+
+
+
+static struct msm_bus_paths msm_cpp_bus_client_config[] = {
+ {
+ ARRAY_SIZE(msm_cpp_init_vectors),
+ msm_cpp_init_vectors,
+ },
+ {
+ ARRAY_SIZE(msm_cpp_ping_vectors),
+ msm_cpp_ping_vectors,
+ },
+ {
+ ARRAY_SIZE(msm_cpp_pong_vectors),
+ msm_cpp_pong_vectors,
+ },
+};
+
+static struct msm_bus_scale_pdata msm_cpp_bus_scale_data = {
+ msm_cpp_bus_client_config,
+ ARRAY_SIZE(msm_cpp_bus_client_config),
+ .name = "msm_camera_cpp",
+};
+
+#define msm_dequeue(queue, member, pop_dir) ({ \
+ unsigned long flags; \
+ struct msm_device_queue *__q = (queue); \
+ struct msm_queue_cmd *qcmd = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ __q->len--; \
+ qcmd = pop_dir ? list_first_entry(&__q->list, \
+ struct msm_queue_cmd, member) : \
+ list_last_entry(&__q->list, \
+ struct msm_queue_cmd, member); \
+ list_del_init(&qcmd->member); \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+ qcmd; \
+})
+
+#define MSM_CPP_MAX_TIMEOUT_TRIAL 1
+
+struct msm_cpp_timer_data_t {
+ struct cpp_device *cpp_dev;
+ struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
+ spinlock_t processed_frame_lock;
+};
+
+struct msm_cpp_timer_t {
+ atomic_t used;
+ struct msm_cpp_timer_data_t data;
+ struct timer_list cpp_timer;
+};
+
+struct msm_cpp_timer_t cpp_timer;
+static void msm_cpp_set_vbif_reg_values(struct cpp_device *cpp_dev);
+
+static int msm_cpp_init_bandwidth_mgr(struct cpp_device *cpp_dev)
+{
+ int rc = 0;
+
+ cpp_dev->bus_client =
+ msm_bus_scale_register_client(&msm_cpp_bus_scale_data);
+ if (!cpp_dev->bus_client) {
+ pr_err("Fail to register bus client\n");
+ return -ENOENT;
+ }
+
+ rc = msm_bus_scale_client_update_request(cpp_dev->bus_client, 0);
+ if (rc < 0) {
+ msm_bus_scale_unregister_client(cpp_dev->bus_client);
+ cpp_dev->bus_client = 0;
+ pr_err("Fail bus scale update %d\n", rc);
+ return -EINVAL;
+ }
+ cpp_dev->bus_idx = 1;
+
+ return 0;
+}
+
+static int msm_cpp_update_bandwidth(struct cpp_device *cpp_dev,
+ uint64_t ab, uint64_t ib)
+{
+
+ int rc;
+ struct msm_bus_paths *path;
+
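+	/* Toggle between the ping (1) and pong (2) bus vectors so the new
+	 * ab/ib vote is programmed into the client slot that is not
+	 * currently active. */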
+ cpp_dev->bus_idx = 3 - cpp_dev->bus_idx;
+
+ path = &(msm_cpp_bus_scale_data.usecase[cpp_dev->bus_idx]);
+ path->vectors[0].ab = ab;
+ path->vectors[0].ib = ib;
+
+ rc = msm_bus_scale_client_update_request(cpp_dev->bus_client,
+ cpp_dev->bus_idx);
+ if (rc < 0) {
+ pr_err("Fail bus scale update %d\n", rc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void msm_cpp_deinit_bandwidth_mgr(struct cpp_device *cpp_dev)
+{
+ if (cpp_dev->bus_client) {
+ msm_bus_scale_unregister_client(cpp_dev->bus_client);
+ cpp_dev->bus_client = 0;
+ }
+}
+
+static int msm_cpp_update_bandwidth_setting(struct cpp_device *cpp_dev,
+ uint64_t ab, uint64_t ib) {
+ int rc;
+ if (cpp_dev->bus_master_flag)
+ rc = msm_cpp_update_bandwidth(cpp_dev, ab, ib);
+ else
+ rc = msm_isp_update_bandwidth(ISP_CPP, ab, ib);
+ return rc;
+}
+
+static void msm_queue_init(struct msm_device_queue *queue, const char *name)
+{
+ CPP_DBG("E\n");
+ spin_lock_init(&queue->lock);
+ queue->len = 0;
+ queue->max = 0;
+ queue->name = name;
+ INIT_LIST_HEAD(&queue->list);
+ init_waitqueue_head(&queue->wait);
+}
+
+static void msm_enqueue(struct msm_device_queue *queue,
+ struct list_head *entry)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&queue->lock, flags);
+ queue->len++;
+ if (queue->len > queue->max) {
+ queue->max = queue->len;
+ pr_debug("queue %s new max is %d\n", queue->name, queue->max);
+ }
+ list_add_tail(entry, &queue->list);
+ wake_up(&queue->wait);
+ CPP_DBG("woke up %s\n", queue->name);
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+#define msm_cpp_empty_list(queue, member) { \
+ unsigned long flags; \
+ struct msm_queue_cmd *qcmd = NULL; \
+ if (queue) { \
+ spin_lock_irqsave(&queue->lock, flags); \
+ while (!list_empty(&queue->list)) { \
+ queue->len--; \
+ qcmd = list_first_entry(&queue->list, \
+ struct msm_queue_cmd, member); \
+ list_del_init(&qcmd->member); \
+ kfree(qcmd); \
+ } \
+ spin_unlock_irqrestore(&queue->lock, flags); \
+ } \
+}
+
+static struct msm_cam_clk_info cpp_clk_info[CPP_CLK_INFO_MAX];
+
+static int get_clock_index(const char *clk_name)
+{
+ uint32_t i = 0;
+ for (i = 0; i < ARRAY_SIZE(cpp_clk_info); i++) {
+ if (!strcmp(clk_name, cpp_clk_info[i].clk_name))
+ return i;
+ }
+ return -EINVAL;
+}
+
+
+static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev,
+ uint8_t put_buf);
+static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin);
+static void cpp_timer_callback(unsigned long data);
+
+uint8_t induce_error;
+static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev);
+
+static void msm_cpp_write(u32 data, void __iomem *cpp_base)
+{
+ msm_camera_io_w((data), cpp_base + MSM_CPP_MICRO_FIFO_RX_DATA);
+}
+
+static void msm_cpp_clear_timer(struct cpp_device *cpp_dev)
+{
+ uint32_t i = 0;
+
+ if (atomic_read(&cpp_timer.used)) {
+ atomic_set(&cpp_timer.used, 0);
+ del_timer(&cpp_timer.cpp_timer);
+ for (i = 0; i < MAX_CPP_PROCESSING_FRAME; i++)
+ cpp_timer.data.processed_frame[i] = NULL;
+ cpp_dev->timeout_trial_cnt = 0;
+ }
+}
+
+static void msm_cpp_timer_queue_update(struct cpp_device *cpp_dev)
+{
+ uint32_t i;
+ unsigned long flags;
+ CPP_DBG("Frame done qlen %d\n", cpp_dev->processing_q.len);
+ if (cpp_dev->processing_q.len <= 1) {
+ msm_cpp_clear_timer(cpp_dev);
+ } else {
+ spin_lock_irqsave(&cpp_timer.data.processed_frame_lock, flags);
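+		/* The oldest in-flight frame completed: shift the remaining
+		 * entries up and re-arm the watchdog for the new head of the
+		 * processing queue. */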
+ for (i = 0; i < cpp_dev->processing_q.len - 1; i++)
+ cpp_timer.data.processed_frame[i] =
+ cpp_timer.data.processed_frame[i + 1];
+ cpp_timer.data.processed_frame[i] = NULL;
+ cpp_dev->timeout_trial_cnt = 0;
+ spin_unlock_irqrestore(&cpp_timer.data.processed_frame_lock,
+ flags);
+
+ mod_timer(&cpp_timer.cpp_timer,
+ jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
+ }
+}
+
+static uint32_t msm_cpp_read(void __iomem *cpp_base)
+{
+ uint32_t tmp, retry = 0;
+ do {
+ tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_TX_STAT);
+ } while (((tmp & 0x2) == 0x0) && (retry++ < 10));
+ if (retry < 10) {
+ tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_TX_DATA);
+ CPP_DBG("Read data: 0%x\n", tmp);
+ } else {
+ CPP_DBG("Read failed\n");
+ tmp = 0xDEADBEEF;
+ }
+
+ return tmp;
+}
+
+static struct msm_cpp_buff_queue_info_t *msm_cpp_get_buff_queue_entry(
+ struct cpp_device *cpp_dev, uint32_t session_id, uint32_t stream_id)
+{
+ uint32_t i = 0;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info = NULL;
+
+ for (i = 0; i < cpp_dev->num_buffq; i++) {
+ if ((cpp_dev->buff_queue[i].used == 1) &&
+ (cpp_dev->buff_queue[i].session_id == session_id) &&
+ (cpp_dev->buff_queue[i].stream_id == stream_id)) {
+ buff_queue_info = &cpp_dev->buff_queue[i];
+ break;
+ }
+ }
+
+ if (buff_queue_info == NULL) {
+ CPP_DBG("buffer queue entry for sess:%d strm:%d not found\n",
+ session_id, stream_id);
+ }
+ return buff_queue_info;
+}
+
+static unsigned long msm_cpp_get_phy_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
+ uint8_t native_buff, int32_t *fd)
+{
+ unsigned long phy_add = 0;
+ struct list_head *buff_head;
+ struct msm_cpp_buffer_map_list_t *buff, *save;
+
+ if (native_buff)
+ buff_head = &buff_queue_info->native_buff_head;
+ else
+ buff_head = &buff_queue_info->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buff_index) {
+ phy_add = buff->map_info.phy_addr;
+ *fd = buff->map_info.buff_info.fd;
+ break;
+ }
+ }
+
+ return phy_add;
+}
+
+static unsigned long msm_cpp_queue_buffer_info(struct cpp_device *cpp_dev,
+ struct msm_cpp_buff_queue_info_t *buff_queue,
+ struct msm_cpp_buffer_info_t *buffer_info)
+{
+ struct list_head *buff_head;
+ struct msm_cpp_buffer_map_list_t *buff, *save;
+ int rc = 0;
+
+ if (buffer_info->native_buff)
+ buff_head = &buff_queue->native_buff_head;
+ else
+ buff_head = &buff_queue->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buffer_info->index) {
+ pr_err("error buffer index already queued\n");
+ goto error;
+ }
+ }
+
+ buff = kzalloc(
+ sizeof(struct msm_cpp_buffer_map_list_t), GFP_KERNEL);
+ if (!buff) {
+ pr_err("error allocating memory\n");
+ goto error;
+ }
+ buff->map_info.buff_info = *buffer_info;
+
+ buff->map_info.buf_fd = buffer_info->fd;
+ rc = cam_smmu_get_phy_addr(cpp_dev->iommu_hdl, buffer_info->fd,
+ CAM_SMMU_MAP_RW, &buff->map_info.phy_addr,
+ (size_t *)&buff->map_info.len);
+ if (rc < 0) {
+ pr_err("ION mmap failed\n");
+ kzfree(buff);
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&buff->entry);
+ list_add_tail(&buff->entry, buff_head);
+
+ return buff->map_info.phy_addr;
+error:
+ return 0;
+}
+
+static void msm_cpp_dequeue_buffer_info(struct cpp_device *cpp_dev,
+ struct msm_cpp_buffer_map_list_t *buff)
+{
+ int ret = -1;
+ ret = cam_smmu_put_phy_addr(cpp_dev->iommu_hdl, buff->map_info.buf_fd);
+ if (ret < 0)
+ pr_err("Error: cannot put the iommu handle back to ion fd\n");
+
+ list_del_init(&buff->entry);
+ kzfree(buff);
+}
+
+static unsigned long msm_cpp_fetch_buffer_info(struct cpp_device *cpp_dev,
+ struct msm_cpp_buffer_info_t *buffer_info, uint32_t session_id,
+ uint32_t stream_id, int32_t *fd)
+{
+ unsigned long phy_addr = 0;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info;
+ uint8_t native_buff = buffer_info->native_buff;
+
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev, session_id,
+ stream_id);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ session_id, stream_id);
+ return phy_addr;
+ }
+
+ phy_addr = msm_cpp_get_phy_addr(cpp_dev, buff_queue_info,
+ buffer_info->index, native_buff, fd);
+ if ((phy_addr == 0) && (native_buff)) {
+ phy_addr = msm_cpp_queue_buffer_info(cpp_dev, buff_queue_info,
+ buffer_info);
+ *fd = buffer_info->fd;
+ }
+ return phy_addr;
+}
+
+static int32_t msm_cpp_dequeue_buff_info_list(struct cpp_device *cpp_dev,
+ struct msm_cpp_buff_queue_info_t *buff_queue_info)
+{
+ struct msm_cpp_buffer_map_list_t *buff, *save;
+ struct list_head *buff_head;
+
+ buff_head = &buff_queue_info->native_buff_head;
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ msm_cpp_dequeue_buffer_info(cpp_dev, buff);
+ }
+
+ buff_head = &buff_queue_info->vb2_buff_head;
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ msm_cpp_dequeue_buffer_info(cpp_dev, buff);
+ }
+
+ return 0;
+}
+
+static int32_t msm_cpp_dequeue_buff(struct cpp_device *cpp_dev,
+ struct msm_cpp_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
+ uint8_t native_buff)
+{
+ struct msm_cpp_buffer_map_list_t *buff, *save;
+ struct list_head *buff_head;
+
+ if (native_buff)
+ buff_head = &buff_queue_info->native_buff_head;
+ else
+ buff_head = &buff_queue_info->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buff_index) {
+ msm_cpp_dequeue_buffer_info(cpp_dev, buff);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int32_t msm_cpp_add_buff_queue_entry(struct cpp_device *cpp_dev,
+ uint16_t session_id, uint16_t stream_id)
+{
+ uint32_t i;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info;
+
+ for (i = 0; i < cpp_dev->num_buffq; i++) {
+ if (cpp_dev->buff_queue[i].used == 0) {
+ buff_queue_info = &cpp_dev->buff_queue[i];
+ buff_queue_info->used = 1;
+ buff_queue_info->session_id = session_id;
+ buff_queue_info->stream_id = stream_id;
+ INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
+ INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
+ return 0;
+ }
+ }
+ pr_err("buffer queue full. error for sessionid: %d streamid: %d\n",
+ session_id, stream_id);
+ return -EINVAL;
+}
+
+static int32_t msm_cpp_free_buff_queue_entry(struct cpp_device *cpp_dev,
+ uint32_t session_id, uint32_t stream_id)
+{
+ struct msm_cpp_buff_queue_info_t *buff_queue_info;
+
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev, session_id,
+ stream_id);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ session_id, stream_id);
+ return -EINVAL;
+ }
+
+ buff_queue_info->used = 0;
+ buff_queue_info->session_id = 0;
+ buff_queue_info->stream_id = 0;
+ INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
+ INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
+ return 0;
+}
+
+static int32_t msm_cpp_create_buff_queue(struct cpp_device *cpp_dev,
+ uint32_t num_buffq)
+{
+ struct msm_cpp_buff_queue_info_t *buff_queue;
+ buff_queue = kzalloc(
+ sizeof(struct msm_cpp_buff_queue_info_t) * num_buffq,
+ GFP_KERNEL);
+ if (!buff_queue) {
+ pr_err("Buff queue allocation failure\n");
+ return -ENOMEM;
+ }
+
+ if (cpp_dev->buff_queue) {
+ pr_err("Buff queue not empty\n");
+ kzfree(buff_queue);
+ return -EINVAL;
+ } else {
+ cpp_dev->buff_queue = buff_queue;
+ cpp_dev->num_buffq = num_buffq;
+ }
+ return 0;
+}
+
+static void msm_cpp_delete_buff_queue(struct cpp_device *cpp_dev)
+{
+ uint32_t i;
+
+ for (i = 0; i < cpp_dev->num_buffq; i++) {
+ if (cpp_dev->buff_queue[i].used == 1) {
+ pr_warn("Queue not free sessionid: %d, streamid: %d\n",
+ cpp_dev->buff_queue[i].session_id,
+ cpp_dev->buff_queue[i].stream_id);
+ msm_cpp_dequeue_buff_info_list
+ (cpp_dev, &cpp_dev->buff_queue[i]);
+ msm_cpp_free_buff_queue_entry(cpp_dev,
+ cpp_dev->buff_queue[i].session_id,
+ cpp_dev->buff_queue[i].stream_id);
+ }
+ }
+ kzfree(cpp_dev->buff_queue);
+ cpp_dev->buff_queue = NULL;
+ cpp_dev->num_buffq = 0;
+}
+
+static int32_t msm_cpp_poll(void __iomem *cpp_base, u32 val)
+{
+ uint32_t tmp, retry = 0;
+	int32_t rc = 0;
+
+	do {
+ tmp = msm_cpp_read(cpp_base);
+ if (tmp != 0xDEADBEEF)
+			CPP_LOW("poll: 0x%x\n", tmp);
+ usleep_range(200, 250);
+ } while ((tmp != val) && (retry++ < MSM_CPP_POLL_RETRIES));
+ if (retry < MSM_CPP_POLL_RETRIES) {
+ CPP_LOW("Poll finished\n");
+ } else {
+ pr_err("Poll failed: expect: 0x%x\n", val);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static int32_t msm_cpp_poll_rx_empty(void __iomem *cpp_base)
+{
+ uint32_t tmp, retry = 0;
+ int32_t rc = 0;
+
+ tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_RX_STAT);
+ while (((tmp & 0x2) != 0x0) && (retry++ < MSM_CPP_POLL_RETRIES)) {
+ /*
+ * Below usleep values are chosen based on experiments
+ * and this was the smallest number which works. This
+ * sleep is needed to leave enough time for Microcontroller
+ * to read rx fifo.
+ */
+ usleep_range(200, 300);
+ tmp = msm_camera_io_r(cpp_base + MSM_CPP_MICRO_FIFO_RX_STAT);
+ }
+
+ if (retry < MSM_CPP_POLL_RETRIES) {
+ CPP_LOW("Poll rx empty\n");
+ } else {
+ pr_err("Poll rx empty failed\n");
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static int cpp_init_mem(struct cpp_device *cpp_dev)
+{
+ int rc = 0;
+ int iommu_hdl;
+
+ if (cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_5_0_0 ||
+ cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_5_1_0)
+ rc = cam_smmu_get_handle("cpp_0", &iommu_hdl);
+ else
+ rc = cam_smmu_get_handle("cpp", &iommu_hdl);
+
+ if (rc < 0)
+ return -ENODEV;
+
+ cpp_dev->iommu_hdl = iommu_hdl;
+ return 0;
+}
+
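+/*
+ * IRQ top half: on a TX interrupt, drain the microcontroller TX FIFO
+ * into a per-device queue entry and schedule the tasklet; other status
+ * bits are only logged. The IRQ status is always cleared on exit.
+ */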
+static irqreturn_t msm_cpp_irq(int irq_num, void *data)
+{
+ unsigned long flags;
+ uint32_t tx_level;
+ uint32_t irq_status;
+ uint32_t i;
+ uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
+ struct cpp_device *cpp_dev = data;
+ struct msm_cpp_tasklet_queue_cmd *queue_cmd;
+
+	irq_status = msm_camera_io_r(cpp_dev->base +
+		MSM_CPP_MICRO_IRQGEN_STAT);
+
+ if (irq_status & 0x8) {
+ tx_level = msm_camera_io_r(cpp_dev->base +
+ MSM_CPP_MICRO_FIFO_TX_STAT) >> 2;
+ for (i = 0; i < tx_level; i++) {
+ tx_fifo[i] = msm_camera_io_r(cpp_dev->base +
+ MSM_CPP_MICRO_FIFO_TX_DATA);
+ }
+ spin_lock_irqsave(&cpp_dev->tasklet_lock, flags);
+ queue_cmd = &cpp_dev->tasklet_queue_cmd[cpp_dev->taskletq_idx];
+ if (queue_cmd->cmd_used) {
+ pr_err("%s:%d] cpp tasklet queue overflow tx %d rc %x",
+ __func__, __LINE__, tx_level, irq_status);
+ list_del(&queue_cmd->list);
+ } else {
+ atomic_add(1, &cpp_dev->irq_cnt);
+ }
+ queue_cmd->irq_status = irq_status;
+ queue_cmd->tx_level = tx_level;
+ memset(&queue_cmd->tx_fifo[0], 0, sizeof(queue_cmd->tx_fifo));
+ for (i = 0; i < tx_level; i++)
+ queue_cmd->tx_fifo[i] = tx_fifo[i];
+
+ queue_cmd->cmd_used = 1;
+ cpp_dev->taskletq_idx =
+ (cpp_dev->taskletq_idx + 1) % MSM_CPP_TASKLETQ_SIZE;
+ list_add_tail(&queue_cmd->list, &cpp_dev->tasklet_q);
+ spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
+
+ tasklet_schedule(&cpp_dev->cpp_tasklet);
+ } else if (irq_status & 0x7C0) {
+ pr_debug("irq_status: 0x%x\n", irq_status);
+ pr_debug("DEBUG_SP: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x40));
+ pr_debug("DEBUG_T: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x44));
+ pr_debug("DEBUG_N: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x48));
+ pr_debug("DEBUG_R: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x4C));
+ pr_debug("DEBUG_OPPC: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x50));
+ pr_debug("DEBUG_MO: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x54));
+ pr_debug("DEBUG_TIMER0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x60));
+ pr_debug("DEBUG_TIMER1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x64));
+ pr_debug("DEBUG_GPI: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x70));
+ pr_debug("DEBUG_GPO: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x74));
+ pr_debug("DEBUG_T0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x80));
+ pr_debug("DEBUG_R0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x84));
+ pr_debug("DEBUG_T1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x88));
+ pr_debug("DEBUG_R1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->base + 0x8C));
+ }
+ msm_camera_io_w(irq_status, cpp_dev->base + MSM_CPP_MICRO_IRQGEN_CLR);
+ return IRQ_HANDLED;
+}
+
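+/*
+ * Tasklet bottom half: parse the queued TX FIFO words. Each command is
+ * framed as MSM_CPP_MSG_ID_CMD, a length word and a message id; frame
+ * ACK/NACK messages update the CPP timer and signal frame completion.
+ */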
+void msm_cpp_do_tasklet(unsigned long data)
+{
+ unsigned long flags;
+ uint32_t irq_status;
+ uint32_t tx_level;
+ uint32_t msg_id, cmd_len;
+ uint32_t i;
+ uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
+ struct cpp_device *cpp_dev = (struct cpp_device *) data;
+ struct msm_cpp_tasklet_queue_cmd *queue_cmd;
+
+ while (atomic_read(&cpp_dev->irq_cnt)) {
+ spin_lock_irqsave(&cpp_dev->tasklet_lock, flags);
+		if (list_empty(&cpp_dev->tasklet_q)) {
+			atomic_set(&cpp_dev->irq_cnt, 0);
+			spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
+			return;
+		}
+		queue_cmd = list_first_entry(&cpp_dev->tasklet_q,
+			struct msm_cpp_tasklet_queue_cmd, list);
+ atomic_sub(1, &cpp_dev->irq_cnt);
+ list_del(&queue_cmd->list);
+ queue_cmd->cmd_used = 0;
+ irq_status = queue_cmd->irq_status;
+ tx_level = queue_cmd->tx_level;
+ for (i = 0; i < tx_level; i++)
+ tx_fifo[i] = queue_cmd->tx_fifo[i];
+
+ spin_unlock_irqrestore(&cpp_dev->tasklet_lock, flags);
+
+ for (i = 0; i < tx_level; i++) {
+ if (tx_fifo[i] == MSM_CPP_MSG_ID_CMD) {
+ cmd_len = tx_fifo[i+1];
+ msg_id = tx_fifo[i+2];
+ if (msg_id == MSM_CPP_MSG_ID_FRAME_ACK) {
+ CPP_DBG("Frame done!!\n");
+ /* delete CPP timer */
+ CPP_DBG("delete timer.\n");
+ msm_cpp_timer_queue_update(cpp_dev);
+ msm_cpp_notify_frame_done(cpp_dev, 0);
+ } else if (msg_id ==
+ MSM_CPP_MSG_ID_FRAME_NACK) {
+ pr_err("NACK error from hw!!\n");
+ CPP_DBG("delete timer.\n");
+ msm_cpp_timer_queue_update(cpp_dev);
+ msm_cpp_notify_frame_done(cpp_dev, 0);
+ }
+ i += cmd_len + 2;
+ }
+ }
+ }
+}
+
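+/*
+ * Build the CPP core clock frequency table from the clock's rate list,
+ * keeping only rates at or above min_clk_rate.
+ */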
+static void cpp_get_clk_freq_tbl(struct clk *clk, struct cpp_hw_info *hw_info,
+ uint32_t min_clk_rate)
+{
+ uint32_t i;
+ uint32_t idx = 0;
+ signed long freq_tbl_entry = 0;
+
+ if ((clk == NULL) || (hw_info == NULL) || (clk->ops == NULL) ||
+ (clk->ops->list_rate == NULL)) {
+ pr_err("Bad parameter\n");
+ return;
+ }
+
+ for (i = 0; i < MAX_FREQ_TBL; i++) {
+ freq_tbl_entry = clk->ops->list_rate(clk, i);
+ pr_debug("entry=%ld\n", freq_tbl_entry);
+ if (freq_tbl_entry >= 0) {
+ if (freq_tbl_entry >= min_clk_rate) {
+ hw_info->freq_tbl[idx++] = freq_tbl_entry;
+ pr_debug("tbl[%d]=%ld\n", idx-1,
+ freq_tbl_entry);
+ }
+ } else {
+ pr_debug("freq table returned invalid entry/end %ld\n",
+ freq_tbl_entry);
+ break;
+ }
+ }
+
+	pr_debug("%s: idx %d\n", __func__, idx);
+ hw_info->freq_tbl_count = idx;
+}
+
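+/*
+ * Read the firmware payload layout (stripe/plane base offsets, sizes
+ * and the various pointer offsets) from the qcom,cpp-fw-payload-info
+ * device tree node into cpp_dev->payload_params.
+ */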
+static int msm_cpp_read_payload_params_from_dt(struct cpp_device *cpp_dev)
+{
+ struct platform_device *pdev = cpp_dev->pdev;
+ struct device_node *fw_info_node = NULL, *dev_node = NULL;
+ char *key = "qcom,cpp-fw-payload-info";
+ struct msm_cpp_payload_params *payload_params;
+ int ret = 0;
+
+ if (!pdev || !pdev->dev.of_node) {
+ pr_err("%s: Invalid platform device/node\n", __func__);
+ ret = -ENODEV;
+ goto no_cpp_node;
+ }
+
+ dev_node = pdev->dev.of_node;
+ fw_info_node = of_find_node_by_name(dev_node, key);
+ if (!fw_info_node) {
+ ret = -ENODEV;
+ goto no_binding;
+ }
+ payload_params = &cpp_dev->payload_params;
+ memset(payload_params, 0x0, sizeof(struct msm_cpp_payload_params));
+
+ do {
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,stripe-base", ret,
+ payload_params->stripe_base);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,plane-base", ret,
+ payload_params->plane_base);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,stripe-size", ret,
+ payload_params->stripe_size);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,plane-size", ret,
+ payload_params->plane_size);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,fe-ptr-off", ret,
+ payload_params->rd_pntr_off);
+ CPP_DT_READ_U32_ERR(fw_info_node, key, "qcom,we-ptr-off", ret,
+ payload_params->wr_0_pntr_off);
+
+ CPP_DT_READ_U32(fw_info_node, "qcom,ref-fe-ptr-off",
+ payload_params->rd_ref_pntr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,ref-we-ptr-off",
+ payload_params->wr_ref_pntr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,we-meta-ptr-off",
+ payload_params->wr_0_meta_data_wr_pntr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,fe-mmu-pf-ptr-off",
+ payload_params->fe_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,ref-fe-mmu-pf-ptr-off",
+ payload_params->ref_fe_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,we-mmu-pf-ptr-off",
+ payload_params->we_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,dup-we-mmu-pf-ptr-off",
+ payload_params->dup_we_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,ref-we-mmu-pf-ptr-off",
+ payload_params->ref_we_mmu_pf_ptr_off);
+ CPP_DT_READ_U32(fw_info_node, "qcom,set-group-buffer-len",
+ payload_params->set_group_buffer_len);
+ CPP_DT_READ_U32(fw_info_node, "qcom,dup-frame-indicator-off",
+ payload_params->dup_frame_indicator_off);
+ } while (0);
+
+no_binding:
+ if (ret)
+ pr_err("%s: Error reading binding %s, ret %d\n",
+ __func__, key, ret);
+no_cpp_node:
+ return ret;
+}
+
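+/*
+ * Bring up the CPP: vote for AHB and bus bandwidth, enable regulators
+ * and clocks, map the register regions, request the IRQ, read the
+ * hardware version and load the microcontroller firmware.
+ */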
+static int cpp_init_hardware(struct cpp_device *cpp_dev)
+{
+ int rc = 0;
+	int32_t msm_cpp_core_clk_idx;
+	int32_t msm_micro_iface_idx;
+ uint32_t vbif_version;
+
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_CPP, CAMERA_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto ahb_vote_fail;
+ }
+
+ if (cpp_dev->bus_master_flag)
+ rc = msm_cpp_init_bandwidth_mgr(cpp_dev);
+ else
+ rc = msm_isp_init_bandwidth_mgr(ISP_CPP);
+ if (rc < 0) {
+ pr_err("%s: Bandwidth registration Failed!\n", __func__);
+ goto bus_scale_register_failed;
+ }
+
+ if (of_get_property(cpp_dev->pdev->dev.of_node,
+ "mmagic-vdd-supply", NULL) &&
+ (cpp_dev->fs_mmagic_camss == NULL)) {
+ cpp_dev->fs_mmagic_camss = regulator_get(&cpp_dev->pdev->dev,
+ "mmagic-vdd");
+ if (IS_ERR(cpp_dev->fs_mmagic_camss)) {
+ pr_debug("%s: Regulator mmagic get failed %ld\n",
+ __func__, PTR_ERR(cpp_dev->fs_mmagic_camss));
+ cpp_dev->fs_mmagic_camss = NULL;
+ rc = -ENODEV;
+ goto fs_mmagic_failed;
+ }
+ rc = regulator_enable(cpp_dev->fs_mmagic_camss);
+ if (rc) {
+ pr_err("%s: Regulator enable mmagic camss failed\n",
+ __func__);
+ regulator_put(cpp_dev->fs_mmagic_camss);
+ cpp_dev->fs_mmagic_camss = NULL;
+ goto fs_mmagic_failed;
+ }
+ }
+
+ if (of_get_property(cpp_dev->pdev->dev.of_node,
+ "camss-vdd-supply", NULL) &&
+ (cpp_dev->fs_camss == NULL)) {
+ cpp_dev->fs_camss = regulator_get(&cpp_dev->pdev->dev,
+ "camss-vdd");
+ if (IS_ERR(cpp_dev->fs_camss)) {
+ pr_err("%s: Regulator camss get failed %ld\n",
+ __func__, PTR_ERR(cpp_dev->fs_camss));
+ cpp_dev->fs_camss = NULL;
+ rc = -ENODEV;
+ goto fs_camss_failed;
+ }
+ rc = regulator_enable(cpp_dev->fs_camss);
+ if (rc) {
+ pr_err("%s: Regulator enable camss failed\n", __func__);
+ regulator_put(cpp_dev->fs_camss);
+ cpp_dev->fs_camss = NULL;
+ goto fs_camss_failed;
+ }
+ }
+
+ if (cpp_dev->fs_cpp == NULL) {
+ cpp_dev->fs_cpp =
+ regulator_get(&cpp_dev->pdev->dev, "vdd");
+ if (IS_ERR(cpp_dev->fs_cpp)) {
+ pr_err("Regulator cpp vdd get failed %ld\n",
+ PTR_ERR(cpp_dev->fs_cpp));
+ rc = -ENODEV;
+ cpp_dev->fs_cpp = NULL;
+ goto fs_failed;
+ }
+
+ rc = regulator_enable(cpp_dev->fs_cpp);
+ if (rc != 0) {
+ pr_err("Regulator cpp vdd enable failed\n");
+ regulator_put(cpp_dev->fs_cpp);
+ cpp_dev->fs_cpp = NULL;
+ goto fs_failed;
+ }
+ }
+ msm_micro_iface_idx = get_clock_index("micro_iface_clk");
+ if (msm_micro_iface_idx < 0) {
+ pr_err("Fail to get clock index\n");
+ rc = msm_micro_iface_idx;
+ goto clk_failed;
+ }
+
+ cpp_dev->cpp_clk[msm_micro_iface_idx] =
+ clk_get(&cpp_dev->pdev->dev,
+ cpp_clk_info[msm_micro_iface_idx].clk_name);
+ if (IS_ERR(cpp_dev->cpp_clk[msm_micro_iface_idx])) {
+ pr_err("%s get failed\n",
+ cpp_clk_info[msm_micro_iface_idx].clk_name);
+ rc = PTR_ERR(cpp_dev->cpp_clk[msm_micro_iface_idx]);
+ goto remap_failed;
+ }
+
+ rc = clk_reset(cpp_dev->cpp_clk[msm_micro_iface_idx],
+ CLK_RESET_ASSERT);
+ if (rc) {
+ pr_err("%s:micro_iface_clk assert failed\n",
+ __func__);
+ clk_put(cpp_dev->cpp_clk[msm_micro_iface_idx]);
+ goto remap_failed;
+ }
+	/*
+	 * Below usleep values are chosen based on experiments and this
+	 * was the smallest number which works. This sleep is needed to
+	 * leave enough time for the microcontroller to reset all its
+	 * registers.
+	 */
+ usleep_range(1000, 1200);
+
+ rc = clk_reset(cpp_dev->cpp_clk[msm_micro_iface_idx],
+ CLK_RESET_DEASSERT);
+ if (rc) {
+		pr_err("%s: micro_iface_clk deassert failed\n", __func__);
+ clk_put(cpp_dev->cpp_clk[msm_micro_iface_idx]);
+ goto remap_failed;
+ }
+	/*
+	 * Below usleep values are chosen based on experiments and this
+	 * was the smallest number which works. This sleep is needed to
+	 * leave enough time for the microcontroller to reset all its
+	 * registers.
+	 */
+ usleep_range(1000, 1200);
+
+ clk_put(cpp_dev->cpp_clk[msm_micro_iface_idx]);
+
+ rc = msm_cam_clk_enable(&cpp_dev->pdev->dev, cpp_clk_info,
+ cpp_dev->cpp_clk, cpp_dev->num_clk, 1);
+ if (rc < 0) {
+ pr_err("clk enable failed\n");
+ goto clk_failed;
+ }
+
+ if (cpp_dev->camss_cpp != NULL) {
+ cpp_dev->camss_cpp_base = ioremap(cpp_dev->camss_cpp->start,
+ resource_size(cpp_dev->camss_cpp));
+ if (!cpp_dev->camss_cpp_base) {
+ rc = -ENOMEM;
+ pr_err("ioremap failed\n");
+ goto remap_failed;
+ }
+ }
+
+ cpp_dev->base = ioremap(cpp_dev->mem->start,
+ resource_size(cpp_dev->mem));
+ if (!cpp_dev->base) {
+ rc = -ENOMEM;
+ pr_err("ioremap failed\n");
+ goto remap_failed;
+ }
+
+ cpp_dev->vbif_base = ioremap(cpp_dev->vbif_mem->start,
+ resource_size(cpp_dev->vbif_mem));
+ if (!cpp_dev->vbif_base) {
+ rc = -ENOMEM;
+ pr_err("ioremap failed\n");
+ goto vbif_remap_failed;
+ }
+
+ cpp_dev->cpp_hw_base = ioremap(cpp_dev->cpp_hw_mem->start,
+ resource_size(cpp_dev->cpp_hw_mem));
+ if (!cpp_dev->cpp_hw_base) {
+ rc = -ENOMEM;
+ pr_err("ioremap failed\n");
+ goto cpp_hw_remap_failed;
+ }
+
+ if (cpp_dev->state != CPP_STATE_BOOT) {
+ rc = request_irq(cpp_dev->irq->start, msm_cpp_irq,
+ IRQF_TRIGGER_RISING, "cpp", cpp_dev);
+ if (rc < 0) {
+ pr_err("irq request fail\n");
+ goto req_irq_fail;
+ }
+ cpp_dev->buf_mgr_subdev = msm_buf_mngr_get_subdev();
+
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_INIT, NULL);
+ if (rc < 0) {
+ pr_err("buf mngr init failed\n");
+ free_irq(cpp_dev->irq->start, cpp_dev);
+ goto req_irq_fail;
+ }
+ }
+
+ rc = msm_cpp_update_gdscr_status(cpp_dev, true);
+ if (rc < 0) {
+ pr_err("update cpp gdscr status failed\n");
+ goto req_irq_fail;
+ }
+
+ cpp_dev->hw_info.cpp_hw_version =
+ msm_camera_io_r(cpp_dev->cpp_hw_base);
+ if (cpp_dev->hw_info.cpp_hw_version == CPP_HW_VERSION_4_1_0) {
+ vbif_version = msm_camera_io_r(cpp_dev->vbif_base);
+ if (vbif_version == VBIF_VERSION_2_3_0)
+ cpp_dev->hw_info.cpp_hw_version = CPP_HW_VERSION_4_0_0;
+ }
+ pr_info("CPP HW Version: 0x%x\n", cpp_dev->hw_info.cpp_hw_version);
+ cpp_dev->hw_info.cpp_hw_caps =
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4);
+ msm_cpp_core_clk_idx = get_clock_index("cpp_core_clk");
+ if (msm_cpp_core_clk_idx < 0) {
+ pr_err("cpp_core_clk: fail to get clock index\n");
+ rc = msm_cpp_core_clk_idx;
+ goto pwr_collapse_reset;
+ }
+ cpp_get_clk_freq_tbl(cpp_dev->cpp_clk[msm_cpp_core_clk_idx],
+ &cpp_dev->hw_info, cpp_dev->min_clk_rate);
+ pr_debug("CPP HW Caps: 0x%x\n", cpp_dev->hw_info.cpp_hw_caps);
+ msm_camera_io_w(0x1, cpp_dev->vbif_base + 0x4);
+ cpp_dev->taskletq_idx = 0;
+ atomic_set(&cpp_dev->irq_cnt, 0);
+ rc = msm_cpp_create_buff_queue(cpp_dev, MSM_CPP_MAX_BUFF_QUEUE);
+ if (rc < 0) {
+ pr_err("%s: create buff queue failed with err %d\n",
+ __func__, rc);
+ goto pwr_collapse_reset;
+ }
+	pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
+ cpp_dev->stream_cnt = 0;
+ if (cpp_dev->fw_name_bin) {
+ disable_irq(cpp_dev->irq->start);
+ rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ enable_irq(cpp_dev->irq->start);
+ if (rc < 0) {
+ pr_err("%s: load firmware failure %d\n", __func__, rc);
+ goto pwr_collapse_reset;
+ }
+ msm_camera_io_w_mb(0x7C8, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_MASK);
+ msm_camera_io_w_mb(0xFFFF, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_CLR);
+ }
+
+ msm_cpp_set_vbif_reg_values(cpp_dev);
+ return rc;
+
+pwr_collapse_reset:
+ msm_cpp_update_gdscr_status(cpp_dev, false);
+req_irq_fail:
+ iounmap(cpp_dev->cpp_hw_base);
+cpp_hw_remap_failed:
+ iounmap(cpp_dev->vbif_base);
+vbif_remap_failed:
+ iounmap(cpp_dev->base);
+remap_failed:
+ iounmap(cpp_dev->camss_cpp_base);
+ msm_cam_clk_enable(&cpp_dev->pdev->dev, cpp_clk_info,
+ cpp_dev->cpp_clk, cpp_dev->num_clk, 0);
+clk_failed:
+ regulator_disable(cpp_dev->fs_cpp);
+ regulator_put(cpp_dev->fs_cpp);
+fs_failed:
+ regulator_disable(cpp_dev->fs_camss);
+ regulator_put(cpp_dev->fs_camss);
+fs_camss_failed:
+ regulator_disable(cpp_dev->fs_mmagic_camss);
+ regulator_put(cpp_dev->fs_mmagic_camss);
+fs_mmagic_failed:
+ if (cpp_dev->bus_master_flag)
+ msm_cpp_deinit_bandwidth_mgr(cpp_dev);
+ else
+ msm_isp_deinit_bandwidth_mgr(ISP_CPP);
+bus_scale_register_failed:
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_CPP, CAMERA_AHB_SUSPEND_VOTE);
+ if (rc < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ahb_vote_fail:
+ return rc;
+}
+
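+/*
+ * Undo cpp_init_hardware: free the IRQ, unmap the register regions and
+ * release clocks, regulators, bandwidth and the AHB vote.
+ */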
+static void cpp_release_hardware(struct cpp_device *cpp_dev)
+{
+	int32_t rc;
+
+	if (cpp_dev->state != CPP_STATE_BOOT) {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_DEINIT, NULL);
+ if (rc < 0) {
+ pr_err("error in buf mngr deinit\n");
+ rc = -EINVAL;
+ }
+ free_irq(cpp_dev->irq->start, cpp_dev);
+ tasklet_kill(&cpp_dev->cpp_tasklet);
+ atomic_set(&cpp_dev->irq_cnt, 0);
+ }
+ msm_cpp_delete_buff_queue(cpp_dev);
+ msm_cpp_update_gdscr_status(cpp_dev, false);
+ iounmap(cpp_dev->base);
+ iounmap(cpp_dev->vbif_base);
+ iounmap(cpp_dev->cpp_hw_base);
+ iounmap(cpp_dev->camss_cpp_base);
+ msm_cam_clk_enable(&cpp_dev->pdev->dev, cpp_clk_info,
+ cpp_dev->cpp_clk, cpp_dev->num_clk, 0);
+ if (cpp_dev->fs_cpp) {
+ regulator_disable(cpp_dev->fs_cpp);
+ regulator_put(cpp_dev->fs_cpp);
+ cpp_dev->fs_cpp = NULL;
+ }
+ if (cpp_dev->fs_camss) {
+ regulator_disable(cpp_dev->fs_camss);
+ regulator_put(cpp_dev->fs_camss);
+ cpp_dev->fs_camss = NULL;
+ }
+ if (cpp_dev->fs_mmagic_camss) {
+ regulator_disable(cpp_dev->fs_mmagic_camss);
+ regulator_put(cpp_dev->fs_mmagic_camss);
+ cpp_dev->fs_mmagic_camss = NULL;
+ }
+ if (cpp_dev->stream_cnt > 0) {
+ pr_warn("stream count active\n");
+ rc = msm_cpp_update_bandwidth_setting(cpp_dev, 0, 0);
+ }
+ cpp_dev->stream_cnt = 0;
+ if (cpp_dev->bus_master_flag)
+ msm_cpp_deinit_bandwidth_mgr(cpp_dev);
+ else
+ msm_isp_deinit_bandwidth_mgr(ISP_CPP);
+
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_CPP,
+ CAMERA_AHB_SUSPEND_VOTE);
+ if (rc < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+}
+
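+/*
+ * Stream the firmware image into the microcontroller word by word,
+ * pausing whenever the RX FIFO fills, then command a jump to the start
+ * address and wait for the acknowledgement sequence.
+ */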
+static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
+{
+ uint32_t i;
+ uint32_t *ptr_bin = NULL;
+ int32_t rc = 0;
+
+ if (!fw_name_bin) {
+ pr_err("%s:%d] invalid fw name", __func__, __LINE__);
+ rc = -EINVAL;
+ goto end;
+ }
+ pr_debug("%s:%d] FW file: %s\n", __func__, __LINE__, fw_name_bin);
+	if (cpp_dev->fw == NULL) {
+ pr_err("%s:%d] fw NULL", __func__, __LINE__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ ptr_bin = (uint32_t *)cpp_dev->fw->data;
+ if (!ptr_bin) {
+ pr_err("%s:%d] Fw bin NULL", __func__, __LINE__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ msm_camera_io_w(0x1, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
+ msm_camera_io_w(0x1, cpp_dev->base +
+ MSM_CPP_MICRO_BOOT_START);
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_CMD, rc);
+ goto end;
+ }
+
+ msm_camera_io_w(0xFFFFFFFF, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_CLR);
+
+	/* Start firmware loading */
+ msm_cpp_write(MSM_CPP_CMD_FW_LOAD, cpp_dev->base);
+ msm_cpp_write(cpp_dev->fw->size, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_START_ADDRESS, cpp_dev->base);
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll rx empty failed %d",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+ for (i = 0; i < cpp_dev->fw->size/4; i++) {
+ msm_cpp_write(*ptr_bin, cpp_dev->base);
+ if (i % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll rx empty failed %d",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+ }
+ ptr_bin++;
+ }
+ msm_camera_io_w_mb(0x00, cpp_dev->cpp_hw_base + 0xC);
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_OK);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_OK, rc);
+ goto end;
+ }
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_CMD, rc);
+ goto end;
+ }
+
+	/* Trigger MC to jump to start address */
+ msm_cpp_write(MSM_CPP_CMD_EXEC_JUMP, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_JUMP_ADDRESS, cpp_dev->base);
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_CMD, rc);
+ goto end;
+ }
+
+ rc = msm_cpp_poll(cpp_dev->base, 0x1);
+ if (rc) {
+ pr_err("%s:%d] poll command 0x1 failed %d", __func__, __LINE__,
+ rc);
+ goto end;
+ }
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_JUMP_ACK);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_JUMP_ACK, rc);
+ goto end;
+ }
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+			MSM_CPP_MSG_ID_TRAILER, rc);
+ }
+
+end:
+ return rc;
+}
+
+static int cpp_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rc;
+ uint32_t i;
+ struct cpp_device *cpp_dev = NULL;
+ CPP_DBG("E\n");
+
+ if (!sd || !fh) {
+ pr_err("Wrong input parameters sd %p fh %p!",
+ sd, fh);
+ return -EINVAL;
+ }
+ cpp_dev = v4l2_get_subdevdata(sd);
+ if (!cpp_dev) {
+ pr_err("failed: cpp_dev %p\n", cpp_dev);
+ return -EINVAL;
+ }
+ mutex_lock(&cpp_dev->mutex);
+ if (cpp_dev->cpp_open_cnt == MAX_ACTIVE_CPP_INSTANCE) {
+ pr_err("No free CPP instance\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].active == 0) {
+ cpp_dev->cpp_subscribe_list[i].active = 1;
+ cpp_dev->cpp_subscribe_list[i].vfh = &fh->vfh;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_CPP_INSTANCE) {
+ pr_err("No free instance\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENODEV;
+ }
+
+ CPP_DBG("open %d %p\n", i, &fh->vfh);
+ cpp_dev->cpp_open_cnt++;
+ if (cpp_dev->cpp_open_cnt == 1) {
+ rc = cpp_init_hardware(cpp_dev);
+ if (rc < 0) {
+ cpp_dev->cpp_open_cnt--;
+ cpp_dev->cpp_subscribe_list[i].active = 0;
+ cpp_dev->cpp_subscribe_list[i].vfh = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return rc;
+ }
+
+ rc = cpp_init_mem(cpp_dev);
+ if (rc < 0) {
+ pr_err("Error: init memory fail\n");
+ cpp_dev->cpp_open_cnt--;
+ cpp_dev->cpp_subscribe_list[i].active = 0;
+ cpp_dev->cpp_subscribe_list[i].vfh = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return rc;
+ }
+ cpp_dev->state = CPP_STATE_IDLE;
+ }
+ mutex_unlock(&cpp_dev->mutex);
+ return 0;
+}
+
+static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ uint32_t i;
+ int rc = -1;
+ struct cpp_device *cpp_dev = NULL;
+ struct msm_device_queue *processing_q = NULL;
+ struct msm_device_queue *eventData_q = NULL;
+
+ if (!sd) {
+ pr_err("Wrong input sd parameter");
+ return -EINVAL;
+ }
+ cpp_dev = v4l2_get_subdevdata(sd);
+
+ if (!cpp_dev) {
+ pr_err("failed: cpp_dev %p\n", cpp_dev);
+ return -EINVAL;
+ }
+
+ mutex_lock(&cpp_dev->mutex);
+
+ processing_q = &cpp_dev->processing_q;
+ eventData_q = &cpp_dev->eventData_q;
+
+ if (cpp_dev->cpp_open_cnt == 0) {
+ mutex_unlock(&cpp_dev->mutex);
+ return 0;
+ }
+
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].active == 1) {
+ cpp_dev->cpp_subscribe_list[i].active = 0;
+ cpp_dev->cpp_subscribe_list[i].vfh = NULL;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_CPP_INSTANCE) {
+ pr_err("Invalid close\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -ENODEV;
+ }
+
+ cpp_dev->cpp_open_cnt--;
+ if (cpp_dev->cpp_open_cnt == 0) {
+ pr_debug("irq_status: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4));
+ pr_debug("DEBUG_SP: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x40));
+ pr_debug("DEBUG_T: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x44));
+ pr_debug("DEBUG_N: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x48));
+ pr_debug("DEBUG_R: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x4C));
+ pr_debug("DEBUG_OPPC: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x50));
+ pr_debug("DEBUG_MO: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x54));
+ pr_debug("DEBUG_TIMER0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x60));
+ pr_debug("DEBUG_TIMER1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x64));
+ pr_debug("DEBUG_GPI: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x70));
+ pr_debug("DEBUG_GPO: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x74));
+ pr_debug("DEBUG_T0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x80));
+ pr_debug("DEBUG_R0: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x84));
+ pr_debug("DEBUG_T1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x88));
+ pr_debug("DEBUG_R1: 0x%x\n",
+ msm_camera_io_r(cpp_dev->cpp_hw_base + 0x8C));
+ msm_camera_io_w(0x0, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
+ msm_cpp_clear_timer(cpp_dev);
+ cpp_release_hardware(cpp_dev);
+ if (cpp_dev->iommu_state == CPP_IOMMU_STATE_ATTACHED) {
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl, CAM_SMMU_DETACH);
+ if (rc < 0)
+ pr_err("Error: Detach fail in release\n");
+ }
+ cam_smmu_destroy_handle(cpp_dev->iommu_hdl);
+ msm_cpp_empty_list(processing_q, list_frame);
+ msm_cpp_empty_list(eventData_q, list_eventdata);
+ cpp_dev->state = CPP_STATE_OFF;
+ }
+
+ mutex_unlock(&cpp_dev->mutex);
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops msm_cpp_internal_ops = {
+ .open = cpp_open_node,
+ .close = cpp_close_node,
+};
+
+static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev,
+ uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info)
+{
+ int rc = -EINVAL;
+
+ rc = v4l2_subdev_call(cpp_dev->buf_mgr_subdev, core, ioctl,
+ buff_mgr_ops, buff_mgr_info);
+ if (rc < 0)
+ pr_debug("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+}
+
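+/*
+ * Complete the oldest frame in the processing queue: release or mark
+ * done its output (and duplicate) buffers through the buffer manager
+ * and send a V4L2_EVENT_CPP_FRAME_DONE event to userspace.
+ */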
+static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev,
+ uint8_t put_buf)
+{
+ struct v4l2_event v4l2_evt;
+ struct msm_queue_cmd *frame_qcmd = NULL;
+ struct msm_queue_cmd *event_qcmd = NULL;
+ struct msm_cpp_frame_info_t *processed_frame = NULL;
+ struct msm_device_queue *queue = &cpp_dev->processing_q;
+ struct msm_buf_mngr_info buff_mgr_info;
+ int rc = 0;
+
+ frame_qcmd = msm_dequeue(queue, list_frame, POP_FRONT);
+ if (frame_qcmd) {
+ processed_frame = frame_qcmd->command;
+ do_gettimeofday(&(processed_frame->out_time));
+ kfree(frame_qcmd);
+ event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
+ if (!event_qcmd) {
+ pr_err("Insufficient memory\n");
+ return -ENOMEM;
+ }
+ atomic_set(&event_qcmd->on_heap, 1);
+ event_qcmd->command = processed_frame;
+ CPP_DBG("fid %d\n", processed_frame->frame_id);
+ msm_enqueue(&cpp_dev->eventData_q, &event_qcmd->list_eventdata);
+
+ if ((processed_frame->partial_frame_indicator != 0) &&
+ (processed_frame->last_payload == 0))
+ goto NOTIFY_FRAME_DONE;
+
+ if (!processed_frame->output_buffer_info[0].processed_divert &&
+ !processed_frame->output_buffer_info[0].native_buff &&
+ !processed_frame->we_disable) {
+			memset(&buff_mgr_info, 0,
+ sizeof(struct msm_buf_mngr_info));
+ buff_mgr_info.session_id =
+ ((processed_frame->identity >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id =
+ (processed_frame->identity & 0xFFFF);
+ buff_mgr_info.frame_id = processed_frame->frame_id;
+ buff_mgr_info.timestamp = processed_frame->timestamp;
+ /*
+ * Update the reserved field (cds information) to buffer
+			 * manager structure so it is propagated back to HAL
+ */
+ buff_mgr_info.reserved = processed_frame->reserved;
+ if (processed_frame->batch_info.batch_mode ==
+ BATCH_MODE_VIDEO) {
+ buff_mgr_info.index =
+ processed_frame->batch_info.cont_idx;
+ } else {
+ buff_mgr_info.index = processed_frame->
+ output_buffer_info[0].index;
+ }
+ if (put_buf) {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error putting buffer\n");
+ rc = -EINVAL;
+ }
+ } else {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ &buff_mgr_info);
+ if (rc < 0) {
+					pr_err("error in buf done\n");
+ rc = -EINVAL;
+ }
+ }
+ }
+
+ if (processed_frame->duplicate_output &&
+ !processed_frame->
+ duplicate_buffer_info.processed_divert &&
+ !processed_frame->we_disable) {
+			memset(&buff_mgr_info, 0,
+ sizeof(struct msm_buf_mngr_info));
+ buff_mgr_info.session_id =
+ ((processed_frame->duplicate_identity >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id =
+ (processed_frame->duplicate_identity & 0xFFFF);
+ buff_mgr_info.frame_id = processed_frame->frame_id;
+ buff_mgr_info.timestamp = processed_frame->timestamp;
+ buff_mgr_info.index =
+ processed_frame->duplicate_buffer_info.index;
+ /*
+ * Update the reserved field (cds information) to buffer
+			 * manager structure so it is propagated back to HAL
+ */
+ buff_mgr_info.reserved = processed_frame->reserved;
+ if (put_buf) {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error putting buffer\n");
+ rc = -EINVAL;
+ }
+ } else {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ &buff_mgr_info);
+ if (rc < 0) {
+					pr_err("error in buf done\n");
+ rc = -EINVAL;
+ }
+ }
+ }
+NOTIFY_FRAME_DONE:
+ v4l2_evt.id = processed_frame->inst_id;
+ v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
+ v4l2_event_queue(cpp_dev->msm_sd.sd.devnode, &v4l2_evt);
+ }
+ return rc;
+}
+
+#if MSM_CPP_DUMP_FRM_CMD
+static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info)
+{
+ int i, i1, i2;
+ struct cpp_device *cpp_dev = cpp_timer.data.cpp_dev;
+ CPP_DBG("-- start: cpp frame cmd for identity=0x%x, frame_id=%d --\n",
+ frame_info->identity, frame_info->frame_id);
+
+ CPP_DBG("msg[%03d] = 0x%08x\n", 0, 0x6);
+ /* send top level and plane level */
+ for (i = 0; i < cpp_dev->payload_params.stripe_base; i++)
+ CPP_DBG("msg[%03d] = 0x%08x\n", i,
+ frame_info->cpp_cmd_msg[i]);
+ /* send stripes */
+ i1 = cpp_dev->payload_params.stripe_base +
+ cpp_dev->payload_params.stripe_size *
+ frame_info->first_stripe_index;
+ i2 = cpp_dev->payload_params.stripe_size *
+ (frame_info->last_stripe_index -
+ frame_info->first_stripe_index + 1);
+ for (i = 0; i < i2; i++)
+ CPP_DBG("msg[%03d] = 0x%08x\n", i+i1,
+ frame_info->cpp_cmd_msg[i+i1]);
+ /* send trailer */
+ CPP_DBG("msg[%03d] = 0x%08x\n", i+i1, MSM_CPP_MSG_ID_TRAILER);
+ CPP_DBG("-- end: cpp frame cmd for identity=0x%x, frame_id=%d --\n",
+ frame_info->identity, frame_info->frame_id);
+ return 0;
+}
+#else
+static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info)
+{
+ return 0;
+}
+#endif
+
+static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
+	int queue_len)
+{
+ uint32_t i;
+
+ while (queue_len) {
+ msm_cpp_notify_frame_done(cpp_dev, 1);
+ queue_len--;
+ }
+ atomic_set(&cpp_timer.used, 0);
+ for (i = 0; i < MAX_CPP_PROCESSING_FRAME; i++)
+ cpp_timer.data.processed_frame[i] = NULL;
+}
+
+static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev,
+ uint8_t enable, uint32_t irq_mask)
+{
+ msm_camera_io_w_mb(irq_mask, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_MASK);
+ msm_camera_io_w_mb(0xFFFF, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_CLR);
+ if (enable)
+ enable_irq(cpp_dev->irq->start);
+}
+
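+/*
+ * Watchdog work: if the hardware did not acknowledge in time, reload
+ * the firmware and resubmit every frame still in the processing queue;
+ * after max_timeout_trial_cnt attempts the queue is flushed instead.
+ */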
+static void msm_cpp_do_timeout_work(struct work_struct *work)
+{
+ uint32_t j = 0, i = 0, i1 = 0, i2 = 0;
+ int32_t queue_len = 0, rc = 0;
+ struct msm_device_queue *queue = NULL;
+ struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
+ struct cpp_device *cpp_dev = cpp_timer.data.cpp_dev;
+
+ pr_warn("cpp_timer_callback called. (jiffies=%lu)\n",
+ jiffies);
+ mutex_lock(&cpp_dev->mutex);
+
+ if (!work || (cpp_timer.data.cpp_dev->state != CPP_STATE_ACTIVE)) {
+ pr_err("Invalid work:%p or state:%d\n", work,
+ cpp_timer.data.cpp_dev->state);
+ /* Do not flush queue here as it is not a fatal error */
+ goto end;
+ }
+ if (!atomic_read(&cpp_timer.used)) {
+ pr_warn("Delayed trigger, IRQ serviced\n");
+ /* Do not flush queue here as it is not a fatal error */
+ goto end;
+ }
+
+ disable_irq(cpp_timer.data.cpp_dev->irq->start);
+
+ queue = &cpp_timer.data.cpp_dev->processing_q;
+ queue_len = queue->len;
+
+ pr_debug("Reloading firmware %d\n", queue_len);
+ rc = cpp_load_fw(cpp_timer.data.cpp_dev,
+ cpp_timer.data.cpp_dev->fw_name_bin);
+ if (rc) {
+ pr_warn("Firmware loading failed\n");
+ cpp_dev->state = CPP_STATE_OFF;
+ /* clean buf queue here */
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x0);
+ cpp_dev->timeout_trial_cnt = 0;
+ goto end;
+ } else {
+ pr_debug("Firmware loading done\n");
+ }
+
+ if (!atomic_read(&cpp_timer.used)) {
+ pr_warn("Delayed trigger, IRQ serviced\n");
+ /* Do not flush queue here as it is not a fatal error */
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ cpp_dev->timeout_trial_cnt = 0;
+ goto end;
+ }
+
+ if (cpp_dev->timeout_trial_cnt >=
+ cpp_dev->max_timeout_trial_cnt) {
+ pr_warn("Max trial reached\n");
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ goto end;
+ }
+
+ atomic_set(&cpp_timer.used, 1);
+ pr_warn("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+ CPP_CMD_TIMEOUT_MS, jiffies);
+ mod_timer(&cpp_timer.cpp_timer,
+ jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
+
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+
+ for (i = 0; i < MAX_CPP_PROCESSING_FRAME; i++)
+ processed_frame[i] = cpp_timer.data.processed_frame[i];
+
+ for (i = 0; i < queue_len; i++) {
+ pr_warn("Rescheduling for identity=0x%x, frame_id=%03d\n",
+ processed_frame[i]->identity,
+ processed_frame[i]->frame_id);
+
+ msm_cpp_write(0x6, cpp_dev->base);
+ /* send top level and plane level */
+ for (j = 0; j < cpp_dev->payload_params.stripe_base; j++) {
+ if (j % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll failed %d rc %d",
+ __func__, __LINE__, j, rc);
+ cpp_dev->state = CPP_STATE_OFF;
+ msm_cpp_set_micro_irq_mask(cpp_dev,
+ 0, 0x0);
+ cpp_dev->timeout_trial_cnt = 0;
+ goto end;
+ }
+ }
+ msm_cpp_write(processed_frame[i]->cpp_cmd_msg[j],
+ cpp_dev->base);
+ }
+ if (rc) {
+ pr_err("%s: Rescheduling plane info failed %d\n",
+ __func__, rc);
+ /* flush the queue */
+ cpp_dev->state = CPP_STATE_OFF;
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev,
+ queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 0, 0x0);
+ cpp_dev->timeout_trial_cnt = 0;
+ goto end;
+ }
+ /* send stripes */
+ i1 = cpp_dev->payload_params.stripe_base +
+ cpp_dev->payload_params.stripe_size *
+ processed_frame[i]->first_stripe_index;
+ i2 = cpp_dev->payload_params.stripe_size *
+ (processed_frame[i]->last_stripe_index -
+ processed_frame[i]->first_stripe_index + 1);
+ for (j = 0; j < i2; j++) {
+ if (j % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc) {
+ pr_err("%s:%d] poll failed %d rc %d",
+ __func__, __LINE__, j, rc);
+ break;
+ }
+ }
+ msm_cpp_write(processed_frame[i]->cpp_cmd_msg[j+i1],
+ cpp_dev->base);
+ }
+ if (rc) {
+ pr_err("%s:%d] Rescheduling stripe info failed %d\n",
+ __func__, __LINE__, rc);
+ /* flush the queue */
+ cpp_dev->state = CPP_STATE_OFF;
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev,
+ queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 0, 0x0);
+ cpp_dev->timeout_trial_cnt = 0;
+ goto end;
+ }
+ /* send trailer */
+ msm_cpp_write(0xabcdefaa, cpp_dev->base);
+ pr_debug("After frame:%d write\n", i+1);
+ }
+
+ cpp_timer.data.cpp_dev->timeout_trial_cnt++;
+
+end:
+ mutex_unlock(&cpp_dev->mutex);
+ pr_debug("%s:%d] exit\n", __func__, __LINE__);
+ return;
+}
+
+void cpp_timer_callback(unsigned long data)
+{
+ struct msm_cpp_work_t *work =
+ cpp_timer.data.cpp_dev->work;
+ queue_work(cpp_timer.data.cpp_dev->timer_wq,
+ (struct work_struct *)work);
+}
+
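+/*
+ * Queue the frame, arm the watchdog timer and write the frame command
+ * (header, plane payload, stripes, trailer) into the RX FIFO, pausing
+ * whenever the FIFO fills.
+ */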
+static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev,
+ struct msm_queue_cmd *frame_qcmd)
+{
+ unsigned long flags;
+ uint32_t i, i1, i2;
+ int32_t rc = -EAGAIN;
+ int ret;
+ struct msm_cpp_frame_info_t *process_frame;
+ struct msm_queue_cmd *qcmd = NULL;
+ uint32_t queue_len = 0;
+
+ if (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
+ process_frame = frame_qcmd->command;
+ msm_cpp_dump_frame_cmd(process_frame);
+ spin_lock_irqsave(&cpp_timer.data.processed_frame_lock, flags);
+ msm_enqueue(&cpp_dev->processing_q,
+ &frame_qcmd->list_frame);
+ cpp_timer.data.processed_frame[cpp_dev->processing_q.len - 1] =
+ process_frame;
+ queue_len = cpp_dev->processing_q.len;
+ spin_unlock_irqrestore(&cpp_timer.data.processed_frame_lock,
+ flags);
+ if (queue_len == 1) {
+ atomic_set(&cpp_timer.used, 1);
+ }
+ CPP_DBG("Starting timer to fire in %d ms. (jiffies=%lu)\n",
+ CPP_CMD_TIMEOUT_MS, jiffies);
+ ret = mod_timer(&cpp_timer.cpp_timer,
+ jiffies + msecs_to_jiffies(CPP_CMD_TIMEOUT_MS));
+ if (ret)
+ CPP_DBG("Timer has not expired yet\n");
+
+ msm_cpp_write(0x6, cpp_dev->base);
+ /* send top level and plane level */
+ for (i = 0; i < cpp_dev->payload_params.stripe_base; i++) {
+ if (i % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc)
+ break;
+ }
+ msm_cpp_write(process_frame->cpp_cmd_msg[i],
+ cpp_dev->base);
+ }
+ if (rc) {
+ pr_err("%s: Rescheduling plane info failed %d\n",
+ __func__, rc);
+ goto dequeue_frame;
+ }
+ /* send stripes */
+ i1 = cpp_dev->payload_params.stripe_base +
+ cpp_dev->payload_params.stripe_size *
+ process_frame->first_stripe_index;
+ i2 = cpp_dev->payload_params.stripe_size *
+ (process_frame->last_stripe_index -
+ process_frame->first_stripe_index + 1);
+ for (i = 0; i < i2; i++) {
+ if (i % MSM_CPP_RX_FIFO_LEVEL == 0) {
+ rc = msm_cpp_poll_rx_empty(cpp_dev->base);
+ if (rc)
+ break;
+ }
+ msm_cpp_write(process_frame->cpp_cmd_msg[i+i1],
+ cpp_dev->base);
+ }
+ if (rc) {
+ pr_err("%s: Rescheduling stripe info failed %d\n",
+ __func__, rc);
+ goto dequeue_frame;
+ }
+ /* send trailer */
+ msm_cpp_write(MSM_CPP_MSG_ID_TRAILER, cpp_dev->base);
+
+ do_gettimeofday(&(process_frame->in_time));
+ rc = 0;
+ } else {
+ pr_err("process queue full. drop frame\n");
+ goto end;
+ }
+
+dequeue_frame:
+ if (rc < 0) {
+ qcmd = msm_dequeue(&cpp_dev->processing_q, list_frame,
+ POP_BACK);
+ if (!qcmd)
+ pr_warn("%s:%d: no queue cmd\n", __func__, __LINE__);
+ spin_lock_irqsave(&cpp_timer.data.processed_frame_lock,
+ flags);
+ queue_len = cpp_dev->processing_q.len;
+ spin_unlock_irqrestore(
+ &cpp_timer.data.processed_frame_lock, flags);
+ if (queue_len == 0) {
+ atomic_set(&cpp_timer.used, 0);
+ del_timer(&cpp_timer.cpp_timer);
+ }
+ }
+end:
+ return rc;
+}
+
+static int msm_cpp_send_command_to_hardware(struct cpp_device *cpp_dev,
+ uint32_t *cmd_msg, uint32_t payload_size)
+{
+ uint32_t i;
+
+ for (i = 0; i < payload_size; i++) {
+ msm_cpp_write(cmd_msg[i],
+ cpp_dev->base);
+ }
+ return 0;
+}
+
+static int msm_cpp_flush_frames(struct cpp_device *cpp_dev)
+{
+ return 0;
+}
+
+static struct msm_cpp_frame_info_t *msm_cpp_get_frame(
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ uint32_t *cpp_frame_msg;
+ struct msm_cpp_frame_info_t *new_frame = NULL;
+ int32_t rc = 0;
+
+ new_frame = kzalloc(sizeof(struct msm_cpp_frame_info_t), GFP_KERNEL);
+
+ if (!new_frame) {
+ pr_err("Insufficient memory\n");
+ rc = -ENOMEM;
+ goto no_mem_err;
+ }
+
+ rc = (copy_from_user(new_frame, (void __user *)ioctl_ptr->ioctl_ptr,
+ sizeof(struct msm_cpp_frame_info_t)) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ goto frame_err;
+ }
+
+ if ((new_frame->msg_len == 0) ||
+ (new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
+ pr_err("%s:%d: Invalid frame len:%d\n", __func__,
+ __LINE__, new_frame->msg_len);
+ goto frame_err;
+ }
+
+ cpp_frame_msg = kzalloc(sizeof(uint32_t) * new_frame->msg_len,
+ GFP_KERNEL);
+ if (!cpp_frame_msg) {
+ pr_err("Insufficient memory\n");
+ goto frame_err;
+ }
+
+ rc = (copy_from_user(cpp_frame_msg,
+ (void __user *)new_frame->cpp_cmd_msg,
+ sizeof(uint32_t) * new_frame->msg_len) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ goto frame_msg_err;
+ }
+ new_frame->cpp_cmd_msg = cpp_frame_msg;
+ return new_frame;
+
+frame_msg_err:
+ kfree(cpp_frame_msg);
+frame_err:
+ kfree(new_frame);
+no_mem_err:
+ return NULL;
+}
+
+static int msm_cpp_check_buf_type(struct msm_buf_mngr_info *buff_mgr_info,
+ struct msm_cpp_frame_info_t *new_frame)
+{
+ int32_t num_output_bufs = 0;
+	uint32_t i = 0;
+
+	if (buff_mgr_info->type == MSM_CAMERA_BUF_MNGR_BUF_USER) {
+ new_frame->batch_info.cont_idx =
+ buff_mgr_info->index;
+ num_output_bufs = buff_mgr_info->user_buf.buf_cnt;
+ if (buff_mgr_info->user_buf.buf_cnt <
+ new_frame->batch_info.batch_size) {
+ /* Less bufs than Input buffer */
+ num_output_bufs = buff_mgr_info->user_buf.buf_cnt;
+ } else {
+ /* More or equal bufs as Input buffer */
+ num_output_bufs = new_frame->batch_info.batch_size;
+ }
+ for (i = 0; i < num_output_bufs; i++) {
+ new_frame->output_buffer_info[i].index =
+ buff_mgr_info->user_buf.buf_idx[i];
+ }
+ } else {
+ /* For non-group case use first buf slot */
+ new_frame->output_buffer_info[0].index = buff_mgr_info->index;
+ num_output_bufs = 1;
+ }
+
+ return num_output_bufs;
+}
+
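+/*
+ * Patch the mapped physical addresses of the input, output, reference
+ * and TNR scratch buffers into each stripe of the frame command, and
+ * into the per-plane MMU prefetch pointers when prefetch is enabled.
+ */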
+static void msm_cpp_update_frame_msg_phy_address(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *new_frame, unsigned long in_phyaddr,
+ unsigned long out_phyaddr0, unsigned long out_phyaddr1,
+ unsigned long tnr_scratch_buffer0, unsigned long tnr_scratch_buffer1)
+{
+ int32_t stripe_base, plane_base;
+ uint32_t rd_pntr_off, wr_0_pntr_off, wr_1_pntr_off,
+ wr_2_pntr_off, wr_3_pntr_off;
+ uint32_t wr_0_meta_data_wr_pntr_off, wr_1_meta_data_wr_pntr_off,
+ wr_2_meta_data_wr_pntr_off, wr_3_meta_data_wr_pntr_off;
+ uint32_t rd_ref_pntr_off, wr_ref_pntr_off;
+ uint32_t stripe_size, plane_size;
+ uint32_t fe_mmu_pf_ptr_off, ref_fe_mmu_pf_ptr_off, we_mmu_pf_ptr_off,
+ dup_we_mmu_pf_ptr_off, ref_we_mmu_pf_ptr_off;
+ uint8_t tnr_enabled, ubwc_enabled, mmu_pf_en, cds_en;
+ int32_t i = 0;
+ uint32_t *cpp_frame_msg;
+
+ cpp_frame_msg = new_frame->cpp_cmd_msg;
+
+ /* Update stripe/plane size and base offsets */
+ stripe_base = cpp_dev->payload_params.stripe_base;
+ stripe_size = cpp_dev->payload_params.stripe_size;
+ plane_base = cpp_dev->payload_params.plane_base;
+ plane_size = cpp_dev->payload_params.plane_size;
+
+ /* Fetch engine Offset */
+ rd_pntr_off = cpp_dev->payload_params.rd_pntr_off;
+ /* Write engine offsets */
+ wr_0_pntr_off = cpp_dev->payload_params.wr_0_pntr_off;
+ wr_1_pntr_off = wr_0_pntr_off + 1;
+ wr_2_pntr_off = wr_1_pntr_off + 1;
+ wr_3_pntr_off = wr_2_pntr_off + 1;
+ /* Reference engine offsets */
+ rd_ref_pntr_off = cpp_dev->payload_params.rd_ref_pntr_off;
+ wr_ref_pntr_off = cpp_dev->payload_params.wr_ref_pntr_off;
+ /* Meta data offsets */
+ wr_0_meta_data_wr_pntr_off =
+ cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off;
+ wr_1_meta_data_wr_pntr_off = (wr_0_meta_data_wr_pntr_off + 1);
+ wr_2_meta_data_wr_pntr_off = (wr_1_meta_data_wr_pntr_off + 1);
+ wr_3_meta_data_wr_pntr_off = (wr_2_meta_data_wr_pntr_off + 1);
+ /* MMU PF offsets */
+ fe_mmu_pf_ptr_off = cpp_dev->payload_params.fe_mmu_pf_ptr_off;
+ ref_fe_mmu_pf_ptr_off = cpp_dev->payload_params.ref_fe_mmu_pf_ptr_off;
+ we_mmu_pf_ptr_off = cpp_dev->payload_params.we_mmu_pf_ptr_off;
+ dup_we_mmu_pf_ptr_off = cpp_dev->payload_params.dup_we_mmu_pf_ptr_off;
+ ref_we_mmu_pf_ptr_off = cpp_dev->payload_params.ref_we_mmu_pf_ptr_off;
+
+ pr_debug("%s: feature_mask 0x%x\n", __func__, new_frame->feature_mask);
+
+ /* Update individual module status from feature mask */
+ tnr_enabled = ((new_frame->feature_mask & TNR_MASK) >> 2);
+ ubwc_enabled = ((new_frame->feature_mask & UBWC_MASK) >> 5);
+ cds_en = ((new_frame->feature_mask & CDS_MASK) >> 6);
+ mmu_pf_en = ((new_frame->feature_mask & MMU_PF_MASK) >> 7);
+
+ /*
+ * Update the stripe based addresses for fetch/write/reference engines.
+ * Update meta data offset for ubwc.
+ * Update ref engine address for cds / tnr.
+ */
+ for (i = 0; i < new_frame->num_strips; i++) {
+ cpp_frame_msg[stripe_base + rd_pntr_off + i * stripe_size] +=
+ (uint32_t) in_phyaddr;
+ cpp_frame_msg[stripe_base + wr_0_pntr_off + i * stripe_size] +=
+ (uint32_t) out_phyaddr0;
+ cpp_frame_msg[stripe_base + wr_1_pntr_off + i * stripe_size] +=
+ (uint32_t) out_phyaddr1;
+ cpp_frame_msg[stripe_base + wr_2_pntr_off + i * stripe_size] +=
+ (uint32_t) out_phyaddr0;
+ cpp_frame_msg[stripe_base + wr_3_pntr_off + i * stripe_size] +=
+ (uint32_t) out_phyaddr1;
+ if (tnr_enabled) {
+ cpp_frame_msg[stripe_base + rd_ref_pntr_off +
+ i * stripe_size] +=
+ (uint32_t)tnr_scratch_buffer0;
+ cpp_frame_msg[stripe_base + wr_ref_pntr_off +
+ i * stripe_size] +=
+ (uint32_t)tnr_scratch_buffer1;
+ } else if (cds_en) {
+ cpp_frame_msg[stripe_base + rd_ref_pntr_off +
+ i * stripe_size] +=
+ (uint32_t)in_phyaddr;
+ }
+ if (ubwc_enabled) {
+ cpp_frame_msg[stripe_base + wr_0_meta_data_wr_pntr_off +
+ i * stripe_size] += (uint32_t) out_phyaddr0;
+ cpp_frame_msg[stripe_base + wr_1_meta_data_wr_pntr_off +
+ i * stripe_size] += (uint32_t) out_phyaddr1;
+ cpp_frame_msg[stripe_base + wr_2_meta_data_wr_pntr_off +
+ i * stripe_size] += (uint32_t) out_phyaddr0;
+ cpp_frame_msg[stripe_base + wr_3_meta_data_wr_pntr_off +
+ i * stripe_size] += (uint32_t) out_phyaddr1;
+ }
+ }
+
+ if (!mmu_pf_en)
+ goto exit;
+
+ /* Update mmu prefetch related plane specific address */
+ for (i = 0; i < PAYLOAD_NUM_PLANES; i++) {
+ cpp_frame_msg[plane_base + fe_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)in_phyaddr;
+ cpp_frame_msg[plane_base + fe_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)in_phyaddr;
+ cpp_frame_msg[plane_base + ref_fe_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)tnr_scratch_buffer0;
+ cpp_frame_msg[plane_base + ref_fe_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)tnr_scratch_buffer0;
+ cpp_frame_msg[plane_base + we_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)out_phyaddr0;
+ cpp_frame_msg[plane_base + we_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)out_phyaddr0;
+ cpp_frame_msg[plane_base + dup_we_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)out_phyaddr1;
+ cpp_frame_msg[plane_base + dup_we_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)out_phyaddr1;
+ cpp_frame_msg[plane_base + ref_we_mmu_pf_ptr_off +
+ i * plane_size] += (uint32_t)tnr_scratch_buffer1;
+ cpp_frame_msg[plane_base + ref_we_mmu_pf_ptr_off +
+ i * plane_size + 1] += (uint32_t)tnr_scratch_buffer1;
+ }
+exit:
+ return;
+}
+
+static int32_t msm_cpp_set_group_buffer_duplicate(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *new_frame, unsigned long out_phyaddr,
+ uint32_t num_output_bufs)
+{
+ uint32_t *set_group_buffer_w_duplication = NULL;
+ uint32_t *ptr;
+ unsigned long out_phyaddr0, out_phyaddr1, distance;
+ int32_t rc = 0;
+ uint32_t set_group_buffer_len, set_group_buffer_len_bytes,
+ dup_frame_off, ubwc_enabled, j, i = 0;
+
+ do {
+ set_group_buffer_len =
+ cpp_dev->payload_params.set_group_buffer_len;
+ if (!set_group_buffer_len) {
+ pr_err("%s: invalid set group buffer cmd len %d\n",
+ __func__, set_group_buffer_len);
+ rc = -EINVAL;
+ break;
+ }
+
+		/*
+		 * Total length is the MSM_CPP_CMD_GROUP_BUFFER_DUP payload
+		 * length plus four 32-bit words: the command prefix, the
+		 * message header, the length field and the trailer.
+		 */
+ set_group_buffer_len += 4;
+ set_group_buffer_len_bytes = set_group_buffer_len *
+ sizeof(uint32_t);
+ set_group_buffer_w_duplication =
+ kzalloc(set_group_buffer_len_bytes, GFP_KERNEL);
+ if (!set_group_buffer_w_duplication) {
+ pr_err("%s: set group buffer data alloc failed\n",
+ __func__);
+ rc = -ENOMEM;
+ break;
+ }
+
+ memset(set_group_buffer_w_duplication, 0x0,
+ set_group_buffer_len_bytes);
+ dup_frame_off =
+ cpp_dev->payload_params.dup_frame_indicator_off;
+ /* Add a factor of 1 as command is prefixed to the payload. */
+ dup_frame_off += 1;
+ ubwc_enabled = ((new_frame->feature_mask & UBWC_MASK) >> 5);
+ ptr = set_group_buffer_w_duplication;
+		/* Create and send Set Group Buffer with Duplicate command */
+ *ptr++ = MSM_CPP_CMD_GROUP_BUFFER_DUP;
+ *ptr++ = MSM_CPP_MSG_ID_CMD;
+ /*
+ * This field is the value read from dt and stands for length of
+ * actual data in payload
+ */
+ *ptr++ = cpp_dev->payload_params.set_group_buffer_len;
+ *ptr++ = MSM_CPP_CMD_GROUP_BUFFER_DUP;
+ *ptr++ = 0;
+ out_phyaddr0 = out_phyaddr;
+
+ for (i = 1; i < num_output_bufs; i++) {
+ out_phyaddr1 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->output_buffer_info[i],
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF),
+ &new_frame->output_buffer_info[i].fd);
+ if (!out_phyaddr1) {
+ pr_err("%s: error getting o/p phy addr\n",
+ __func__);
+ rc = -EINVAL;
+ break;
+ }
+ distance = out_phyaddr1 - out_phyaddr0;
+ out_phyaddr0 = out_phyaddr1;
+ for (j = 0; j < PAYLOAD_NUM_PLANES; j++)
+ *ptr++ = distance;
+
+ if (ubwc_enabled) {
+ for (j = 0; j < PAYLOAD_NUM_PLANES; j++)
+ *ptr++ = distance;
+ }
+ }
+ if (rc)
+ break;
+
+ if (new_frame->duplicate_output)
+ set_group_buffer_w_duplication[dup_frame_off] =
+ 1 << new_frame->batch_info.pick_preview_idx;
+ else
+ set_group_buffer_w_duplication[dup_frame_off] = 0;
+
+ /*
+ * Index for cpp message id trailer is length of payload for
+ * set group buffer minus 1
+ */
+ set_group_buffer_w_duplication[set_group_buffer_len - 1] =
+ MSM_CPP_MSG_ID_TRAILER;
+ rc = msm_cpp_send_command_to_hardware(cpp_dev,
+ set_group_buffer_w_duplication, set_group_buffer_len);
+ if (rc < 0) {
+ pr_err("%s: Send Command Error rc %d\n", __func__, rc);
+ break;
+ }
+
+ } while (0);
+
+ kfree(set_group_buffer_w_duplication);
+ return rc;
+}
+
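+/*
+ * For video batch mode, send a set-group-buffer command that lists the
+ * per-plane address distances between consecutive output buffers of
+ * the batch (with or without frame duplication).
+ */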
+static int32_t msm_cpp_set_group_buffer(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *new_frame, unsigned long out_phyaddr,
+ uint32_t num_output_bufs)
+{
+ uint32_t set_group_buffer_len;
+ uint32_t *set_group_buffer = NULL;
+ uint32_t *ptr;
+ unsigned long out_phyaddr0, out_phyaddr1, distance;
+ int32_t rc = 0;
+ uint32_t set_group_buffer_len_bytes, i = 0;
+
+ if (new_frame->batch_info.batch_mode != BATCH_MODE_VIDEO) {
+ pr_debug("%s: batch mode not set %d\n", __func__,
+ new_frame->batch_info.batch_mode);
+ return rc;
+ }
+
+ if (new_frame->batch_info.batch_size <= 1) {
+ pr_debug("%s: batch size is invalid %d\n", __func__,
+ new_frame->batch_info.batch_size);
+ return rc;
+ }
+
+ if ((new_frame->feature_mask & BATCH_DUP_MASK) >> 8) {
+ return msm_cpp_set_group_buffer_duplicate(cpp_dev, new_frame,
+ out_phyaddr, num_output_bufs);
+ }
+
+ if (new_frame->duplicate_output) {
+ pr_err("cannot support duplication enable\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ set_group_buffer_len =
+ 2 + 3 * (num_output_bufs - 1);
+	/*
+	 * Total length is the MSM_CPP_CMD_GROUP_BUFFER payload length
+	 * plus four 32-bit words: the command prefix, the message
+	 * header, the length field and the trailer.
+	 */
+ set_group_buffer_len += 4;
+ set_group_buffer_len_bytes = set_group_buffer_len *
+ sizeof(uint32_t);
+ set_group_buffer =
+ kzalloc(set_group_buffer_len_bytes, GFP_KERNEL);
+ if (!set_group_buffer) {
+ pr_err("%s: set group buffer data alloc failed\n",
+ __func__);
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ memset(set_group_buffer, 0x0,
+ set_group_buffer_len_bytes);
+ ptr = set_group_buffer;
+	/* Create and send Set Group Buffer */
+ *ptr++ = MSM_CPP_CMD_GROUP_BUFFER;
+ *ptr++ = MSM_CPP_MSG_ID_CMD;
+ /*
+ * This field is the value read from dt and stands
+ * for length of actual data in payload
+ */
+ *ptr++ = set_group_buffer_len - 4;
+ *ptr++ = MSM_CPP_CMD_GROUP_BUFFER;
+ *ptr++ = 0;
+ out_phyaddr0 = out_phyaddr;
+
+ for (i = 1; i < num_output_bufs; i++) {
+ out_phyaddr1 =
+ msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->output_buffer_info[i],
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF),
+ &new_frame->output_buffer_info[i].fd);
+ if (!out_phyaddr1) {
+ pr_err("%s: error getting o/p phy addr\n",
+ __func__);
+ rc = -EINVAL;
+ goto free_and_exit;
+ }
+ distance = out_phyaddr1 - out_phyaddr0;
+ out_phyaddr0 = out_phyaddr1;
+ *ptr++ = distance;
+ *ptr++ = distance;
+ *ptr++ = distance;
+ }
+ if (rc)
+ goto free_and_exit;
+
+ /*
+ * Index for cpp message id trailer is length of
+ * payload for set group buffer minus 1
+ */
+ set_group_buffer[set_group_buffer_len - 1] =
+ MSM_CPP_MSG_ID_TRAILER;
+ rc = msm_cpp_send_command_to_hardware(cpp_dev,
+ set_group_buffer, set_group_buffer_len);
+ if (rc < 0)
+ pr_err("Send Command Error rc %d\n", rc);
+
+free_and_exit:
+ kfree(set_group_buffer);
+exit:
+ return rc;
+}
+
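+/*
+ * Validate a frame command, resolve the physical addresses of all
+ * buffers involved, patch them into the payload and queue the frame to
+ * the hardware.
+ */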
+static int msm_cpp_cfg_frame(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *new_frame)
+{
+ int32_t rc = 0;
+ struct msm_queue_cmd *frame_qcmd = NULL;
+ uint32_t *cpp_frame_msg;
+	unsigned long in_phyaddr, out_phyaddr0 = 0;
+ unsigned long out_phyaddr1;
+ unsigned long tnr_scratch_buffer0, tnr_scratch_buffer1;
+ uint16_t num_stripes = 0;
+ struct msm_buf_mngr_info buff_mgr_info, dup_buff_mgr_info;
+ int32_t in_fd;
+ int32_t num_output_bufs = 1;
+ int32_t stripe_base = 0;
+ uint32_t stripe_size;
+ uint8_t tnr_enabled;
+ enum msm_camera_buf_mngr_buf_type buf_type =
+ MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
+
+ stripe_base = cpp_dev->payload_params.stripe_base;
+ stripe_size = cpp_dev->payload_params.stripe_size;
+
+ if (!new_frame) {
+ pr_err("%s: Frame is Null\n", __func__);
+ return -EINVAL;
+ }
+
+ if (cpp_dev->state == CPP_STATE_OFF) {
+ pr_err("%s: cpp state is off, return fatal error\n", __func__);
+ return -EINVAL;
+ }
+
+ cpp_frame_msg = new_frame->cpp_cmd_msg;
+
+ if (cpp_frame_msg == NULL ||
+ (new_frame->msg_len < MSM_CPP_MIN_FRAME_LENGTH)) {
+ pr_err("Length is not correct or frame message is missing\n");
+ return -EINVAL;
+ }
+
+ if (!new_frame->partial_frame_indicator) {
+ if (cpp_frame_msg[new_frame->msg_len - 1] !=
+ MSM_CPP_MSG_ID_TRAILER) {
+ pr_err("Invalid frame message\n");
+ return -EINVAL;
+ }
+
+ if ((stripe_base + new_frame->num_strips * stripe_size + 1) !=
+ new_frame->msg_len) {
+ pr_err("Invalid frame message,len=%d,expected=%d\n",
+ new_frame->msg_len,
+ (stripe_base +
+ new_frame->num_strips * stripe_size + 1));
+ return -EINVAL;
+ }
+ }
+
+ if (cpp_dev->iommu_state != CPP_IOMMU_STATE_ATTACHED) {
+ pr_err("IOMMU is not attached\n");
+ return -EAGAIN;
+ }
+
+ in_phyaddr = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->input_buffer_info,
+ ((new_frame->input_buffer_info.identity >> 16) & 0xFFFF),
+ (new_frame->input_buffer_info.identity & 0xFFFF), &in_fd);
+ if (!in_phyaddr) {
+		pr_err("%s: error getting input physical address\n", __func__);
+ rc = -EINVAL;
+ goto frame_msg_err;
+ }
+
+ if (new_frame->we_disable == 0) {
+ if ((new_frame->output_buffer_info[0].native_buff == 0) &&
+ (new_frame->first_payload)) {
+ memset(&buff_mgr_info, 0,
+ sizeof(struct msm_buf_mngr_info));
+ if (new_frame->batch_info.batch_mode ==
+ BATCH_MODE_VIDEO)
+ buf_type = MSM_CAMERA_BUF_MNGR_BUF_USER;
+
+ buff_mgr_info.session_id =
+ ((new_frame->identity >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id =
+ (new_frame->identity & 0xFFFF);
+ buff_mgr_info.type = buf_type;
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_GET_BUF,
+ &buff_mgr_info);
+ if (rc < 0) {
+ rc = -EAGAIN;
+ pr_debug("%s: error getting buffer rc:%d\n",
+ __func__, rc);
+ goto frame_msg_err;
+ }
+ num_output_bufs =
+ msm_cpp_check_buf_type(&buff_mgr_info,
+ new_frame);
+ if (!num_output_bufs) {
+ pr_err("%s: error getting buffer %d\n",
+ __func__, num_output_bufs);
+ rc = -EINVAL;
+ goto phyaddr_err;
+ }
+ }
+
+ out_phyaddr0 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->output_buffer_info[0],
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF),
+ &new_frame->output_buffer_info[0].fd);
+ if (!out_phyaddr0) {
+			pr_err("%s: error getting output physical address\n",
+ __func__);
+ rc = -EINVAL;
+ goto phyaddr_err;
+ }
+ }
+ out_phyaddr1 = out_phyaddr0;
+
+ /* get buffer for duplicate output */
+ if (new_frame->duplicate_output) {
+ CPP_DBG("duplication enabled, dup_id=0x%x",
+ new_frame->duplicate_identity);
+ memset(&dup_buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
+ dup_buff_mgr_info.session_id =
+ ((new_frame->duplicate_identity >> 16) & 0xFFFF);
+ dup_buff_mgr_info.stream_id =
+ (new_frame->duplicate_identity & 0xFFFF);
+ dup_buff_mgr_info.type =
+ MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
+ rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_GET_BUF,
+ &dup_buff_mgr_info);
+		if (rc < 0) {
+			pr_debug("%s: error getting buffer rc:%d\n",
+				__func__, rc);
+			rc = -EAGAIN;
+			goto phyaddr_err;
+ }
+ new_frame->duplicate_buffer_info.index =
+ dup_buff_mgr_info.index;
+ out_phyaddr1 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->duplicate_buffer_info,
+ ((new_frame->duplicate_identity >> 16) & 0xFFFF),
+ (new_frame->duplicate_identity & 0xFFFF),
+ &new_frame->duplicate_buffer_info.fd);
+ if (!out_phyaddr1) {
+			pr_err("error getting output physical address\n");
+ rc = -EINVAL;
+ msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ &dup_buff_mgr_info);
+ goto phyaddr_err;
+ }
+ /* set duplicate enable bit */
+ cpp_frame_msg[5] |= 0x1;
+ CPP_DBG("out_phyaddr1= %08x\n", (uint32_t)out_phyaddr1);
+ }
+
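+	/*
+	 * TNR is signalled through feature_mask; when enabled, both scratch
+	 * buffers are mapped so their addresses can be patched into the
+	 * frame message below.
+	 */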
+ tnr_enabled = ((new_frame->feature_mask & TNR_MASK) >> 2);
+ if (tnr_enabled) {
+ tnr_scratch_buffer0 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->tnr_scratch_buffer_info[0],
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF),
+ &new_frame->tnr_scratch_buffer_info[0].fd);
+ if (!tnr_scratch_buffer0) {
+ pr_err("error getting scratch buffer physical address\n");
+ rc = -EINVAL;
+ goto phyaddr_err;
+ }
+
+ tnr_scratch_buffer1 = msm_cpp_fetch_buffer_info(cpp_dev,
+ &new_frame->tnr_scratch_buffer_info[1],
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF),
+ &new_frame->tnr_scratch_buffer_info[1].fd);
+ if (!tnr_scratch_buffer1) {
+ pr_err("error getting scratch buffer physical address\n");
+ rc = -EINVAL;
+ goto phyaddr_err;
+ }
+ } else {
+ tnr_scratch_buffer0 = 0;
+ tnr_scratch_buffer1 = 0;
+ }
+
+ msm_cpp_update_frame_msg_phy_address(cpp_dev, new_frame,
+ in_phyaddr, out_phyaddr0, out_phyaddr1,
+ tnr_scratch_buffer0, tnr_scratch_buffer1);
+ if (tnr_enabled) {
+ cpp_frame_msg[10] = tnr_scratch_buffer1 -
+ tnr_scratch_buffer0;
+ }
+
+ rc = msm_cpp_set_group_buffer(cpp_dev, new_frame, out_phyaddr0,
+ num_output_bufs);
+ if (rc) {
+ pr_err("%s: set group buffer failure %d\n", __func__, rc);
+ goto phyaddr_err;
+ }
+
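+	/*
+	 * Recompute word 1 of the frame message from the number of stripes
+	 * actually being processed in this pass.
+	 */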
+ num_stripes = new_frame->last_stripe_index -
+ new_frame->first_stripe_index + 1;
+ cpp_frame_msg[1] = stripe_base - 2 + num_stripes * stripe_size;
+
+ frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+ if (!frame_qcmd) {
+ pr_err("%s: Insufficient memory\n", __func__);
+ rc = -ENOMEM;
+ goto qcmd_err;
+ }
+
+ atomic_set(&frame_qcmd->on_heap, 1);
+ frame_qcmd->command = new_frame;
+ rc = msm_cpp_send_frame_to_hardware(cpp_dev, frame_qcmd);
+ if (rc < 0) {
+ pr_err("%s: error cannot send frame to hardware\n", __func__);
+ rc = -EINVAL;
+ goto qcmd_err;
+ }
+
+ return rc;
+qcmd_err:
+ kfree(frame_qcmd);
+phyaddr_err:
+ if (new_frame->output_buffer_info[0].native_buff == 0)
+ msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ &buff_mgr_info);
+frame_msg_err:
+ kfree(cpp_frame_msg);
+ kfree(new_frame);
+ return rc;
+}
+
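+/*
+ * msm_cpp_cfg - copy the frame info from user space, run it through
+ * msm_cpp_cfg_frame() and report the per-buffer results and status back
+ * to the caller.
+ */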
+static int msm_cpp_cfg(struct cpp_device *cpp_dev,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ struct msm_cpp_frame_info_t *frame = NULL;
+ struct msm_cpp_frame_info_t k_frame_info;
+ int32_t rc = 0;
+ int32_t i = 0;
+ int32_t num_buff = sizeof(k_frame_info.output_buffer_info)/
+ sizeof(struct msm_cpp_buffer_info_t);
+ if (copy_from_user(&k_frame_info,
+ (void __user *)ioctl_ptr->ioctl_ptr,
+ sizeof(k_frame_info)))
+ return -EFAULT;
+
+ frame = msm_cpp_get_frame(ioctl_ptr);
+ if (!frame) {
+ pr_err("%s: Error allocating frame\n", __func__);
+ rc = -EINVAL;
+ } else {
+ rc = msm_cpp_cfg_frame(cpp_dev, frame);
+ if (rc >= 0) {
+ for (i = 0; i < num_buff; i++) {
+ k_frame_info.output_buffer_info[i] =
+ frame->output_buffer_info[i];
+ }
+ }
+ }
+
+ ioctl_ptr->trans_code = rc;
+
+ if (copy_to_user((void __user *)k_frame_info.status, &rc,
+ sizeof(int32_t)))
+		pr_err("error: cannot copy status to user\n");
+
+ if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+ &k_frame_info, sizeof(k_frame_info))) {
+		pr_err("Error: cannot copy k_frame_info\n");
+ return -EFAULT;
+ }
+
+ return rc;
+}
+
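+/* Drain the processing queue and free any frames still pending on it. */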
+void msm_cpp_clean_queue(struct cpp_device *cpp_dev)
+{
+ struct msm_queue_cmd *frame_qcmd = NULL;
+ struct msm_cpp_frame_info_t *processed_frame = NULL;
+ struct msm_device_queue *queue = NULL;
+
+ while (cpp_dev->processing_q.len) {
+ pr_debug("queue len:%d\n", cpp_dev->processing_q.len);
+ queue = &cpp_dev->processing_q;
+ frame_qcmd = msm_dequeue(queue, list_frame, POP_FRONT);
+ if (frame_qcmd) {
+ processed_frame = frame_qcmd->command;
+ kfree(frame_qcmd);
+ if (processed_frame)
+ kfree(processed_frame->cpp_cmd_msg);
+ kfree(processed_frame);
+ }
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static int msm_cpp_copy_from_ioctl_ptr(void *dst_ptr,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ int ret;
+ if ((ioctl_ptr->ioctl_ptr == NULL) || (ioctl_ptr->len == 0)) {
+ pr_err("%s: Wrong ioctl_ptr %p / len %zu\n", __func__,
+ ioctl_ptr, ioctl_ptr->len);
+ return -EINVAL;
+ }
+
+ /* For compat task, source ptr is in kernel space */
+ if (is_compat_task()) {
+ memcpy(dst_ptr, ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
+ ret = 0;
+ } else {
+ ret = copy_from_user(dst_ptr,
+ (void __user *)ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
+ if (ret)
+ pr_err("Copy from user fail %d\n", ret);
+ }
+ return ret ? -EFAULT : 0;
+}
+#else
+static int msm_cpp_copy_from_ioctl_ptr(void *dst_ptr,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ int ret;
+ if ((ioctl_ptr->ioctl_ptr == NULL) || (ioctl_ptr->len == 0)) {
+ pr_err("%s: Wrong ioctl_ptr %p / len %zu\n", __func__,
+ ioctl_ptr, ioctl_ptr->len);
+ return -EINVAL;
+ }
+
+ ret = copy_from_user(dst_ptr,
+ (void __user *)ioctl_ptr->ioctl_ptr, ioctl_ptr->len);
+ if (ret)
+ pr_err("Copy from user fail %d\n", ret);
+
+ return ret ? -EFAULT : 0;
+}
+#endif
+
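+/*
+ * Query the firmware version: send the GET_FW_VER command sequence and
+ * poll for the expected response words before reading the version.
+ */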
+static int32_t msm_cpp_fw_version(struct cpp_device *cpp_dev)
+{
+ int32_t rc = 0;
+
+ /*Get Firmware Version*/
+ msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_MSG_ID_CMD, cpp_dev->base);
+ msm_cpp_write(0x1, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_CMD_GET_FW_VER, cpp_dev->base);
+ msm_cpp_write(MSM_CPP_MSG_ID_TRAILER, cpp_dev->base);
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_CMD, rc);
+ goto end;
+ }
+ rc = msm_cpp_poll(cpp_dev->base, 0x2);
+ if (rc) {
+ pr_err("%s:%d] poll command 0x2 failed %d", __func__, __LINE__,
+ rc);
+ goto end;
+ }
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_FW_VER);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_FW_VER, rc);
+ goto end;
+ }
+
+ cpp_dev->fw_version = msm_cpp_read(cpp_dev->base);
+ pr_debug("CPP FW Version: 0x%08x\n", cpp_dev->fw_version);
+
+ rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
+ if (rc) {
+ pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
+ MSM_CPP_MSG_ID_TRAILER, rc);
+ }
+
+end:
+ return rc;
+}
+
+static int msm_cpp_validate_input(unsigned int cmd, void *arg,
+ struct msm_camera_v4l2_ioctl_t **ioctl_ptr)
+{
+ switch (cmd) {
+ case MSM_SD_SHUTDOWN:
+ break;
+ default: {
+ if (ioctl_ptr == NULL) {
+ pr_err("Wrong ioctl_ptr %p\n", ioctl_ptr);
+ return -EINVAL;
+ }
+
+ *ioctl_ptr = arg;
+ if ((*ioctl_ptr == NULL) ||
+ ((*ioctl_ptr)->ioctl_ptr == NULL)) {
+ pr_err("Wrong arg %p\n", arg);
+ return -EINVAL;
+ }
+ break;
+ }
+ }
+ return 0;
+}
+
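+/* Main subdev ioctl handler; commands are serialized by cpp_dev->mutex. */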
+long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct cpp_device *cpp_dev = NULL;
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = NULL;
+ int rc = 0;
+
+ if (sd == NULL) {
+		pr_err("%s: sd is NULL\n", __func__);
+ return -EINVAL;
+ }
+ cpp_dev = v4l2_get_subdevdata(sd);
+ if (cpp_dev == NULL) {
+ pr_err("cpp_dev is null\n");
+ return -EINVAL;
+ }
+ rc = msm_cpp_validate_input(cmd, arg, &ioctl_ptr);
+ if (rc != 0) {
+ pr_err("input validation failed\n");
+ return rc;
+ }
+ mutex_lock(&cpp_dev->mutex);
+
+ CPP_DBG("E cmd: 0x%x\n", cmd);
+ switch (cmd) {
+ case VIDIOC_MSM_CPP_GET_HW_INFO: {
+ CPP_DBG("VIDIOC_MSM_CPP_GET_HW_INFO\n");
+ if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+ &cpp_dev->hw_info,
+ sizeof(struct cpp_hw_info))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+ break;
+ }
+
+ case VIDIOC_MSM_CPP_LOAD_FIRMWARE: {
+ CPP_DBG("VIDIOC_MSM_CPP_LOAD_FIRMWARE\n");
+ if (cpp_dev->is_firmware_loaded == 0) {
+ if (cpp_dev->fw_name_bin != NULL) {
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ }
+ if (cpp_dev->fw) {
+ release_firmware(cpp_dev->fw);
+ cpp_dev->fw = NULL;
+ }
+ if ((ioctl_ptr->len == 0) ||
+ (ioctl_ptr->len > MSM_CPP_MAX_FW_NAME_LEN)) {
+				pr_err("invalid ioctl_ptr->len %zu\n",
+					ioctl_ptr->len);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ cpp_dev->fw_name_bin = kzalloc(ioctl_ptr->len+1,
+ GFP_KERNEL);
+ if (!cpp_dev->fw_name_bin) {
+ pr_err("%s:%d: malloc error\n", __func__,
+ __LINE__);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ if (ioctl_ptr->ioctl_ptr == NULL) {
+ pr_err("ioctl_ptr->ioctl_ptr=NULL\n");
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ rc = (copy_from_user(cpp_dev->fw_name_bin,
+ (void __user *)ioctl_ptr->ioctl_ptr,
+ ioctl_ptr->len) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ *(cpp_dev->fw_name_bin+ioctl_ptr->len) = '\0';
+ rc = request_firmware(&cpp_dev->fw,
+ cpp_dev->fw_name_bin,
+ &cpp_dev->pdev->dev);
+ if (rc) {
+ dev_err(&cpp_dev->pdev->dev,
+					"Failed to locate blob %s for dev %p, rc:%d\n",
+ cpp_dev->fw_name_bin,
+ &cpp_dev->pdev->dev, rc);
+ kfree(cpp_dev->fw_name_bin);
+ cpp_dev->fw_name_bin = NULL;
+ cpp_dev->fw = NULL;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ disable_irq(cpp_dev->irq->start);
+ rc = cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+			if (rc < 0) {
+				pr_err("%s: load firmware failure %d\n",
+					__func__, rc);
+				enable_irq(cpp_dev->irq->start);
+				mutex_unlock(&cpp_dev->mutex);
+				return rc;
+			}
+ rc = msm_cpp_fw_version(cpp_dev);
+			if (rc < 0) {
+				pr_err("%s: get firmware version failure %d\n",
+					__func__, rc);
+				enable_irq(cpp_dev->irq->start);
+				mutex_unlock(&cpp_dev->mutex);
+				return rc;
+			}
+ enable_irq(cpp_dev->irq->start);
+ cpp_dev->is_firmware_loaded = 1;
+ }
+ break;
+ }
+ case VIDIOC_MSM_CPP_CFG:
+ CPP_DBG("VIDIOC_MSM_CPP_CFG\n");
+ rc = msm_cpp_cfg(cpp_dev, ioctl_ptr);
+ break;
+ case VIDIOC_MSM_CPP_FLUSH_QUEUE:
+ CPP_DBG("VIDIOC_MSM_CPP_FLUSH_QUEUE\n");
+ rc = msm_cpp_flush_frames(cpp_dev);
+ break;
+ case VIDIOC_MSM_CPP_DELETE_STREAM_BUFF:
+ case VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO:
+ case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO: {
+ uint32_t j;
+ struct msm_cpp_stream_buff_info_t *u_stream_buff_info = NULL;
+ struct msm_cpp_stream_buff_info_t k_stream_buff_info;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info = NULL;
+
+ memset(&k_stream_buff_info, 0, sizeof(k_stream_buff_info));
+ CPP_DBG("VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO\n");
+ if (sizeof(struct msm_cpp_stream_buff_info_t) !=
+ ioctl_ptr->len) {
+ pr_err("%s:%d: invalid length\n", __func__, __LINE__);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ u_stream_buff_info = kzalloc(ioctl_ptr->len, GFP_KERNEL);
+ if (!u_stream_buff_info) {
+ pr_err("%s:%d: malloc error\n", __func__, __LINE__);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = msm_cpp_copy_from_ioctl_ptr(u_stream_buff_info,
+ ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ kfree(u_stream_buff_info);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ k_stream_buff_info.num_buffs = u_stream_buff_info->num_buffs;
+ k_stream_buff_info.identity = u_stream_buff_info->identity;
+
+ if (k_stream_buff_info.num_buffs > MSM_CAMERA_MAX_STREAM_BUF) {
+			pr_err("%s:%d: unexpectedly large num_buffs requested\n",
+ __func__, __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ if (u_stream_buff_info->num_buffs != 0) {
+ k_stream_buff_info.buffer_info =
+ kzalloc(k_stream_buff_info.num_buffs *
+ sizeof(struct msm_cpp_buffer_info_t),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(k_stream_buff_info.buffer_info)) {
+ pr_err("%s:%d: malloc error\n",
+ __func__, __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = (copy_from_user(k_stream_buff_info.buffer_info,
+ (void __user *)u_stream_buff_info->buffer_info,
+ k_stream_buff_info.num_buffs *
+ sizeof(struct msm_cpp_buffer_info_t)) ?
+ -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ }
+
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
+ (k_stream_buff_info.identity >> 16) & 0xFFFF,
+ k_stream_buff_info.identity & 0xFFFF);
+
+ if (buff_queue_info == NULL) {
+ if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF)
+ goto STREAM_BUFF_END;
+
+ rc = msm_cpp_add_buff_queue_entry(cpp_dev,
+ ((k_stream_buff_info.identity >> 16) & 0xFFFF),
+ (k_stream_buff_info.identity & 0xFFFF));
+
+ if (rc)
+ goto STREAM_BUFF_END;
+
+ if (cpp_dev->stream_cnt == 0) {
+ cpp_dev->state = CPP_STATE_ACTIVE;
+ msm_cpp_clear_timer(cpp_dev);
+ msm_cpp_clean_queue(cpp_dev);
+ }
+ cpp_dev->stream_cnt++;
+ CPP_DBG("stream_cnt:%d\n", cpp_dev->stream_cnt);
+ }
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
+ ((k_stream_buff_info.identity >> 16) & 0xFFFF),
+ (k_stream_buff_info.identity & 0xFFFF));
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry identity:%d\n",
+ k_stream_buff_info.identity);
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+ cpp_dev->stream_cnt--;
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ if (VIDIOC_MSM_CPP_DELETE_STREAM_BUFF == cmd) {
+ for (j = 0; j < k_stream_buff_info.num_buffs; j++) {
+ msm_cpp_dequeue_buff(cpp_dev, buff_queue_info,
+ k_stream_buff_info.buffer_info[j].index,
+ k_stream_buff_info.buffer_info[j].native_buff);
+ }
+ } else {
+ for (j = 0; j < k_stream_buff_info.num_buffs; j++) {
+ msm_cpp_queue_buffer_info(cpp_dev,
+ buff_queue_info,
+ &k_stream_buff_info.buffer_info[j]);
+ }
+ }
+
+STREAM_BUFF_END:
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+
+ break;
+ }
+ case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO: {
+ uint32_t identity;
+ struct msm_cpp_buff_queue_info_t *buff_queue_info;
+ CPP_DBG("VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO\n");
+ if ((ioctl_ptr->len == 0) ||
+ (ioctl_ptr->len > sizeof(uint32_t))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = msm_cpp_copy_from_ioctl_ptr(&identity, ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ buff_queue_info = msm_cpp_get_buff_queue_entry(cpp_dev,
+ ((identity >> 16) & 0xFFFF), (identity & 0xFFFF));
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for identity:%d\n",
+ identity);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ msm_cpp_dequeue_buff_info_list(cpp_dev, buff_queue_info);
+ rc = msm_cpp_free_buff_queue_entry(cpp_dev,
+ buff_queue_info->session_id,
+ buff_queue_info->stream_id);
+ if (cpp_dev->stream_cnt > 0) {
+ cpp_dev->stream_cnt--;
+ pr_debug("stream_cnt:%d\n", cpp_dev->stream_cnt);
+ if (cpp_dev->stream_cnt == 0) {
+ rc = msm_cpp_update_bandwidth_setting(cpp_dev,
+ 0, 0);
+ if (rc < 0)
+ pr_err("Bandwidth Reset Failed!\n");
+ cpp_dev->state = CPP_STATE_IDLE;
+ msm_cpp_clear_timer(cpp_dev);
+ msm_cpp_clean_queue(cpp_dev);
+ }
+ } else {
+ pr_err("error: stream count underflow %d\n",
+ cpp_dev->stream_cnt);
+ }
+ break;
+ }
+ case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
+ struct msm_device_queue *queue = &cpp_dev->eventData_q;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_cpp_frame_info_t *process_frame;
+ CPP_DBG("VIDIOC_MSM_CPP_GET_EVENTPAYLOAD\n");
+ event_qcmd = msm_dequeue(queue, list_eventdata, POP_FRONT);
+ if (!event_qcmd) {
+			pr_err("no queue cmd available\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ process_frame = event_qcmd->command;
+ CPP_DBG("fid %d\n", process_frame->frame_id);
+ if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+ process_frame,
+ sizeof(struct msm_cpp_frame_info_t))) {
+ mutex_unlock(&cpp_dev->mutex);
+ kfree(process_frame->cpp_cmd_msg);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ return -EFAULT;
+ }
+
+ kfree(process_frame->cpp_cmd_msg);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ break;
+ }
+ case VIDIOC_MSM_CPP_SET_CLOCK: {
+		int32_t msm_cpp_core_clk_idx;
+ struct msm_cpp_clock_settings_t clock_settings;
+ unsigned long clock_rate = 0;
+ CPP_DBG("VIDIOC_MSM_CPP_SET_CLOCK\n");
+ if (ioctl_ptr->len == 0) {
+ pr_err("ioctl_ptr->len is 0\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ if (ioctl_ptr->ioctl_ptr == NULL) {
+ pr_err("ioctl_ptr->ioctl_ptr is NULL\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ if (ioctl_ptr->len != sizeof(struct msm_cpp_clock_settings_t)) {
+			pr_err("invalid ioctl_ptr->len\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = msm_cpp_copy_from_ioctl_ptr(&clock_settings, ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ if (clock_settings.clock_rate > 0) {
+ msm_cpp_core_clk_idx = get_clock_index("cpp_core_clk");
+			if (msm_cpp_core_clk_idx < 0) {
+				pr_err("Failed to get cpp_core_clk index\n");
+				mutex_unlock(&cpp_dev->mutex);
+				return -EINVAL;
+			}
+ rc = msm_cpp_update_bandwidth_setting(cpp_dev,
+ clock_settings.avg,
+ clock_settings.inst);
+ if (rc < 0) {
+ pr_err("Bandwidth Set Failed!\n");
+ rc = msm_cpp_update_bandwidth_setting(cpp_dev,
+ 0, 0);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ clock_rate = clk_round_rate(
+ cpp_dev->cpp_clk[msm_cpp_core_clk_idx],
+ clock_settings.clock_rate);
+ if (clock_rate != clock_settings.clock_rate)
+ pr_err("clock rate differ from settings\n");
+ clk_set_rate(cpp_dev->cpp_clk[msm_cpp_core_clk_idx],
+ clock_rate);
+ msm_isp_util_update_clk_rate(clock_rate);
+ }
+ break;
+ }
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ case MSM_SD_SHUTDOWN:
+ CPP_DBG("MSM_SD_SHUTDOWN\n");
+ mutex_unlock(&cpp_dev->mutex);
+ pr_warn("shutdown cpp node. open cnt:%d\n",
+ cpp_dev->cpp_open_cnt);
+
+ if (atomic_read(&cpp_timer.used))
+ pr_debug("Timer state not cleared\n");
+
+ while (cpp_dev->cpp_open_cnt != 0)
+ cpp_close_node(sd, NULL);
+ mutex_lock(&cpp_dev->mutex);
+ rc = 0;
+ break;
+ case VIDIOC_MSM_CPP_QUEUE_BUF: {
+ struct msm_pproc_queue_buf_info queue_buf_info;
+ CPP_DBG("VIDIOC_MSM_CPP_QUEUE_BUF\n");
+
+		if (ioctl_ptr->len != sizeof(struct msm_pproc_queue_buf_info)) {
+			pr_err("%s: invalid ioctl_ptr->len\n", __func__);
+			mutex_unlock(&cpp_dev->mutex);
+			return -EINVAL;
+		}
+ rc = msm_cpp_copy_from_ioctl_ptr(&queue_buf_info, ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ break;
+ }
+
+ if (queue_buf_info.is_buf_dirty) {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ &queue_buf_info.buff_mgr_info);
+ } else {
+ rc = msm_cpp_buffer_ops(cpp_dev,
+ VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ &queue_buf_info.buff_mgr_info);
+ }
+ if (rc < 0) {
+ pr_err("error in buf done\n");
+ rc = -EINVAL;
+ }
+
+ break;
+ }
+ case VIDIOC_MSM_CPP_POP_STREAM_BUFFER: {
+ struct msm_buf_mngr_info buff_mgr_info;
+ struct msm_cpp_frame_info_t frame_info;
+ if (ioctl_ptr->ioctl_ptr == NULL ||
+ (ioctl_ptr->len !=
+ sizeof(struct msm_cpp_frame_info_t))) {
+ rc = -EINVAL;
+ break;
+ }
+
+ rc = msm_cpp_copy_from_ioctl_ptr(&frame_info, ioctl_ptr);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ break;
+ }
+
+ memset(&buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
+ buff_mgr_info.session_id =
+ ((frame_info.identity >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id = (frame_info.identity & 0xFFFF);
+ buff_mgr_info.type =
+ MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
+ rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_GET_BUF,
+ &buff_mgr_info);
+		if (rc < 0) {
+			pr_err_ratelimited("error getting buffer rc:%d\n", rc);
+			rc = -EAGAIN;
+			break;
+ }
+ buff_mgr_info.frame_id = frame_info.frame_id;
+ rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error in buf done\n");
+ rc = -EAGAIN;
+ }
+ break;
+ }
+ case VIDIOC_MSM_CPP_IOMMU_ATTACH: {
+ if (cpp_dev->iommu_state == CPP_IOMMU_STATE_DETACHED) {
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl, CAM_SMMU_ATTACH);
+ if (rc < 0) {
+				pr_err("%s:%d Error: iommu attach failed\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_ATTACHED;
+ } else {
+			pr_err("%s:%d IOMMU attach triggered in invalid state\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ }
+ break;
+ }
+ case VIDIOC_MSM_CPP_IOMMU_DETACH: {
+ if ((cpp_dev->iommu_state == CPP_IOMMU_STATE_ATTACHED) &&
+ (cpp_dev->stream_cnt == 0)) {
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl, CAM_SMMU_DETACH);
+ if (rc < 0) {
+				pr_err("%s:%d Error: iommu detach failed\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
+ } else {
+			pr_err("%s:%d IOMMU detach triggered in invalid state\n",
+ __func__, __LINE__);
+ }
+ break;
+	}
+	default:
+		pr_err_ratelimited("invalid value: cmd=0x%x\n", cmd);
+		break;
+ }
+ mutex_unlock(&cpp_dev->mutex);
+ CPP_DBG("X\n");
+ return rc;
+}
+
+int msm_cpp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ CPP_DBG("Called\n");
+ return v4l2_event_subscribe(fh, sub, MAX_CPP_V4l2_EVENTS, NULL);
+}
+
+int msm_cpp_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ CPP_DBG("Called\n");
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static struct v4l2_subdev_core_ops msm_cpp_subdev_core_ops = {
+ .ioctl = msm_cpp_subdev_ioctl,
+ .subscribe_event = msm_cpp_subscribe_event,
+ .unsubscribe_event = msm_cpp_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops msm_cpp_subdev_ops = {
+ .core = &msm_cpp_subdev_core_ops,
+};
+
+static long msm_cpp_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ struct v4l2_fh *vfh = NULL;
+
+ if ((arg == NULL) || (file == NULL)) {
+ pr_err("Invalid input parameters arg %p, file %p\n", arg, file);
+ return -EINVAL;
+ }
+ vdev = video_devdata(file);
+ sd = vdev_to_v4l2_subdev(vdev);
+
+ if (sd == NULL) {
+ pr_err("Invalid input parameter sd %p\n", sd);
+ return -EINVAL;
+ }
+ vfh = file->private_data;
+
+ switch (cmd) {
+ case VIDIOC_DQEVENT:
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+
+ case VIDIOC_MSM_CPP_GET_INST_INFO: {
+ uint32_t i;
+ struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+ struct msm_cpp_frame_info_t inst_info;
+ memset(&inst_info, 0, sizeof(struct msm_cpp_frame_info_t));
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
+ inst_info.inst_id = i;
+ break;
+ }
+ }
+ if (copy_to_user(
+ (void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
+ sizeof(struct msm_cpp_frame_info_t))) {
+ return -EFAULT;
+ }
+ }
+ break;
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+
+ return 0;
+}
+
+static long msm_cpp_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_cpp_subdev_do_ioctl);
+}
+
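+/*
+ * 32-bit compat handling: ioctl payloads are translated between their
+ * 32-bit and 64-bit forms; most commands are then forwarded to the
+ * regular ioctl handler.
+ */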
+#ifdef CONFIG_COMPAT
+static struct msm_cpp_frame_info_t *get_64bit_cpp_frame_from_compat(
+ struct msm_camera_v4l2_ioctl_t *kp_ioctl)
+{
+ struct msm_cpp_frame_info32_t *new_frame32 = NULL;
+ struct msm_cpp_frame_info_t *new_frame = NULL;
+ uint32_t *cpp_frame_msg;
+ void *cpp_cmd_msg_64bit;
+ int32_t rc, i;
+
+ new_frame32 = kzalloc(sizeof(struct msm_cpp_frame_info32_t),
+ GFP_KERNEL);
+ if (!new_frame32) {
+ pr_err("Insufficient memory\n");
+ goto no_mem32;
+ }
+ new_frame = kzalloc(sizeof(struct msm_cpp_frame_info_t), GFP_KERNEL);
+ if (!new_frame) {
+ pr_err("Insufficient memory\n");
+ goto no_mem;
+ }
+
+ rc = (copy_from_user(new_frame32, (void __user *)kp_ioctl->ioctl_ptr,
+ sizeof(struct msm_cpp_frame_info32_t)) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ goto frame_err;
+ }
+
+ new_frame->frame_id = new_frame32->frame_id;
+ new_frame->inst_id = new_frame32->inst_id;
+ new_frame->client_id = new_frame32->client_id;
+ new_frame->frame_type = new_frame32->frame_type;
+ new_frame->num_strips = new_frame32->num_strips;
+
+ new_frame->src_fd = new_frame32->src_fd;
+ new_frame->dst_fd = new_frame32->dst_fd;
+
+ new_frame->timestamp.tv_sec =
+ (unsigned long)new_frame32->timestamp.tv_sec;
+ new_frame->timestamp.tv_usec =
+ (unsigned long)new_frame32->timestamp.tv_usec;
+
+ new_frame->in_time.tv_sec =
+ (unsigned long)new_frame32->in_time.tv_sec;
+ new_frame->in_time.tv_usec =
+ (unsigned long)new_frame32->in_time.tv_usec;
+
+ new_frame->out_time.tv_sec =
+ (unsigned long)new_frame32->out_time.tv_sec;
+ new_frame->out_time.tv_usec =
+ (unsigned long)new_frame32->out_time.tv_usec;
+
+ new_frame->msg_len = new_frame32->msg_len;
+ new_frame->identity = new_frame32->identity;
+ new_frame->input_buffer_info = new_frame32->input_buffer_info;
+ new_frame->output_buffer_info[0] =
+ new_frame32->output_buffer_info[0];
+ new_frame->output_buffer_info[1] =
+ new_frame32->output_buffer_info[1];
+ new_frame->output_buffer_info[2] =
+ new_frame32->output_buffer_info[2];
+ new_frame->output_buffer_info[3] =
+ new_frame32->output_buffer_info[3];
+ new_frame->output_buffer_info[4] =
+ new_frame32->output_buffer_info[4];
+ new_frame->output_buffer_info[5] =
+ new_frame32->output_buffer_info[5];
+ new_frame->output_buffer_info[6] =
+ new_frame32->output_buffer_info[6];
+ new_frame->output_buffer_info[7] =
+ new_frame32->output_buffer_info[7];
+ new_frame->duplicate_buffer_info =
+ new_frame32->duplicate_buffer_info;
+ new_frame->tnr_scratch_buffer_info[0] =
+ new_frame32->tnr_scratch_buffer_info[0];
+ new_frame->tnr_scratch_buffer_info[1] =
+ new_frame32->tnr_scratch_buffer_info[1];
+ new_frame->duplicate_output = new_frame32->duplicate_output;
+ new_frame->we_disable = new_frame32->we_disable;
+ new_frame->duplicate_identity = new_frame32->duplicate_identity;
+ new_frame->feature_mask = new_frame32->feature_mask;
+ new_frame->reserved = new_frame32->reserved;
+ new_frame->partial_frame_indicator =
+ new_frame32->partial_frame_indicator;
+ new_frame->first_payload = new_frame32->first_payload;
+ new_frame->last_payload = new_frame32->last_payload;
+ new_frame->first_stripe_index = new_frame32->first_stripe_index;
+ new_frame->last_stripe_index = new_frame32->last_stripe_index;
+ new_frame->stripe_info_offset =
+ new_frame32->stripe_info_offset;
+ new_frame->stripe_info = new_frame32->stripe_info;
+ new_frame->batch_info.batch_mode =
+ new_frame32->batch_info.batch_mode;
+ new_frame->batch_info.batch_size =
+ new_frame32->batch_info.batch_size;
+ new_frame->batch_info.cont_idx =
+ new_frame32->batch_info.cont_idx;
+ for (i = 0; i < MAX_PLANES; i++)
+ new_frame->batch_info.intra_plane_offset[i] =
+ new_frame32->batch_info.intra_plane_offset[i];
+ new_frame->batch_info.pick_preview_idx =
+ new_frame32->batch_info.pick_preview_idx;
+
+ /* Convert the 32 bit pointer to 64 bit pointer */
+ new_frame->cookie = compat_ptr(new_frame32->cookie);
+ cpp_cmd_msg_64bit = compat_ptr(new_frame32->cpp_cmd_msg);
+ if ((new_frame->msg_len == 0) ||
+ (new_frame->msg_len > MSM_CPP_MAX_FRAME_LENGTH)) {
+ pr_err("%s:%d: Invalid frame len:%d\n", __func__,
+ __LINE__, new_frame->msg_len);
+ goto frame_err;
+ }
+
+ cpp_frame_msg = kzalloc(sizeof(uint32_t)*new_frame->msg_len,
+ GFP_KERNEL);
+ if (!cpp_frame_msg) {
+ pr_err("Insufficient memory\n");
+ goto frame_err;
+ }
+
+ rc = (copy_from_user(cpp_frame_msg,
+ (void __user *)cpp_cmd_msg_64bit,
+ sizeof(uint32_t)*new_frame->msg_len) ? -EFAULT : 0);
+ if (rc) {
+ ERR_COPY_FROM_USER();
+ goto frame_msg_err;
+ }
+ new_frame->cpp_cmd_msg = cpp_frame_msg;
+
+ kfree(new_frame32);
+ return new_frame;
+
+frame_msg_err:
+ kfree(cpp_frame_msg);
+frame_err:
+ kfree(new_frame);
+no_mem:
+ kfree(new_frame32);
+no_mem32:
+ return NULL;
+}
+
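+/*
+ * Mirror a 64-bit frame info structure back into its 32-bit compat form
+ * for user space.
+ */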
+static void get_compat_frame_from_64bit(struct msm_cpp_frame_info_t *frame,
+ struct msm_cpp_frame_info32_t *k32_frame)
+{
+ int32_t i;
+
+ k32_frame->frame_id = frame->frame_id;
+ k32_frame->inst_id = frame->inst_id;
+ k32_frame->client_id = frame->client_id;
+ k32_frame->frame_type = frame->frame_type;
+ k32_frame->num_strips = frame->num_strips;
+
+ k32_frame->src_fd = frame->src_fd;
+ k32_frame->dst_fd = frame->dst_fd;
+
+ k32_frame->timestamp.tv_sec = (uint32_t)frame->timestamp.tv_sec;
+ k32_frame->timestamp.tv_usec = (uint32_t)frame->timestamp.tv_usec;
+
+ k32_frame->in_time.tv_sec = (uint32_t)frame->in_time.tv_sec;
+ k32_frame->in_time.tv_usec = (uint32_t)frame->in_time.tv_usec;
+
+ k32_frame->out_time.tv_sec = (uint32_t)frame->out_time.tv_sec;
+ k32_frame->out_time.tv_usec = (uint32_t)frame->out_time.tv_usec;
+
+ k32_frame->msg_len = frame->msg_len;
+ k32_frame->identity = frame->identity;
+ k32_frame->input_buffer_info = frame->input_buffer_info;
+ k32_frame->output_buffer_info[0] = frame->output_buffer_info[0];
+ k32_frame->output_buffer_info[1] = frame->output_buffer_info[1];
+ k32_frame->output_buffer_info[2] = frame->output_buffer_info[2];
+ k32_frame->output_buffer_info[3] = frame->output_buffer_info[3];
+ k32_frame->output_buffer_info[4] = frame->output_buffer_info[4];
+ k32_frame->output_buffer_info[5] = frame->output_buffer_info[5];
+ k32_frame->output_buffer_info[6] = frame->output_buffer_info[6];
+ k32_frame->output_buffer_info[7] = frame->output_buffer_info[7];
+ k32_frame->duplicate_buffer_info = frame->duplicate_buffer_info;
+ k32_frame->duplicate_output = frame->duplicate_output;
+ k32_frame->we_disable = frame->we_disable;
+ k32_frame->duplicate_identity = frame->duplicate_identity;
+ k32_frame->feature_mask = frame->feature_mask;
+ k32_frame->reserved = frame->reserved;
+ k32_frame->cookie = ptr_to_compat(frame->cookie);
+ k32_frame->partial_frame_indicator = frame->partial_frame_indicator;
+ k32_frame->first_payload = frame->first_payload;
+ k32_frame->last_payload = frame->last_payload;
+ k32_frame->first_stripe_index = frame->first_stripe_index;
+ k32_frame->last_stripe_index = frame->last_stripe_index;
+ k32_frame->stripe_info_offset = frame->stripe_info_offset;
+ k32_frame->stripe_info = frame->stripe_info;
+ k32_frame->batch_info.batch_mode = frame->batch_info.batch_mode;
+ k32_frame->batch_info.batch_size = frame->batch_info.batch_size;
+ k32_frame->batch_info.cont_idx = frame->batch_info.cont_idx;
+ for (i = 0; i < MAX_PLANES; i++)
+ k32_frame->batch_info.intra_plane_offset[i] =
+ frame->batch_info.intra_plane_offset[i];
+ k32_frame->batch_info.pick_preview_idx =
+ frame->batch_info.pick_preview_idx;
+}
+
+static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct cpp_device *cpp_dev = NULL;
+
+ int32_t rc = 0;
+ struct msm_camera_v4l2_ioctl_t kp_ioctl;
+ struct msm_camera_v4l2_ioctl32_t up32_ioctl;
+ struct msm_cpp_clock_settings_t clock_settings;
+ struct msm_pproc_queue_buf_info k_queue_buf;
+ struct msm_cpp_stream_buff_info_t k_cpp_buff_info;
+ struct msm_cpp_frame_info32_t k32_frame_info;
+ struct msm_cpp_frame_info_t k64_frame_info;
+ uint32_t identity_k = 0;
+ void __user *up = (void __user *)arg;
+
+ if (sd == NULL) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return -EINVAL;
+ }
+ cpp_dev = v4l2_get_subdevdata(sd);
+ if (!vdev || !cpp_dev) {
+		pr_err("Invalid vdev %p or cpp_dev %p structures!\n",
+ vdev, cpp_dev);
+ return -EINVAL;
+ }
+ mutex_lock(&cpp_dev->mutex);
+	/*
+	 * Copy the 32-bit ioctl wrapper structure from user space into the
+	 * kernel compat representation.
+	 */
+ if (copy_from_user(&up32_ioctl, (void __user *)up,
+ sizeof(up32_ioctl))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ /* copy the data from 32 bit compat to kernel space 64 bit pointer */
+ kp_ioctl.id = up32_ioctl.id;
+ kp_ioctl.len = up32_ioctl.len;
+ kp_ioctl.trans_code = up32_ioctl.trans_code;
+ /* Convert the 32 bit pointer to 64 bit pointer */
+ kp_ioctl.ioctl_ptr = compat_ptr(up32_ioctl.ioctl_ptr);
+ if (!kp_ioctl.ioctl_ptr) {
+ pr_err("%s: Invalid ioctl pointer\n", __func__);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+
+ /*
+	 * Convert 32-bit ioctl IDs to their 64-bit counterparts, except
+	 * VIDIOC_MSM_CPP_CFG32, which needs special processing
+ */
+ switch (cmd) {
+ case VIDIOC_MSM_CPP_CFG32:
+ {
+ struct msm_cpp_frame_info32_t k32_frame_info;
+ struct msm_cpp_frame_info_t *cpp_frame = NULL;
+ int32_t *status;
+
+ if (copy_from_user(&k32_frame_info,
+ (void __user *)kp_ioctl.ioctl_ptr,
+ sizeof(k32_frame_info))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+ /* Get the cpp frame pointer */
+ cpp_frame = get_64bit_cpp_frame_from_compat(&kp_ioctl);
+
+ /* Configure the cpp frame */
+ if (cpp_frame) {
+ rc = msm_cpp_cfg_frame(cpp_dev, cpp_frame);
+			/* cpp_frame may be freed by msm_cpp_cfg_frame() on error */
+ if (rc >= 0) {
+ k32_frame_info.output_buffer_info[0] =
+ cpp_frame->output_buffer_info[0];
+ k32_frame_info.output_buffer_info[1] =
+ cpp_frame->output_buffer_info[1];
+ }
+		} else {
+			pr_err("%s: Error getting frame\n", __func__);
+			rc = -EINVAL;
+		}
+
+ kp_ioctl.trans_code = rc;
+
+ /* Convert the 32 bit pointer to 64 bit pointer */
+ status = compat_ptr(k32_frame_info.status);
+
+ if (copy_to_user((void __user *)status, &rc,
+ sizeof(int32_t)))
+			pr_err("error: cannot copy status to user\n");
+
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &k32_frame_info,
+ sizeof(k32_frame_info))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ cmd = VIDIOC_MSM_CPP_CFG;
+ break;
+ }
+ case VIDIOC_MSM_CPP_GET_HW_INFO32:
+ {
+ struct cpp_hw_info_32_t u32_cpp_hw_info;
+ uint32_t i;
+
+ u32_cpp_hw_info.cpp_hw_version =
+ cpp_dev->hw_info.cpp_hw_version;
+ u32_cpp_hw_info.cpp_hw_caps = cpp_dev->hw_info.cpp_hw_caps;
+ memset(&u32_cpp_hw_info.freq_tbl, 0x00,
+ sizeof(u32_cpp_hw_info.freq_tbl));
+ for (i = 0; i < cpp_dev->hw_info.freq_tbl_count; i++)
+ u32_cpp_hw_info.freq_tbl[i] =
+ cpp_dev->hw_info.freq_tbl[i];
+
+ u32_cpp_hw_info.freq_tbl_count =
+ cpp_dev->hw_info.freq_tbl_count;
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &u32_cpp_hw_info, sizeof(struct cpp_hw_info_32_t))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ cmd = VIDIOC_MSM_CPP_GET_HW_INFO;
+ break;
+ }
+ case VIDIOC_MSM_CPP_LOAD_FIRMWARE32:
+ cmd = VIDIOC_MSM_CPP_LOAD_FIRMWARE;
+ break;
+ case VIDIOC_MSM_CPP_GET_INST_INFO32:
+ {
+ struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
+ struct msm_cpp_frame_info32_t inst_info;
+ struct v4l2_fh *vfh = NULL;
+ uint32_t i;
+ vfh = file->private_data;
+ memset(&inst_info, 0, sizeof(struct msm_cpp_frame_info32_t));
+ for (i = 0; i < MAX_ACTIVE_CPP_INSTANCE; i++) {
+ if (cpp_dev->cpp_subscribe_list[i].vfh == vfh) {
+ inst_info.inst_id = i;
+ break;
+ }
+ }
+ if (copy_to_user(
+ (void __user *)kp_ioctl.ioctl_ptr, &inst_info,
+ sizeof(struct msm_cpp_frame_info32_t))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+ cmd = VIDIOC_MSM_CPP_GET_INST_INFO;
+ break;
+ }
+ case VIDIOC_MSM_CPP_FLUSH_QUEUE32:
+ cmd = VIDIOC_MSM_CPP_FLUSH_QUEUE;
+ break;
+ case VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO32:
+ case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO32:
+ case VIDIOC_MSM_CPP_DELETE_STREAM_BUFF32:
+ {
+ compat_uptr_t p;
+ struct msm_cpp_stream_buff_info32_t *u32_cpp_buff_info =
+ (struct msm_cpp_stream_buff_info32_t *)kp_ioctl.ioctl_ptr;
+
+ get_user(k_cpp_buff_info.identity,
+ &u32_cpp_buff_info->identity);
+ get_user(k_cpp_buff_info.num_buffs,
+ &u32_cpp_buff_info->num_buffs);
+ get_user(p, &u32_cpp_buff_info->buffer_info);
+ k_cpp_buff_info.buffer_info = compat_ptr(p);
+
+ kp_ioctl.ioctl_ptr = (void *)&k_cpp_buff_info;
+ if (is_compat_task()) {
+ if (kp_ioctl.len != sizeof(
+ struct msm_cpp_stream_buff_info32_t)) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ } else {
+ kp_ioctl.len =
+ sizeof(struct msm_cpp_stream_buff_info_t);
+ }
+ }
+ if (cmd == VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO32)
+ cmd = VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO;
+ else if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF32)
+ cmd = VIDIOC_MSM_CPP_DELETE_STREAM_BUFF;
+ else
+ cmd = VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO;
+ break;
+ }
+ case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO32: {
+ uint32_t *identity_u = (uint32_t *)kp_ioctl.ioctl_ptr;
+
+ get_user(identity_k, identity_u);
+ kp_ioctl.ioctl_ptr = (void *)&identity_k;
+ kp_ioctl.len = sizeof(uint32_t);
+ cmd = VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO;
+ break;
+ }
+ case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD32:
+ {
+ struct msm_device_queue *queue = &cpp_dev->eventData_q;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_cpp_frame_info_t *process_frame;
+ struct msm_cpp_frame_info32_t k32_process_frame;
+
+ CPP_DBG("VIDIOC_MSM_CPP_GET_EVENTPAYLOAD\n");
+ event_qcmd = msm_dequeue(queue, list_eventdata, POP_FRONT);
+ if (!event_qcmd) {
+			pr_err("no queue cmd available\n");
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ }
+ process_frame = event_qcmd->command;
+
+ memset(&k32_process_frame, 0, sizeof(k32_process_frame));
+ get_compat_frame_from_64bit(process_frame, &k32_process_frame);
+
+ CPP_DBG("fid %d\n", process_frame->frame_id);
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &k32_process_frame,
+ sizeof(struct msm_cpp_frame_info32_t))) {
+ kfree(process_frame->cpp_cmd_msg);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ kfree(process_frame->cpp_cmd_msg);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ cmd = VIDIOC_MSM_CPP_GET_EVENTPAYLOAD;
+ break;
+ }
+ case VIDIOC_MSM_CPP_SET_CLOCK32:
+ {
+ struct msm_cpp_clock_settings32_t *clock_settings32 =
+ (struct msm_cpp_clock_settings32_t *)kp_ioctl.ioctl_ptr;
+ get_user(clock_settings.clock_rate,
+ &clock_settings32->clock_rate);
+ get_user(clock_settings.avg, &clock_settings32->avg);
+ get_user(clock_settings.inst, &clock_settings32->inst);
+ kp_ioctl.ioctl_ptr = (void *)&clock_settings;
+ if (is_compat_task()) {
+ if (kp_ioctl.len != sizeof(
+ struct msm_cpp_clock_settings32_t)) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ } else {
+ kp_ioctl.len =
+ sizeof(struct msm_cpp_clock_settings_t);
+ }
+ }
+ cmd = VIDIOC_MSM_CPP_SET_CLOCK;
+ break;
+ }
+ case VIDIOC_MSM_CPP_QUEUE_BUF32:
+ {
+ struct msm_pproc_queue_buf_info32_t *u32_queue_buf =
+ (struct msm_pproc_queue_buf_info32_t *)kp_ioctl.ioctl_ptr;
+
+ get_user(k_queue_buf.is_buf_dirty,
+ &u32_queue_buf->is_buf_dirty);
+ get_user(k_queue_buf.buff_mgr_info.session_id,
+ &u32_queue_buf->buff_mgr_info.session_id);
+ get_user(k_queue_buf.buff_mgr_info.stream_id,
+ &u32_queue_buf->buff_mgr_info.stream_id);
+ get_user(k_queue_buf.buff_mgr_info.frame_id,
+ &u32_queue_buf->buff_mgr_info.frame_id);
+ get_user(k_queue_buf.buff_mgr_info.index,
+ &u32_queue_buf->buff_mgr_info.index);
+ get_user(k_queue_buf.buff_mgr_info.timestamp.tv_sec,
+ &u32_queue_buf->buff_mgr_info.timestamp.tv_sec);
+ get_user(k_queue_buf.buff_mgr_info.timestamp.tv_usec,
+ &u32_queue_buf->buff_mgr_info.timestamp.tv_usec);
+
+ /*
+		 * Update the reserved field (cds information) in the buffer
+		 * manager structure so that it is propagated back to the HAL
+ */
+ get_user(k_queue_buf.buff_mgr_info.reserved,
+ &u32_queue_buf->buff_mgr_info.reserved);
+
+ kp_ioctl.ioctl_ptr = (void *)&k_queue_buf;
+ kp_ioctl.len = sizeof(struct msm_pproc_queue_buf_info);
+ cmd = VIDIOC_MSM_CPP_QUEUE_BUF;
+ break;
+ }
+ case VIDIOC_MSM_CPP_POP_STREAM_BUFFER32:
+ {
+ if (kp_ioctl.len != sizeof(struct msm_cpp_frame_info32_t)) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EINVAL;
+ } else {
+ kp_ioctl.len = sizeof(struct msm_cpp_frame_info_t);
+ }
+
+ if (copy_from_user(&k32_frame_info,
+ (void __user *)kp_ioctl.ioctl_ptr,
+ sizeof(k32_frame_info))) {
+ mutex_unlock(&cpp_dev->mutex);
+ return -EFAULT;
+ }
+
+ memset(&k64_frame_info, 0, sizeof(k64_frame_info));
+ k64_frame_info.identity = k32_frame_info.identity;
+ k64_frame_info.frame_id = k32_frame_info.frame_id;
+
+ kp_ioctl.ioctl_ptr = (void *)&k64_frame_info;
+ cmd = VIDIOC_MSM_CPP_POP_STREAM_BUFFER;
+ break;
+ }
+ case VIDIOC_MSM_CPP_IOMMU_ATTACH32:
+ cmd = VIDIOC_MSM_CPP_IOMMU_ATTACH;
+ break;
+ case VIDIOC_MSM_CPP_IOMMU_DETACH32:
+ cmd = VIDIOC_MSM_CPP_IOMMU_DETACH;
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ case MSM_SD_SHUTDOWN:
+ cmd = MSM_SD_SHUTDOWN;
+ break;
+ default:
+		pr_err_ratelimited("%s: unsupported compat ioctl cmd 0x%x\n",
+			__func__, cmd);
+ break;
+ }
+
+ mutex_unlock(&cpp_dev->mutex);
+ switch (cmd) {
+ case VIDIOC_MSM_CPP_LOAD_FIRMWARE:
+ case VIDIOC_MSM_CPP_FLUSH_QUEUE:
+ case VIDIOC_MSM_CPP_APPEND_STREAM_BUFF_INFO:
+ case VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO:
+ case VIDIOC_MSM_CPP_DELETE_STREAM_BUFF:
+ case VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO:
+ case VIDIOC_MSM_CPP_SET_CLOCK:
+ case VIDIOC_MSM_CPP_QUEUE_BUF:
+ case VIDIOC_MSM_CPP_POP_STREAM_BUFFER:
+ case VIDIOC_MSM_CPP_IOMMU_ATTACH:
+ case VIDIOC_MSM_CPP_IOMMU_DETACH:
+ case MSM_SD_SHUTDOWN:
+ rc = v4l2_subdev_call(sd, core, ioctl, cmd, &kp_ioctl);
+ break;
+ case VIDIOC_MSM_CPP_GET_HW_INFO:
+ case VIDIOC_MSM_CPP_CFG:
+ case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD:
+ case VIDIOC_MSM_CPP_GET_INST_INFO:
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ default:
+		pr_err_ratelimited("%s: unsupported compat ioctl cmd 0x%x\n",
+			__func__, cmd);
+ break;
+ }
+
+ up32_ioctl.id = kp_ioctl.id;
+ up32_ioctl.len = kp_ioctl.len;
+ up32_ioctl.trans_code = kp_ioctl.trans_code;
+ up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
+
+ if (copy_to_user((void __user *)up, &up32_ioctl, sizeof(up32_ioctl)))
+ return -EFAULT;
+
+ return rc;
+}
+#endif
+
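+/*
+ * Read the CPP clock names and rates from the device tree ("clock-names"
+ * and "qcom,clock-rates"); a rate entry of 0 is stored as -1, and
+ * "qcom,min-clock-rate" is optional.
+ */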
+static int msm_cpp_get_clk_info(struct cpp_device *cpp_dev,
+ struct platform_device *pdev)
+{
+ uint32_t count;
+ int i, rc;
+ uint32_t rates[CPP_CLK_INFO_MAX];
+
+ struct device_node *of_node;
+ of_node = pdev->dev.of_node;
+
+ count = of_property_count_strings(of_node, "clock-names");
+
+ CPP_DBG("count = %d\n", count);
+ if (count == 0) {
+		pr_err("no clocks found in device tree, count=%d\n", count);
+ return 0;
+ }
+
+ if (count > CPP_CLK_INFO_MAX) {
+ pr_err("invalid count=%d, max is %d\n", count,
+ CPP_CLK_INFO_MAX);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node, "clock-names",
+ i, &(cpp_clk_info[i].clk_name));
+ CPP_DBG("clock-names[%d] = %s\n", i, cpp_clk_info[i].clk_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+ }
+ rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+ rates, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+ for (i = 0; i < count; i++) {
+ cpp_clk_info[i].clk_rate = (rates[i] == 0) ?
+ (long)-1 : rates[i];
+ CPP_DBG("clk_rate[%d] = %ld\n", i, cpp_clk_info[i].clk_rate);
+ }
+ cpp_dev->num_clk = count;
+ rc = of_property_read_u32(of_node, "qcom,min-clock-rate",
+ &cpp_dev->min_clk_rate);
+ if (rc < 0) {
+		CPP_DBG("qcom,min-clock-rate not defined, setting it to 0\n");
+ cpp_dev->min_clk_rate = 0;
+ }
+ return 0;
+}
+
+struct v4l2_file_operations msm_cpp_v4l2_subdev_fops = {
+ .unlocked_ioctl = msm_cpp_subdev_fops_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl32 = msm_cpp_subdev_fops_compat_ioctl,
+#endif
+};
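+
+/*
+ * Update the camss CPP GDSCR hw-control/sw-collapse bits based on the
+ * requested status.
+ */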
+static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
+ bool status)
+{
+ int rc = 0;
+ int value = 0;
+ if (!cpp_dev) {
+ pr_err("%s: cpp device invalid\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (cpp_dev->camss_cpp_base) {
+ value = msm_camera_io_r(cpp_dev->camss_cpp_base);
+ pr_debug("value from camss cpp %x, status %d\n", value, status);
+ if (status) {
+ value &= CPP_GDSCR_SW_COLLAPSE_ENABLE;
+ value |= CPP_GDSCR_HW_CONTROL_ENABLE;
+ } else {
+ value |= CPP_GDSCR_HW_CONTROL_DISABLE;
+ value &= CPP_GDSCR_SW_COLLAPSE_DISABLE;
+ }
+ pr_debug("value %x after camss cpp mask\n", value);
+ msm_camera_io_w(value, cpp_dev->camss_cpp_base);
+ }
+end:
+ return rc;
+}
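+
+/*
+ * Program the optional "qcom,vbif-qos-setting" offset/value pairs from
+ * the device tree into the VBIF register space.
+ */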
+static void msm_cpp_set_vbif_reg_values(struct cpp_device *cpp_dev)
+{
+ int i, reg, val;
+ const u32 *vbif_qos_arr = NULL;
+ int vbif_qos_len = 0;
+ struct platform_device *pdev;
+
+ pr_debug("%s\n", __func__);
+ if (cpp_dev != NULL) {
+ pdev = cpp_dev->pdev;
+ vbif_qos_arr = of_get_property(pdev->dev.of_node,
+ "qcom,vbif-qos-setting",
+ &vbif_qos_len);
+ if (!vbif_qos_arr || (vbif_qos_len & 1)) {
+ pr_debug("%s: vbif qos setting not found\n",
+ __func__);
+ vbif_qos_len = 0;
+ }
+ vbif_qos_len /= sizeof(u32);
+ pr_debug("%s: vbif_qos_len %d\n", __func__, vbif_qos_len);
+ if (cpp_dev->vbif_base) {
+ for (i = 0; i < vbif_qos_len; i = i+2) {
+ reg = be32_to_cpu(vbif_qos_arr[i]);
+ val = be32_to_cpu(vbif_qos_arr[i+1]);
+ pr_debug("%s: DT: offset %x, val %x\n",
+ __func__, reg, val);
+ pr_debug("%s: before write to register 0x%x\n",
+ __func__, msm_camera_io_r(
+ cpp_dev->vbif_base + reg));
+ msm_camera_io_w(val, cpp_dev->vbif_base + reg);
+ pr_debug("%s: after write to register 0x%x\n",
+ __func__, msm_camera_io_r(
+ cpp_dev->vbif_base + reg));
+ }
+ }
+ }
+}
+
+static int cpp_probe(struct platform_device *pdev)
+{
+ struct cpp_device *cpp_dev;
+ int rc = 0;
+ CPP_DBG("E");
+
+ cpp_dev = kzalloc(sizeof(struct cpp_device), GFP_KERNEL);
+ if (!cpp_dev) {
+		pr_err("not enough memory\n");
+ return -ENOMEM;
+ }
+
+ cpp_dev->cpp_clk = kzalloc(sizeof(struct clk *) *
+ ARRAY_SIZE(cpp_clk_info), GFP_KERNEL);
+ if (!cpp_dev->cpp_clk) {
+		pr_err("not enough memory\n");
+ rc = -ENOMEM;
+ goto clk_err;
+ }
+
+ v4l2_subdev_init(&cpp_dev->msm_sd.sd, &msm_cpp_subdev_ops);
+ cpp_dev->msm_sd.sd.internal_ops = &msm_cpp_internal_ops;
+ snprintf(cpp_dev->msm_sd.sd.name, ARRAY_SIZE(cpp_dev->msm_sd.sd.name),
+ "cpp");
+ cpp_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ cpp_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ v4l2_set_subdevdata(&cpp_dev->msm_sd.sd, cpp_dev);
+ platform_set_drvdata(pdev, &cpp_dev->msm_sd.sd);
+ mutex_init(&cpp_dev->mutex);
+ spin_lock_init(&cpp_dev->tasklet_lock);
+ spin_lock_init(&cpp_timer.data.processed_frame_lock);
+
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
+ cpp_dev->pdev = pdev;
+
+ cpp_dev->camss_cpp = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "camss_cpp");
+ if (!cpp_dev->camss_cpp)
+ pr_debug("no mem resource?\n");
+
+ cpp_dev->mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "cpp");
+ if (!cpp_dev->mem) {
+ pr_err("no mem resource?\n");
+ rc = -ENODEV;
+ goto mem_err;
+ }
+
+ cpp_dev->vbif_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "cpp_vbif");
+ if (!cpp_dev->vbif_mem) {
+ pr_err("no mem resource?\n");
+ rc = -ENODEV;
+ goto mem_err;
+ }
+
+ cpp_dev->cpp_hw_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "cpp_hw");
+ if (!cpp_dev->cpp_hw_mem) {
+ pr_err("no mem resource?\n");
+ rc = -ENODEV;
+ goto mem_err;
+ }
+
+ cpp_dev->irq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "cpp");
+ if (!cpp_dev->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto mem_err;
+ }
+
+ cpp_dev->io = request_mem_region(cpp_dev->mem->start,
+ resource_size(cpp_dev->mem), pdev->name);
+ if (!cpp_dev->io) {
+ pr_err("%s: no valid mem region\n", __func__);
+ rc = -EBUSY;
+ goto mem_err;
+ }
+
+	rc = msm_cpp_get_clk_info(cpp_dev, pdev);
+	if (rc < 0) {
+		pr_err("msm_cpp_get_clk_info() failed\n");
+		goto region_err;
+	}
+
+ if (pdev->dev.of_node)
+ rc = of_property_read_u32(pdev->dev.of_node, "qcom,bus-master",
+ &cpp_dev->bus_master_flag);
+ if (rc)
+ cpp_dev->bus_master_flag = 0;
+
+ rc = msm_cpp_read_payload_params_from_dt(cpp_dev);
+ if (rc)
+ goto cpp_probe_init_error;
+
+ rc = cpp_init_hardware(cpp_dev);
+ if (rc < 0)
+ goto cpp_probe_init_error;
+
+ media_entity_init(&cpp_dev->msm_sd.sd.entity, 0, NULL, 0);
+ cpp_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ cpp_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_CPP;
+ cpp_dev->msm_sd.sd.entity.name = pdev->name;
+ cpp_dev->msm_sd.close_seq = MSM_SD_CLOSE_3RD_CATEGORY;
+ msm_sd_register(&cpp_dev->msm_sd);
+ msm_cam_copy_v4l2_subdev_fops(&msm_cpp_v4l2_subdev_fops);
+ msm_cpp_v4l2_subdev_fops.unlocked_ioctl = msm_cpp_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+ msm_cpp_v4l2_subdev_fops.compat_ioctl32 =
+ msm_cpp_subdev_fops_compat_ioctl;
+#endif
+
+ cpp_dev->msm_sd.sd.devnode->fops = &msm_cpp_v4l2_subdev_fops;
+ cpp_dev->msm_sd.sd.entity.revision = cpp_dev->msm_sd.sd.devnode->num;
+ cpp_dev->state = CPP_STATE_BOOT;
+
+ msm_camera_io_w(0x0, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_MASK);
+ msm_camera_io_w(0xFFFF, cpp_dev->base +
+ MSM_CPP_MICRO_IRQGEN_CLR);
+ msm_camera_io_w(0x80000000, cpp_dev->base + 0xF0);
+ cpp_release_hardware(cpp_dev);
+ cpp_dev->state = CPP_STATE_OFF;
+ msm_cpp_enable_debugfs(cpp_dev);
+
+ msm_queue_init(&cpp_dev->eventData_q, "eventdata");
+ msm_queue_init(&cpp_dev->processing_q, "frame");
+ INIT_LIST_HEAD(&cpp_dev->tasklet_q);
+ tasklet_init(&cpp_dev->cpp_tasklet, msm_cpp_do_tasklet,
+ (unsigned long)cpp_dev);
+ cpp_dev->timer_wq = create_workqueue("msm_cpp_workqueue");
+ cpp_dev->work = kmalloc(sizeof(struct msm_cpp_work_t),
+ GFP_KERNEL);
+
+ if (!cpp_dev->work) {
+		pr_err("not enough memory\n");
+ rc = -ENOMEM;
+ goto cpp_probe_init_error;
+ }
+
+ INIT_WORK((struct work_struct *)cpp_dev->work, msm_cpp_do_timeout_work);
+ cpp_dev->cpp_open_cnt = 0;
+ cpp_dev->is_firmware_loaded = 0;
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
+ cpp_timer.data.cpp_dev = cpp_dev;
+ atomic_set(&cpp_timer.used, 0);
+ /* install timer for cpp timeout */
+ CPP_DBG("Installing cpp_timer\n");
+ setup_timer(&cpp_timer.cpp_timer,
+ cpp_timer_callback, (unsigned long)&cpp_timer);
+ cpp_dev->fw_name_bin = NULL;
+ cpp_dev->max_timeout_trial_cnt = MSM_CPP_MAX_TIMEOUT_TRIAL;
+ if (rc == 0)
+ CPP_DBG("SUCCESS.");
+ else
+ CPP_DBG("FAILED.");
+ return rc;
+cpp_probe_init_error:
+ media_entity_cleanup(&cpp_dev->msm_sd.sd.entity);
+ msm_sd_unregister(&cpp_dev->msm_sd);
+region_err:
+ release_mem_region(cpp_dev->mem->start, resource_size(cpp_dev->mem));
+mem_err:
+ kfree(cpp_dev->cpp_clk);
+clk_err:
+ kfree(cpp_dev);
+ return rc;
+}
+
+static const struct of_device_id msm_cpp_dt_match[] = {
+ {.compatible = "qcom,cpp"},
+ {}
+};
+
+static int cpp_device_remove(struct platform_device *dev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(dev);
+ struct cpp_device *cpp_dev;
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ cpp_dev = (struct cpp_device *)v4l2_get_subdevdata(sd);
+ if (!cpp_dev) {
+ pr_err("%s: cpp device is NULL\n", __func__);
+ return 0;
+ }
+ if (cpp_dev->fw) {
+ release_firmware(cpp_dev->fw);
+ cpp_dev->fw = NULL;
+ }
+ msm_sd_unregister(&cpp_dev->msm_sd);
+ release_mem_region(cpp_dev->mem->start, resource_size(cpp_dev->mem));
+ release_mem_region(cpp_dev->vbif_mem->start,
+ resource_size(cpp_dev->vbif_mem));
+ release_mem_region(cpp_dev->cpp_hw_mem->start,
+ resource_size(cpp_dev->cpp_hw_mem));
+ if (cpp_dev->camss_cpp)
+ release_mem_region(cpp_dev->camss_cpp->start,
+ resource_size(cpp_dev->camss_cpp));
+ mutex_destroy(&cpp_dev->mutex);
+ kfree(cpp_dev->work);
+ destroy_workqueue(cpp_dev->timer_wq);
+ kfree(cpp_dev->cpp_clk);
+ kfree(cpp_dev);
+ return 0;
+}
+
+static struct platform_driver cpp_driver = {
+ .probe = cpp_probe,
+ .remove = cpp_device_remove,
+ .driver = {
+ .name = MSM_CPP_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cpp_dt_match,
+ },
+};
+
+static int __init msm_cpp_init_module(void)
+{
+ return platform_driver_register(&cpp_driver);
+}
+
+static void __exit msm_cpp_exit_module(void)
+{
+ platform_driver_unregister(&cpp_driver);
+}
+
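+/* debugfs write handler: stores the requested error-inducement value. */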
+static int msm_cpp_debugfs_error_s(void *data, u64 val)
+{
+	pr_err("setting error inducement\n");
+ induce_error = val;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cpp_debugfs_error, NULL,
+ msm_cpp_debugfs_error_s, "%llu\n");
+
+static int msm_cpp_enable_debugfs(struct cpp_device *cpp_dev)
+{
+ struct dentry *debugfs_base;
+ debugfs_base = debugfs_create_dir("msm_cpp", NULL);
+ if (!debugfs_base)
+ return -ENOMEM;
+
+ if (!debugfs_create_file("error", S_IRUGO | S_IWUSR, debugfs_base,
+ (void *)cpp_dev, &cpp_debugfs_error))
+ return -ENOMEM;
+
+ return 0;
+}
+
+module_init(msm_cpp_init_module);
+module_exit(msm_cpp_exit_module);
+MODULE_DESCRIPTION("MSM CPP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
new file mode 100644
index 000000000000..7a6a9efcaa26
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.h
@@ -0,0 +1,267 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CPP_H__
+#define __MSM_CPP_H__
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <media/v4l2-subdev.h>
+#include "msm_sd.h"
+
+/*
+ * hw version info:
+ *   31:28  Major version
+ *   27:16  Minor version
+ *    15:0  Revision bits
+ */
+#define CPP_HW_VERSION_1_1_0 0x10010000
+#define CPP_HW_VERSION_1_1_1 0x10010001
+#define CPP_HW_VERSION_2_0_0 0x20000000
+#define CPP_HW_VERSION_4_0_0 0x40000000
+#define CPP_HW_VERSION_4_1_0 0x40010000
+#define CPP_HW_VERSION_5_0_0 0x50000000
+#define CPP_HW_VERSION_5_1_0 0x50010000
+
+#define VBIF_VERSION_2_3_0 0x20030000
+
+#define MAX_ACTIVE_CPP_INSTANCE 8
+#define MAX_CPP_PROCESSING_FRAME 2
+#define MAX_CPP_V4l2_EVENTS 30
+
+#define MSM_CPP_MICRO_BASE 0x4000
+#define MSM_CPP_MICRO_HW_VERSION 0x0000
+#define MSM_CPP_MICRO_IRQGEN_STAT 0x0004
+#define MSM_CPP_MICRO_IRQGEN_CLR 0x0008
+#define MSM_CPP_MICRO_IRQGEN_MASK 0x000C
+#define MSM_CPP_MICRO_FIFO_TX_DATA 0x0010
+#define MSM_CPP_MICRO_FIFO_TX_STAT 0x0014
+#define MSM_CPP_MICRO_FIFO_RX_DATA 0x0018
+#define MSM_CPP_MICRO_FIFO_RX_STAT 0x001C
+#define MSM_CPP_MICRO_BOOT_START 0x0020
+#define MSM_CPP_MICRO_BOOT_LDORG 0x0024
+#define MSM_CPP_MICRO_CLKEN_CTL 0x0030
+
+#define MSM_CPP_CMD_GET_BOOTLOADER_VER 0x1
+#define MSM_CPP_CMD_FW_LOAD 0x2
+#define MSM_CPP_CMD_EXEC_JUMP 0x3
+#define MSM_CPP_CMD_RESET_HW 0x5
+#define MSM_CPP_CMD_PROCESS_FRAME 0x6
+#define MSM_CPP_CMD_FLUSH_STREAM 0x7
+#define MSM_CPP_CMD_CFG_MEM_PARAM 0x8
+#define MSM_CPP_CMD_ERROR_REQUEST 0x9
+#define MSM_CPP_CMD_GET_STATUS 0xA
+#define MSM_CPP_CMD_GET_FW_VER 0xB
+#define MSM_CPP_CMD_GROUP_BUFFER_DUP 0x12
+#define MSM_CPP_CMD_GROUP_BUFFER 0xF
+
+#define MSM_CPP_MSG_ID_CMD 0x3E646D63
+#define MSM_CPP_MSG_ID_OK 0x0A0A4B4F
+#define MSM_CPP_MSG_ID_TRAILER 0xABCDEFAA
+
+#define MSM_CPP_MSG_ID_JUMP_ACK 0x00000001
+#define MSM_CPP_MSG_ID_FRAME_ACK 0x00000002
+#define MSM_CPP_MSG_ID_FRAME_NACK 0x00000003
+#define MSM_CPP_MSG_ID_FLUSH_ACK 0x00000004
+#define MSM_CPP_MSG_ID_FLUSH_NACK 0x00000005
+#define MSM_CPP_MSG_ID_CFG_MEM_ACK 0x00000006
+#define MSM_CPP_MSG_ID_CFG_MEM_INV 0x00000007
+#define MSM_CPP_MSG_ID_ERROR_STATUS 0x00000008
+#define MSM_CPP_MSG_ID_INVALID_CMD 0x00000009
+#define MSM_CPP_MSG_ID_GEN_STATUS 0x0000000A
+#define MSM_CPP_MSG_ID_FLUSHED 0x0000000B
+#define MSM_CPP_MSG_ID_FW_VER 0x0000000C
+
+#define MSM_CPP_JUMP_ADDRESS 0x20
+#define MSM_CPP_START_ADDRESS 0x0
+#define MSM_CPP_END_ADDRESS 0x3F00
+
+#define MSM_CPP_POLL_RETRIES 200
+#define MSM_CPP_TASKLETQ_SIZE 16
+#define MSM_CPP_TX_FIFO_LEVEL 16
+#define MSM_CPP_RX_FIFO_LEVEL 512
+
+struct cpp_subscribe_info {
+ struct v4l2_fh *vfh;
+ uint32_t active;
+};
+
+enum cpp_state {
+ CPP_STATE_BOOT,
+ CPP_STATE_IDLE,
+ CPP_STATE_ACTIVE,
+ CPP_STATE_OFF,
+};
+
+enum cpp_iommu_state {
+ CPP_IOMMU_STATE_DETACHED,
+ CPP_IOMMU_STATE_ATTACHED,
+};
+
+enum msm_queue {
+ MSM_CAM_Q_CTRL, /* control command or control command status */
+ MSM_CAM_Q_VFE_EVT, /* adsp event */
+ MSM_CAM_Q_VFE_MSG, /* adsp message */
+ MSM_CAM_Q_V4L2_REQ, /* v4l2 request */
+ MSM_CAM_Q_VPE_MSG, /* vpe message */
+ MSM_CAM_Q_PP_MSG, /* pp message */
+};
+
+struct msm_queue_cmd {
+ struct list_head list_config;
+ struct list_head list_control;
+ struct list_head list_frame;
+ struct list_head list_pict;
+ struct list_head list_vpe_frame;
+ struct list_head list_eventdata;
+ enum msm_queue type;
+ void *command;
+ atomic_t on_heap;
+ struct timespec ts;
+ uint32_t error_code;
+ uint32_t trans_code;
+};
+
+struct msm_device_queue {
+ struct list_head list;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ int max;
+ int len;
+ const char *name;
+};
+
+struct msm_cpp_tasklet_queue_cmd {
+ struct list_head list;
+ uint32_t irq_status;
+ uint32_t tx_fifo[MSM_CPP_TX_FIFO_LEVEL];
+ uint32_t tx_level;
+ uint8_t cmd_used;
+};
+
+struct msm_cpp_buffer_map_info_t {
+ unsigned long len;
+ dma_addr_t phy_addr;
+ int buf_fd;
+ struct msm_cpp_buffer_info_t buff_info;
+};
+
+struct msm_cpp_buffer_map_list_t {
+ struct msm_cpp_buffer_map_info_t map_info;
+ struct list_head entry;
+};
+
+struct msm_cpp_buff_queue_info_t {
+ uint32_t used;
+ uint16_t session_id;
+ uint16_t stream_id;
+ struct list_head vb2_buff_head;
+ struct list_head native_buff_head;
+};
+
+struct msm_cpp_work_t {
+ struct work_struct my_work;
+ struct cpp_device *cpp_dev;
+};
+
+struct msm_cpp_payload_params {
+ uint32_t stripe_base;
+ uint32_t stripe_size;
+ uint32_t plane_base;
+ uint32_t plane_size;
+
+ /* offsets for stripe/plane pointers in payload */
+ uint32_t rd_pntr_off;
+ uint32_t wr_0_pntr_off;
+ uint32_t rd_ref_pntr_off;
+ uint32_t wr_ref_pntr_off;
+ uint32_t wr_0_meta_data_wr_pntr_off;
+ uint32_t fe_mmu_pf_ptr_off;
+ uint32_t ref_fe_mmu_pf_ptr_off;
+ uint32_t we_mmu_pf_ptr_off;
+ uint32_t dup_we_mmu_pf_ptr_off;
+ uint32_t ref_we_mmu_pf_ptr_off;
+ uint32_t set_group_buffer_len;
+ uint32_t dup_frame_indicator_off;
+};
+
+struct cpp_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct v4l2_subdev subdev;
+ struct resource *mem;
+ struct resource *irq;
+ struct resource *io;
+ struct resource *vbif_mem;
+ struct resource *vbif_io;
+ struct resource *cpp_hw_mem;
+ struct resource *camss_cpp;
+ void __iomem *vbif_base;
+ void __iomem *base;
+ void __iomem *cpp_hw_base;
+ void __iomem *camss_cpp_base;
+ struct clk **cpp_clk;
+ struct regulator *fs_cpp;
+ struct regulator *fs_camss;
+ struct regulator *fs_mmagic_camss;
+ struct mutex mutex;
+ enum cpp_state state;
+ enum cpp_iommu_state iommu_state;
+ uint8_t is_firmware_loaded;
+ char *fw_name_bin;
+ const struct firmware *fw;
+ struct workqueue_struct *timer_wq;
+ struct msm_cpp_work_t *work;
+ uint32_t fw_version;
+ uint8_t stream_cnt;
+ uint8_t timeout_trial_cnt;
+ uint8_t max_timeout_trial_cnt;
+
+ int domain_num;
+ struct iommu_domain *domain;
+ struct device *iommu_ctx;
+ uint32_t num_clk;
+ uint32_t min_clk_rate;
+
+ int iommu_hdl;
+ /* Reusing proven tasklet from msm isp */
+ atomic_t irq_cnt;
+ uint8_t taskletq_idx;
+ spinlock_t tasklet_lock;
+ struct list_head tasklet_q;
+ struct tasklet_struct cpp_tasklet;
+ struct msm_cpp_tasklet_queue_cmd
+ tasklet_queue_cmd[MSM_CPP_TASKLETQ_SIZE];
+
+ struct cpp_subscribe_info cpp_subscribe_list[MAX_ACTIVE_CPP_INSTANCE];
+ uint32_t cpp_open_cnt;
+ struct cpp_hw_info hw_info;
+
+ struct msm_device_queue eventData_q; /* V4L2 Event Payload Queue */
+
+	/*
+	 * Processing Queue: store frame info for frames sent to the
+	 * microcontroller.
+	 */
+ struct msm_device_queue processing_q;
+
+ struct msm_cpp_buff_queue_info_t *buff_queue;
+ uint32_t num_buffq;
+ struct v4l2_subdev *buf_mgr_subdev;
+
+ uint32_t bus_client;
+ uint32_t bus_idx;
+ uint32_t bus_master_flag;
+ struct msm_cpp_payload_params payload_params;
+};
+#endif /* __MSM_CPP_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/pproc/vpe/Makefile b/drivers/media/platform/msm/camera_v2/pproc/vpe/Makefile
new file mode 100644
index 000000000000..65a7e34469dc
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/pproc/vpe/Makefile
@@ -0,0 +1,3 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSMB_CAMERA) += msm_vpe.o
diff --git a/drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.c b/drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.c
new file mode 100644
index 000000000000..bf4d3595ecf4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.c
@@ -0,0 +1,1683 @@
+/* Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "MSM-VPE %s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/videodev2.h>
+#include <linux/msm_ion.h>
+#include <linux/iommu.h>
+#include <linux/msm_iommu_domains.h>
+#include <linux/qcom_iommu.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/media-entity.h>
+#include <media/msmb_generic_buf_mgr.h>
+#include <media/msmb_pproc.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-buf.h>
+#include "msm_vpe.h"
+#include "msm_camera_io_util.h"
+
+#define MSM_VPE_IDENT_TO_SESSION_ID(identity) ((identity >> 16) & 0xFFFF)
+#define MSM_VPE_IDENT_TO_STREAM_ID(identity) (identity & 0xFFFF)
+
+#define MSM_VPE_DRV_NAME "msm_vpe"
+
+#define MSM_VPE_MAX_BUFF_QUEUE 16
+
+#define CONFIG_MSM_VPE_DBG 0
+
+#if CONFIG_MSM_VPE_DBG
+#define VPE_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define VPE_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+static void vpe_mem_dump(const char * const name, const void * const addr,
+ int size)
+{
+ char line_str[128], *p_str;
+ int i;
+ u32 *p = (u32 *) addr;
+ u32 data;
+ VPE_DBG("%s: (%s) %p %d\n", __func__, name, addr, size);
+ line_str[0] = '\0';
+ p_str = line_str;
+ for (i = 0; i < size/4; i++) {
+ if (i % 4 == 0) {
+ snprintf(p_str, 12, "%p: ", p);
+ p_str += 10;
+ }
+ data = *p++;
+ snprintf(p_str, 12, "%08x ", data);
+ p_str += 9;
+ if ((i + 1) % 4 == 0) {
+ VPE_DBG("%s\n", line_str);
+ line_str[0] = '\0';
+ p_str = line_str;
+ }
+ }
+ if (line_str[0] != '\0')
+ VPE_DBG("%s\n", line_str);
+}
+
+static inline long long vpe_do_div(long long num, long long den)
+{
+ do_div(num, den);
+ return num;
+}
+
+#define msm_dequeue(queue, member) ({ \
+ unsigned long flags; \
+ struct msm_device_queue *__q = (queue); \
+ struct msm_queue_cmd *qcmd = 0; \
+ spin_lock_irqsave(&__q->lock, flags); \
+ if (!list_empty(&__q->list)) { \
+ __q->len--; \
+ qcmd = list_first_entry(&__q->list, \
+ struct msm_queue_cmd, \
+ member); \
+ list_del_init(&qcmd->member); \
+ } \
+ spin_unlock_irqrestore(&__q->lock, flags); \
+ qcmd; \
+ })
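+
+/*
+ * Usage example (illustrative): msm_vpe_notify_frame_done() below
+ * dequeues processed frames with
+ *   frame_qcmd = msm_dequeue(&vpe_dev->processing_q, list_frame);
+ * The 'member' argument names which list_head field of msm_queue_cmd
+ * the entry is linked on.
+ */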
+
+static void msm_queue_init(struct msm_device_queue *queue, const char *name)
+{
+ spin_lock_init(&queue->lock);
+ queue->len = 0;
+ queue->max = 0;
+ queue->name = name;
+ INIT_LIST_HEAD(&queue->list);
+ init_waitqueue_head(&queue->wait);
+}
+
+static struct msm_cam_clk_info vpe_clk_info[] = {
+ {"vpe_clk", 160000000},
+ {"vpe_pclk", -1},
+};
+
+static int msm_vpe_notify_frame_done(struct vpe_device *vpe_dev);
+
+static void msm_enqueue(struct msm_device_queue *queue,
+ struct list_head *entry)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&queue->lock, flags);
+ queue->len++;
+ if (queue->len > queue->max) {
+ queue->max = queue->len;
+ pr_debug("queue %s new max is %d\n", queue->name, queue->max);
+ }
+ list_add_tail(entry, &queue->list);
+ wake_up(&queue->wait);
+ VPE_DBG("woke up %s\n", queue->name);
+ spin_unlock_irqrestore(&queue->lock, flags);
+}
+
+static struct msm_vpe_buff_queue_info_t *msm_vpe_get_buff_queue_entry(
+ struct vpe_device *vpe_dev, uint32_t session_id, uint32_t stream_id)
+{
+ uint32_t i = 0;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info = NULL;
+
+ for (i = 0; i < vpe_dev->num_buffq; i++) {
+ if ((vpe_dev->buff_queue[i].used == 1) &&
+ (vpe_dev->buff_queue[i].session_id == session_id) &&
+ (vpe_dev->buff_queue[i].stream_id == stream_id)) {
+ buff_queue_info = &vpe_dev->buff_queue[i];
+ break;
+ }
+ }
+
+ if (buff_queue_info == NULL) {
+ pr_err("error buffer queue entry for sess:%d strm:%d not found\n",
+ session_id, stream_id);
+ }
+ return buff_queue_info;
+}
+
+static unsigned long msm_vpe_get_phy_addr(struct vpe_device *vpe_dev,
+ struct msm_vpe_buff_queue_info_t *buff_queue_info, uint32_t buff_index,
+ uint8_t native_buff)
+{
+ unsigned long phy_add = 0;
+ struct list_head *buff_head;
+ struct msm_vpe_buffer_map_list_t *buff, *save;
+
+ if (native_buff)
+ buff_head = &buff_queue_info->native_buff_head;
+ else
+ buff_head = &buff_queue_info->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buff_index) {
+ phy_add = buff->map_info.phy_addr;
+ break;
+ }
+ }
+
+ return phy_add;
+}
+
+static unsigned long msm_vpe_queue_buffer_info(struct vpe_device *vpe_dev,
+ struct msm_vpe_buff_queue_info_t *buff_queue,
+ struct msm_vpe_buffer_info_t *buffer_info)
+{
+ struct list_head *buff_head;
+ struct msm_vpe_buffer_map_list_t *buff, *save;
+ int rc = 0;
+
+ if (buffer_info->native_buff)
+ buff_head = &buff_queue->native_buff_head;
+ else
+ buff_head = &buff_queue->vb2_buff_head;
+
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ if (buff->map_info.buff_info.index == buffer_info->index) {
+ pr_err("error buffer index already queued\n");
+			/* 0 signals failure to callers expecting an address */
+			return 0;
+ }
+ }
+
+ buff = kzalloc(
+ sizeof(struct msm_vpe_buffer_map_list_t), GFP_KERNEL);
+ if (!buff) {
+ pr_err("error allocating memory\n");
+		/* 0 signals failure to callers expecting an address */
+		return 0;
+ }
+
+ buff->map_info.buff_info = *buffer_info;
+ buff->map_info.dbuf = dma_buf_get(buffer_info->fd);
+ if (IS_ERR_OR_NULL(buff->map_info.dbuf)) {
+ pr_err("Ion dma get buf failed\n");
+ rc = PTR_ERR(buff->map_info.dbuf);
+ goto err_get;
+ }
+
+ buff->map_info.attachment = dma_buf_attach(buff->map_info.dbuf,
+ &vpe_dev->pdev->dev);
+ if (IS_ERR_OR_NULL(buff->map_info.attachment)) {
+ pr_err("Ion dma buf attach failed\n");
+ rc = PTR_ERR(buff->map_info.attachment);
+ goto err_put;
+ }
+
+ buff->map_info.table =
+ dma_buf_map_attachment(buff->map_info.attachment,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(buff->map_info.table)) {
+ pr_err("DMA buf map attachment failed\n");
+ rc = PTR_ERR(buff->map_info.table);
+ goto err_detach;
+ }
+ if (msm_map_dma_buf(buff->map_info.dbuf, buff->map_info.table,
+ vpe_dev->domain_num, 0, SZ_4K, 0,
+ &buff->map_info.phy_addr,
+ &buff->map_info.len, 0, 0)) {
+ pr_err("%s: cannot map address", __func__);
+ goto err_detachment;
+ }
+
+ INIT_LIST_HEAD(&buff->entry);
+ list_add_tail(&buff->entry, buff_head);
+
+ return buff->map_info.phy_addr;
+
+err_detachment:
+ dma_buf_unmap_attachment(buff->map_info.attachment,
+ buff->map_info.table, DMA_BIDIRECTIONAL);
+err_detach:
+ dma_buf_detach(buff->map_info.dbuf, buff->map_info.attachment);
+err_put:
+ dma_buf_put(buff->map_info.dbuf);
+err_get:
+ kzfree(buff);
+ return 0;
+}
+
+static void msm_vpe_dequeue_buffer_info(struct vpe_device *vpe_dev,
+ struct msm_vpe_buffer_map_list_t *buff)
+{
+ msm_unmap_dma_buf(buff->map_info.table, vpe_dev->domain_num, 0);
+ dma_buf_unmap_attachment(buff->map_info.attachment,
+ buff->map_info.table, DMA_BIDIRECTIONAL);
+ dma_buf_detach(buff->map_info.dbuf, buff->map_info.attachment);
+ dma_buf_put(buff->map_info.dbuf);
+ list_del_init(&buff->entry);
+ kzfree(buff);
+
+ return;
+}
+
+static unsigned long msm_vpe_fetch_buffer_info(struct vpe_device *vpe_dev,
+ struct msm_vpe_buffer_info_t *buffer_info, uint32_t session_id,
+ uint32_t stream_id)
+{
+ unsigned long phy_addr = 0;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+ uint8_t native_buff = buffer_info->native_buff;
+
+ buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev, session_id,
+ stream_id);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ session_id, stream_id);
+ return phy_addr;
+ }
+
+ phy_addr = msm_vpe_get_phy_addr(vpe_dev, buff_queue_info,
+ buffer_info->index, native_buff);
+ if ((phy_addr == 0) && (native_buff)) {
+ phy_addr = msm_vpe_queue_buffer_info(vpe_dev, buff_queue_info,
+ buffer_info);
+ }
+ return phy_addr;
+}
+
+static int32_t msm_vpe_enqueue_buff_info_list(struct vpe_device *vpe_dev,
+ struct msm_vpe_stream_buff_info_t *stream_buff_info)
+{
+ uint32_t j;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+
+ buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev,
+ (stream_buff_info->identity >> 16) & 0xFFFF,
+ stream_buff_info->identity & 0xFFFF);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ (stream_buff_info->identity >> 16) & 0xFFFF,
+ stream_buff_info->identity & 0xFFFF);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < stream_buff_info->num_buffs; j++) {
+ msm_vpe_queue_buffer_info(vpe_dev, buff_queue_info,
+ &stream_buff_info->buffer_info[j]);
+ }
+ return 0;
+}
+
+static int32_t msm_vpe_dequeue_buff_info_list(struct vpe_device *vpe_dev,
+ struct msm_vpe_buff_queue_info_t *buff_queue_info)
+{
+ struct msm_vpe_buffer_map_list_t *buff, *save;
+ struct list_head *buff_head;
+
+ buff_head = &buff_queue_info->native_buff_head;
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ msm_vpe_dequeue_buffer_info(vpe_dev, buff);
+ }
+
+ buff_head = &buff_queue_info->vb2_buff_head;
+ list_for_each_entry_safe(buff, save, buff_head, entry) {
+ msm_vpe_dequeue_buffer_info(vpe_dev, buff);
+ }
+
+ return 0;
+}
+
+static int32_t msm_vpe_add_buff_queue_entry(struct vpe_device *vpe_dev,
+ uint16_t session_id, uint16_t stream_id)
+{
+ uint32_t i;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+
+ for (i = 0; i < vpe_dev->num_buffq; i++) {
+ if (vpe_dev->buff_queue[i].used == 0) {
+ buff_queue_info = &vpe_dev->buff_queue[i];
+ buff_queue_info->used = 1;
+ buff_queue_info->session_id = session_id;
+ buff_queue_info->stream_id = stream_id;
+ INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
+ INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
+ return 0;
+ }
+ }
+ pr_err("buffer queue full. error for sessionid: %d streamid: %d\n",
+ session_id, stream_id);
+ return -EINVAL;
+}
+
+static int32_t msm_vpe_free_buff_queue_entry(struct vpe_device *vpe_dev,
+ uint32_t session_id, uint32_t stream_id)
+{
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+
+ buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev, session_id,
+ stream_id);
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for sessid:%d strmid:%d\n",
+ session_id, stream_id);
+ return -EINVAL;
+ }
+
+ buff_queue_info->used = 0;
+ buff_queue_info->session_id = 0;
+ buff_queue_info->stream_id = 0;
+ INIT_LIST_HEAD(&buff_queue_info->vb2_buff_head);
+ INIT_LIST_HEAD(&buff_queue_info->native_buff_head);
+ return 0;
+}
+
+static int32_t msm_vpe_create_buff_queue(struct vpe_device *vpe_dev,
+ uint32_t num_buffq)
+{
+ struct msm_vpe_buff_queue_info_t *buff_queue;
+ buff_queue = kzalloc(
+ sizeof(struct msm_vpe_buff_queue_info_t) * num_buffq,
+ GFP_KERNEL);
+ if (!buff_queue) {
+ pr_err("Buff queue allocation failure\n");
+ return -ENOMEM;
+ }
+
+ if (vpe_dev->buff_queue) {
+ pr_err("Buff queue not empty\n");
+ kzfree(buff_queue);
+ return -EINVAL;
+ } else {
+ vpe_dev->buff_queue = buff_queue;
+ vpe_dev->num_buffq = num_buffq;
+ }
+ return 0;
+}
+
+static void msm_vpe_delete_buff_queue(struct vpe_device *vpe_dev)
+{
+ uint32_t i;
+
+ for (i = 0; i < vpe_dev->num_buffq; i++) {
+ if (vpe_dev->buff_queue[i].used == 1) {
+ pr_err("Queue not free sessionid: %d, streamid: %d\n",
+ vpe_dev->buff_queue[i].session_id,
+ vpe_dev->buff_queue[i].stream_id);
+ msm_vpe_free_buff_queue_entry(vpe_dev,
+ vpe_dev->buff_queue[i].session_id,
+ vpe_dev->buff_queue[i].stream_id);
+ }
+ }
+ kzfree(vpe_dev->buff_queue);
+ vpe_dev->buff_queue = NULL;
+ vpe_dev->num_buffq = 0;
+ return;
+}
+
+void vpe_release_ion_client(struct kref *ref)
+{
+ struct vpe_device *vpe_dev = container_of(ref,
+ struct vpe_device, refcount);
+ ion_client_destroy(vpe_dev->client);
+}
+
+static int vpe_init_mem(struct vpe_device *vpe_dev)
+{
+ kref_init(&vpe_dev->refcount);
+ kref_get(&vpe_dev->refcount);
+ vpe_dev->client = msm_ion_client_create("vpe");
+
+ if (!vpe_dev->client) {
+ pr_err("couldn't create ion client\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void vpe_deinit_mem(struct vpe_device *vpe_dev)
+{
+ kref_put(&vpe_dev->refcount, vpe_release_ion_client);
+}
+
+static irqreturn_t msm_vpe_irq(int irq_num, void *data)
+{
+ unsigned long flags;
+ uint32_t irq_status;
+ struct msm_vpe_tasklet_queue_cmd *queue_cmd;
+ struct vpe_device *vpe_dev = (struct vpe_device *) data;
+
+ irq_status = msm_camera_io_r_mb(vpe_dev->base +
+ VPE_INTR_STATUS_OFFSET);
+
+ spin_lock_irqsave(&vpe_dev->tasklet_lock, flags);
+ queue_cmd = &vpe_dev->tasklet_queue_cmd[vpe_dev->taskletq_idx];
+ if (queue_cmd->cmd_used) {
+ VPE_DBG("%s: vpe tasklet queue overflow\n", __func__);
+ list_del(&queue_cmd->list);
+ } else {
+ atomic_add(1, &vpe_dev->irq_cnt);
+ }
+ queue_cmd->irq_status = irq_status;
+
+ queue_cmd->cmd_used = 1;
+ vpe_dev->taskletq_idx =
+ (vpe_dev->taskletq_idx + 1) % MSM_VPE_TASKLETQ_SIZE;
+ list_add_tail(&queue_cmd->list, &vpe_dev->tasklet_q);
+ spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
+
+ tasklet_schedule(&vpe_dev->vpe_tasklet);
+
+ msm_camera_io_w_mb(irq_status, vpe_dev->base + VPE_INTR_CLEAR_OFFSET);
+ msm_camera_io_w(0, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
+ VPE_DBG("%s: irq_status=0x%x.\n", __func__, irq_status);
+
+ return IRQ_HANDLED;
+}
+
+static void msm_vpe_do_tasklet(unsigned long data)
+{
+ unsigned long flags;
+ struct vpe_device *vpe_dev = (struct vpe_device *)data;
+ struct msm_vpe_tasklet_queue_cmd *queue_cmd;
+
+ while (atomic_read(&vpe_dev->irq_cnt)) {
+ spin_lock_irqsave(&vpe_dev->tasklet_lock, flags);
+ queue_cmd = list_first_entry(&vpe_dev->tasklet_q,
+ struct msm_vpe_tasklet_queue_cmd, list);
+ if (!queue_cmd) {
+ atomic_set(&vpe_dev->irq_cnt, 0);
+ spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
+ return;
+ }
+ atomic_sub(1, &vpe_dev->irq_cnt);
+ list_del(&queue_cmd->list);
+ queue_cmd->cmd_used = 0;
+
+ spin_unlock_irqrestore(&vpe_dev->tasklet_lock, flags);
+
+ VPE_DBG("Frame done!!\n");
+ msm_vpe_notify_frame_done(vpe_dev);
+ }
+}
+
+static int vpe_init_hardware(struct vpe_device *vpe_dev)
+{
+ int rc = 0;
+
+ if (vpe_dev->fs_vpe == NULL) {
+ vpe_dev->fs_vpe =
+ regulator_get(&vpe_dev->pdev->dev, "vdd");
+ if (IS_ERR(vpe_dev->fs_vpe)) {
+ pr_err("Regulator vpe vdd get failed %ld\n",
+ PTR_ERR(vpe_dev->fs_vpe));
+ vpe_dev->fs_vpe = NULL;
+ rc = -ENODEV;
+ goto fail;
+ } else if (regulator_enable(vpe_dev->fs_vpe)) {
+ pr_err("Regulator vpe vdd enable failed\n");
+ regulator_put(vpe_dev->fs_vpe);
+ vpe_dev->fs_vpe = NULL;
+ rc = -ENODEV;
+ goto fail;
+ }
+ }
+
+ rc = msm_cam_clk_enable(&vpe_dev->pdev->dev, vpe_clk_info,
+ vpe_dev->vpe_clk, ARRAY_SIZE(vpe_clk_info), 1);
+ if (rc < 0) {
+ pr_err("clk enable failed\n");
+ goto disable_and_put_regulator;
+ }
+
+ vpe_dev->base = ioremap(vpe_dev->mem->start,
+ resource_size(vpe_dev->mem));
+ if (!vpe_dev->base) {
+ rc = -ENOMEM;
+ pr_err("ioremap failed\n");
+ goto disable_and_put_regulator;
+ }
+
+ if (vpe_dev->state != VPE_STATE_BOOT) {
+ rc = request_irq(vpe_dev->irq->start, msm_vpe_irq,
+ IRQF_TRIGGER_RISING,
+ "vpe", vpe_dev);
+ if (rc < 0) {
+ pr_err("irq request fail! start=%u\n",
+ (uint32_t) vpe_dev->irq->start);
+ rc = -EBUSY;
+ goto unmap_base;
+ } else {
+ VPE_DBG("Got irq! %d\n", (int)vpe_dev->irq->start);
+ }
+ } else {
+ VPE_DBG("Skip requesting the irq since device is booting\n");
+ }
+ vpe_dev->buf_mgr_subdev = msm_buf_mngr_get_subdev();
+
+ msm_vpe_create_buff_queue(vpe_dev, MSM_VPE_MAX_BUFF_QUEUE);
+ return rc;
+
+unmap_base:
+ iounmap(vpe_dev->base);
+disable_and_put_regulator:
+ regulator_disable(vpe_dev->fs_vpe);
+ regulator_put(vpe_dev->fs_vpe);
+fail:
+ return rc;
+}
+
+static int vpe_release_hardware(struct vpe_device *vpe_dev)
+{
+ if (vpe_dev->state != VPE_STATE_BOOT) {
+ free_irq(vpe_dev->irq->start, vpe_dev);
+ tasklet_kill(&vpe_dev->vpe_tasklet);
+ atomic_set(&vpe_dev->irq_cnt, 0);
+ }
+
+ msm_vpe_delete_buff_queue(vpe_dev);
+ iounmap(vpe_dev->base);
+ msm_cam_clk_enable(&vpe_dev->pdev->dev, vpe_clk_info,
+ vpe_dev->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
+ return 0;
+}
+
+static int vpe_open_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ uint32_t i;
+ struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&vpe_dev->mutex);
+ if (vpe_dev->vpe_open_cnt == MAX_ACTIVE_VPE_INSTANCE) {
+ pr_err("No free VPE instance\n");
+ rc = -ENODEV;
+ goto err_mutex_unlock;
+ }
+
+ for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
+ if (vpe_dev->vpe_subscribe_list[i].active == 0) {
+ vpe_dev->vpe_subscribe_list[i].active = 1;
+ vpe_dev->vpe_subscribe_list[i].vfh = &fh->vfh;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_VPE_INSTANCE) {
+ pr_err("No free instance\n");
+ rc = -ENODEV;
+ goto err_mutex_unlock;
+ }
+
+ VPE_DBG("open %d %p\n", i, &fh->vfh);
+ vpe_dev->vpe_open_cnt++;
+ if (vpe_dev->vpe_open_cnt == 1) {
+ rc = vpe_init_hardware(vpe_dev);
+ if (rc < 0) {
+ pr_err("%s: Couldn't init vpe hardware\n", __func__);
+ vpe_dev->vpe_open_cnt--;
+ goto err_fixup_sub_list;
+ }
+ rc = vpe_init_mem(vpe_dev);
+ if (rc < 0) {
+ pr_err("%s: Couldn't init mem\n", __func__);
+ vpe_dev->vpe_open_cnt--;
+ rc = -ENODEV;
+ goto err_release_hardware;
+ }
+ vpe_dev->state = VPE_STATE_IDLE;
+ }
+ mutex_unlock(&vpe_dev->mutex);
+
+ return rc;
+
+err_release_hardware:
+ vpe_release_hardware(vpe_dev);
+err_fixup_sub_list:
+ for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
+ if (vpe_dev->vpe_subscribe_list[i].vfh == &fh->vfh) {
+ vpe_dev->vpe_subscribe_list[i].active = 0;
+ vpe_dev->vpe_subscribe_list[i].vfh = NULL;
+ break;
+ }
+ }
+err_mutex_unlock:
+ mutex_unlock(&vpe_dev->mutex);
+ return rc;
+}
+
+static int vpe_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ uint32_t i;
+ struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
+ mutex_lock(&vpe_dev->mutex);
+ for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
+ if (vpe_dev->vpe_subscribe_list[i].vfh == &fh->vfh) {
+ vpe_dev->vpe_subscribe_list[i].active = 0;
+ vpe_dev->vpe_subscribe_list[i].vfh = NULL;
+ break;
+ }
+ }
+ if (i == MAX_ACTIVE_VPE_INSTANCE) {
+ pr_err("Invalid close\n");
+ mutex_unlock(&vpe_dev->mutex);
+ return -ENODEV;
+ }
+
+ VPE_DBG("close %d %p\n", i, &fh->vfh);
+ vpe_dev->vpe_open_cnt--;
+ if (vpe_dev->vpe_open_cnt == 0) {
+ vpe_deinit_mem(vpe_dev);
+ vpe_release_hardware(vpe_dev);
+ vpe_dev->state = VPE_STATE_OFF;
+ }
+ mutex_unlock(&vpe_dev->mutex);
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops msm_vpe_internal_ops = {
+ .open = vpe_open_node,
+ .close = vpe_close_node,
+};
+
+static int msm_vpe_buffer_ops(struct vpe_device *vpe_dev,
+ uint32_t buff_mgr_ops, struct msm_buf_mngr_info *buff_mgr_info)
+{
+ int rc = -EINVAL;
+
+ rc = v4l2_subdev_call(vpe_dev->buf_mgr_subdev, core, ioctl,
+ buff_mgr_ops, buff_mgr_info);
+ if (rc < 0)
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+}
+
+static int msm_vpe_notify_frame_done(struct vpe_device *vpe_dev)
+{
+ struct v4l2_event v4l2_evt;
+ struct msm_queue_cmd *frame_qcmd;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_vpe_frame_info_t *processed_frame;
+ struct msm_device_queue *queue = &vpe_dev->processing_q;
+ struct msm_buf_mngr_info buff_mgr_info;
+ int rc = 0;
+
+ if (queue->len > 0) {
+ frame_qcmd = msm_dequeue(queue, list_frame);
+ if (!frame_qcmd) {
+ pr_err("%s: %d frame_qcmd is NULL\n",
+ __func__ , __LINE__);
+ return -EINVAL;
+ }
+ processed_frame = frame_qcmd->command;
+ do_gettimeofday(&(processed_frame->out_time));
+ kfree(frame_qcmd);
+ event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_ATOMIC);
+ if (!event_qcmd) {
+ pr_err("%s: Insufficient memory\n", __func__);
+ return -ENOMEM;
+ }
+ atomic_set(&event_qcmd->on_heap, 1);
+ event_qcmd->command = processed_frame;
+ VPE_DBG("fid %d\n", processed_frame->frame_id);
+ msm_enqueue(&vpe_dev->eventData_q, &event_qcmd->list_eventdata);
+
+ if (!processed_frame->output_buffer_info.processed_divert) {
+			memset(&buff_mgr_info, 0,
+ sizeof(buff_mgr_info));
+ buff_mgr_info.session_id =
+ ((processed_frame->identity >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id =
+ (processed_frame->identity & 0xFFFF);
+ buff_mgr_info.frame_id = processed_frame->frame_id;
+ buff_mgr_info.timestamp = processed_frame->timestamp;
+ buff_mgr_info.index =
+ processed_frame->output_buffer_info.index;
+ rc = msm_vpe_buffer_ops(vpe_dev,
+ VIDIOC_MSM_BUF_MNGR_BUF_DONE,
+ &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("%s: error doing VIDIOC_MSM_BUF_MNGR_BUF_DONE\n",
+ __func__);
+ rc = -EINVAL;
+ }
+ }
+
+ v4l2_evt.id = processed_frame->inst_id;
+ v4l2_evt.type = V4L2_EVENT_VPE_FRAME_DONE;
+ v4l2_event_queue(vpe_dev->msm_sd.sd.devnode, &v4l2_evt);
+ }
+ return rc;
+}
+
+static void vpe_update_scaler_params(struct vpe_device *vpe_dev,
+ struct msm_vpe_frame_strip_info strip_info)
+{
+ uint32_t out_ROI_width, out_ROI_height;
+ uint32_t src_ROI_width, src_ROI_height;
+
+ /*
+ * phase_step_x, phase_step_y, phase_init_x and phase_init_y
+ * are represented in fixed-point, unsigned 3.29 format
+ */
+ uint32_t phase_step_x = 0;
+ uint32_t phase_step_y = 0;
+ uint32_t phase_init_x = 0;
+ uint32_t phase_init_y = 0;
+
+ uint32_t src_roi, src_x, src_y, src_xy, temp;
+ uint32_t yscale_filter_sel, xscale_filter_sel;
+ uint32_t scale_unit_sel_x, scale_unit_sel_y;
+ uint64_t numerator, denominator;
+
+ /*
+	 * The assumption is that both directions need scaling; this
+	 * can be improved.
+ */
+ temp = msm_camera_io_r(vpe_dev->base + VPE_OP_MODE_OFFSET) | 0x3;
+ msm_camera_io_w(temp, vpe_dev->base + VPE_OP_MODE_OFFSET);
+
+ src_ROI_width = strip_info.src_w;
+ src_ROI_height = strip_info.src_h;
+ out_ROI_width = strip_info.dst_w;
+ out_ROI_height = strip_info.dst_h;
+
+ VPE_DBG("src w = %u, h=%u, dst w = %u, h =%u.\n",
+ src_ROI_width, src_ROI_height, out_ROI_width,
+ out_ROI_height);
+ src_roi = (src_ROI_height << 16) + src_ROI_width;
+
+ msm_camera_io_w(src_roi, vpe_dev->base + VPE_SRC_SIZE_OFFSET);
+
+ src_x = strip_info.src_x;
+ src_y = strip_info.src_y;
+
+ VPE_DBG("src_x = %d, src_y=%d.\n", src_x, src_y);
+
+ src_xy = src_y*(1<<16) + src_x;
+ msm_camera_io_w(src_xy, vpe_dev->base +
+ VPE_SRC_XY_OFFSET);
+ VPE_DBG("src_xy = 0x%x, src_roi=0x%x.\n", src_xy, src_roi);
+
+ /* decide whether to use FIR or M/N for scaling */
+ if ((out_ROI_width == 1 && src_ROI_width < 4) ||
+ (src_ROI_width < 4 * out_ROI_width - 3))
+ scale_unit_sel_x = 0;/* use FIR scalar */
+ else
+ scale_unit_sel_x = 1;/* use M/N scalar */
+
+ if ((out_ROI_height == 1 && src_ROI_height < 4) ||
+ (src_ROI_height < 4 * out_ROI_height - 3))
+ scale_unit_sel_y = 0;/* use FIR scalar */
+ else
+ scale_unit_sel_y = 1;/* use M/N scalar */
+
+ /* calculate phase step for the x direction */
+
+ /*
+ * if destination is only 1 pixel wide, the value of
+	 * phase_step_x is unimportant. Assign phase_step_x the src
+	 * ROI width as an arbitrary value.
+ */
+ if (out_ROI_width == 1)
+ phase_step_x = (uint32_t) ((src_ROI_width) <<
+ SCALER_PHASE_BITS);
+
+ /* if using FIR scalar */
+ else if (scale_unit_sel_x == 0) {
+
+ /*
+		 * Calculate the quotient ( src_ROI_width - 1 ) /
+		 * ( out_ROI_width - 1 ) with u3.29 precision. The
+		 * quotient is rounded up at the 29th fractional bit.
+ */
+ numerator = (uint64_t)(src_ROI_width - 1) <<
+ SCALER_PHASE_BITS;
+ /*
+		 * The denominator is never 0 because the
+		 * (out_ROI_width == 1) case is handled above.
+ */
+ denominator = (uint64_t)(out_ROI_width - 1);
+ /*
+		 * Divide and round up at the 29th fractional bit.
+ */
+ phase_step_x = (uint32_t) vpe_do_div((numerator +
+ denominator - 1), denominator);
+ } else if (scale_unit_sel_x == 1) { /* if M/N scalar */
+ /*
+ * Calculate the quotient ( src_ROI_width ) / (
+ * out_ROI_width) with u3.29 precision. Quotient is
+ * rounded down to the smaller 29th decimal point.
+ */
+ numerator = (uint64_t)(src_ROI_width) <<
+ SCALER_PHASE_BITS;
+ denominator = (uint64_t)(out_ROI_width);
+ phase_step_x =
+ (uint32_t) vpe_do_div(numerator, denominator);
+ }
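+
+	/*
+	 * Illustrative example (added for clarity, not from the
+	 * original code): scaling a 640-pixel-wide strip to 320 output
+	 * pixels takes the FIR path above (640 < 4 * 320 - 3), so
+	 * phase_step_x = ceil((639 << 29) / 319), roughly 2.003 in
+	 * u3.29, i.e. just above 0x40000000 which encodes exactly 2.0.
+	 */
+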
+ /* calculate phase step for the y direction */
+
+ /*
+	 * if destination is only 1 pixel tall, the value of
+	 * phase_step_y is unimportant. Assign phase_step_y the src
+	 * ROI height as an arbitrary value.
+ */
+ if (out_ROI_height == 1)
+ phase_step_y =
+ (uint32_t) ((src_ROI_height) << SCALER_PHASE_BITS);
+
+ /* if FIR scalar */
+ else if (scale_unit_sel_y == 0) {
+ /*
+ * Calculate the quotient ( src_ROI_height - 1 ) / (
+ * out_ROI_height - 1) with u3.29 precision. Quotient
+ * is rounded up to the larger 29th decimal point.
+ */
+ numerator = (uint64_t)(src_ROI_height - 1) <<
+ SCALER_PHASE_BITS;
+ /*
+		 * The denominator is never 0 because the
+		 * (out_ROI_height == 1) case is handled above.
+ */
+ denominator = (uint64_t)(out_ROI_height - 1);
+ /*
+ * Quotient is rounded up to the larger 29th decimal
+ * point.
+ */
+ phase_step_y =
+ (uint32_t) vpe_do_div(
+ (numerator + denominator - 1), denominator);
+ } else if (scale_unit_sel_y == 1) { /* if M/N scalar */
+ /*
+		 * Calculate the quotient ( src_ROI_height ) /
+		 * ( out_ROI_height ) with u3.29 precision. The
+		 * quotient is rounded down at the 29th fractional bit.
+ */
+ numerator = (uint64_t)(src_ROI_height) <<
+ SCALER_PHASE_BITS;
+ denominator = (uint64_t)(out_ROI_height);
+ phase_step_y = (uint32_t) vpe_do_div(
+ numerator, denominator);
+ }
+
+ /* decide which set of FIR coefficients to use */
+ if (phase_step_x > HAL_MDP_PHASE_STEP_2P50)
+ xscale_filter_sel = 0;
+ else if (phase_step_x > HAL_MDP_PHASE_STEP_1P66)
+ xscale_filter_sel = 1;
+ else if (phase_step_x > HAL_MDP_PHASE_STEP_1P25)
+ xscale_filter_sel = 2;
+ else
+ xscale_filter_sel = 3;
+
+ if (phase_step_y > HAL_MDP_PHASE_STEP_2P50)
+ yscale_filter_sel = 0;
+ else if (phase_step_y > HAL_MDP_PHASE_STEP_1P66)
+ yscale_filter_sel = 1;
+ else if (phase_step_y > HAL_MDP_PHASE_STEP_1P25)
+ yscale_filter_sel = 2;
+ else
+ yscale_filter_sel = 3;
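+
+	/*
+	 * Note (added for clarity): the HAL_MDP_PHASE_STEP_* thresholds
+	 * are 2.5, ~1.66 and 1.25 expressed in the same u3.29 format as
+	 * phase_step_x/y, so the coefficient set is effectively chosen
+	 * by the downscale ratio.
+	 */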
+
+ /* calculate phase init for the x direction */
+
+ /* if using FIR scalar */
+ if (scale_unit_sel_x == 0) {
+ if (out_ROI_width == 1)
+ phase_init_x =
+ (uint32_t) ((src_ROI_width - 1) <<
+ SCALER_PHASE_BITS);
+ else
+ phase_init_x = 0;
+ } else if (scale_unit_sel_x == 1) /* M over N scalar */
+ phase_init_x = 0;
+
+ /*
+ * calculate phase init for the y direction if using FIR
+ * scalar
+ */
+ if (scale_unit_sel_y == 0) {
+ if (out_ROI_height == 1)
+ phase_init_y =
+ (uint32_t) ((src_ROI_height -
+ 1) << SCALER_PHASE_BITS);
+ else
+ phase_init_y = 0;
+ } else if (scale_unit_sel_y == 1) /* M over N scalar */
+ phase_init_y = 0;
+
+ strip_info.phase_step_x = phase_step_x;
+ strip_info.phase_step_y = phase_step_y;
+ strip_info.phase_init_x = phase_init_x;
+ strip_info.phase_init_y = phase_init_y;
+ VPE_DBG("phase step x = %d, step y = %d.\n",
+ strip_info.phase_step_x, strip_info.phase_step_y);
+ VPE_DBG("phase init x = %d, init y = %d.\n",
+ strip_info.phase_init_x, strip_info.phase_init_y);
+
+ msm_camera_io_w(strip_info.phase_step_x, vpe_dev->base +
+ VPE_SCALE_PHASEX_STEP_OFFSET);
+ msm_camera_io_w(strip_info.phase_step_y, vpe_dev->base +
+ VPE_SCALE_PHASEY_STEP_OFFSET);
+
+ msm_camera_io_w(strip_info.phase_init_x, vpe_dev->base +
+ VPE_SCALE_PHASEX_INIT_OFFSET);
+ msm_camera_io_w(strip_info.phase_init_y, vpe_dev->base +
+ VPE_SCALE_PHASEY_INIT_OFFSET);
+}
+
+static void vpe_program_buffer_addresses(
+ struct vpe_device *vpe_dev,
+ unsigned long srcP0,
+ unsigned long srcP1,
+ unsigned long outP0,
+ unsigned long outP1)
+{
+ VPE_DBG("%s VPE Configured with:\n"
+ "Src %x, %x Dest %x, %x",
+ __func__, (uint32_t)srcP0, (uint32_t)srcP1,
+ (uint32_t)outP0, (uint32_t)outP1);
+
+ msm_camera_io_w(srcP0, vpe_dev->base + VPE_SRCP0_ADDR_OFFSET);
+ msm_camera_io_w(srcP1, vpe_dev->base + VPE_SRCP1_ADDR_OFFSET);
+ msm_camera_io_w(outP0, vpe_dev->base + VPE_OUTP0_ADDR_OFFSET);
+ msm_camera_io_w(outP1, vpe_dev->base + VPE_OUTP1_ADDR_OFFSET);
+}
+
+static int vpe_start(struct vpe_device *vpe_dev)
+{
+ /* enable the frame irq, bit 0 = Display list 0 ROI done */
+ msm_camera_io_w_mb(1, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
+ msm_camera_io_dump(vpe_dev->base, 0x120, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x00400, 0x18, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x10000, 0x250, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x30000, 0x20, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x50000, 0x30, CONFIG_MSM_VPE_DBG);
+ msm_camera_io_dump(vpe_dev->base + 0x50400, 0x10, CONFIG_MSM_VPE_DBG);
+
+ /*
+ * This triggers the operation. When the VPE is done,
+ * msm_vpe_irq will fire.
+ */
+ msm_camera_io_w_mb(1, vpe_dev->base + VPE_DL0_START_OFFSET);
+ return 0;
+}
+
+static void vpe_config_axi_default(struct vpe_device *vpe_dev)
+{
+ msm_camera_io_w(0x25, vpe_dev->base + VPE_AXI_ARB_2_OFFSET);
+}
+
+static int vpe_reset(struct vpe_device *vpe_dev)
+{
+ uint32_t vpe_version;
+ uint32_t rc = 0;
+
+ vpe_version = msm_camera_io_r(
+ vpe_dev->base + VPE_HW_VERSION_OFFSET);
+ VPE_DBG("vpe_version = 0x%x\n", vpe_version);
+ /* disable all interrupts.*/
+ msm_camera_io_w(0, vpe_dev->base + VPE_INTR_ENABLE_OFFSET);
+ /* clear all pending interrupts*/
+ msm_camera_io_w(0x1fffff, vpe_dev->base + VPE_INTR_CLEAR_OFFSET);
+ /* write sw_reset to reset the core. */
+ msm_camera_io_w(0x10, vpe_dev->base + VPE_SW_RESET_OFFSET);
+ /* then poll the reset bit, it should be self-cleared. */
+ while (1) {
+ rc = msm_camera_io_r(
+ vpe_dev->base + VPE_SW_RESET_OFFSET) & 0x10;
+ if (rc == 0)
+ break;
+ cpu_relax();
+ }
+ /*
+	 * At this point the hardware is reset. Now program the
+	 * default values.
+ */
+ msm_camera_io_w(VPE_AXI_RD_ARB_CONFIG_VALUE,
+ vpe_dev->base + VPE_AXI_RD_ARB_CONFIG_OFFSET);
+
+ msm_camera_io_w(VPE_CGC_ENABLE_VALUE,
+ vpe_dev->base + VPE_CGC_EN_OFFSET);
+ msm_camera_io_w(1, vpe_dev->base + VPE_CMD_MODE_OFFSET);
+ msm_camera_io_w(VPE_DEFAULT_OP_MODE_VALUE,
+ vpe_dev->base + VPE_OP_MODE_OFFSET);
+ msm_camera_io_w(VPE_DEFAULT_SCALE_CONFIG,
+ vpe_dev->base + VPE_SCALE_CONFIG_OFFSET);
+ vpe_config_axi_default(vpe_dev);
+ return rc;
+}
+
+static void vpe_update_scale_coef(struct vpe_device *vpe_dev, uint32_t *p)
+{
+ uint32_t i, offset;
+ offset = *p;
+ for (i = offset; i < (VPE_SCALE_COEFF_NUM + offset); i++) {
+ VPE_DBG("Setting scale table %d\n", i);
+ msm_camera_io_w(*(++p),
+ vpe_dev->base + VPE_SCALE_COEFF_LSBn(i));
+ msm_camera_io_w(*(++p),
+ vpe_dev->base + VPE_SCALE_COEFF_MSBn(i));
+ }
+}
+
+static void vpe_input_plane_config(struct vpe_device *vpe_dev, uint32_t *p)
+{
+ msm_camera_io_w(*p, vpe_dev->base + VPE_SRC_FORMAT_OFFSET);
+ msm_camera_io_w(*(++p),
+ vpe_dev->base + VPE_SRC_UNPACK_PATTERN1_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_IMAGE_SIZE_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_YSTRIDE1_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_SIZE_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_SRC_XY_OFFSET);
+}
+
+static void vpe_output_plane_config(struct vpe_device *vpe_dev, uint32_t *p)
+{
+ msm_camera_io_w(*p, vpe_dev->base + VPE_OUT_FORMAT_OFFSET);
+ msm_camera_io_w(*(++p),
+ vpe_dev->base + VPE_OUT_PACK_PATTERN1_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_OUT_YSTRIDE1_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_OUT_SIZE_OFFSET);
+ msm_camera_io_w(*(++p), vpe_dev->base + VPE_OUT_XY_OFFSET);
+}
+
+static void vpe_operation_config(struct vpe_device *vpe_dev, uint32_t *p)
+{
+ msm_camera_io_w(*p, vpe_dev->base + VPE_OP_MODE_OFFSET);
+}
+
+/**
+ * msm_vpe_transaction_setup() - send setup for one frame to VPE
+ * @vpe_dev: vpe device
+ * @data: packed setup commands
+ *
+ * See msm_vpe.h for the expected format of `data'
+ */
+static void msm_vpe_transaction_setup(struct vpe_device *vpe_dev, void *data)
+{
+ int i;
+ void *iter = data;
+
+ vpe_mem_dump("vpe_transaction", data, VPE_TRANSACTION_SETUP_CONFIG_LEN);
+
+ for (i = 0; i < VPE_NUM_SCALER_TABLES; ++i) {
+ vpe_update_scale_coef(vpe_dev, (uint32_t *)iter);
+ iter += VPE_SCALER_CONFIG_LEN;
+ }
+ vpe_input_plane_config(vpe_dev, (uint32_t *)iter);
+ iter += VPE_INPUT_PLANE_CFG_LEN;
+ vpe_output_plane_config(vpe_dev, (uint32_t *)iter);
+ iter += VPE_OUTPUT_PLANE_CFG_LEN;
+ vpe_operation_config(vpe_dev, (uint32_t *)iter);
+}
+
+static int msm_vpe_send_frame_to_hardware(struct vpe_device *vpe_dev,
+ struct msm_queue_cmd *frame_qcmd)
+{
+ struct msm_vpe_frame_info_t *process_frame;
+
+ if (vpe_dev->processing_q.len < MAX_VPE_PROCESSING_FRAME) {
+ process_frame = frame_qcmd->command;
+ msm_enqueue(&vpe_dev->processing_q,
+ &frame_qcmd->list_frame);
+
+ vpe_update_scaler_params(vpe_dev, process_frame->strip_info);
+ vpe_program_buffer_addresses(
+ vpe_dev,
+ process_frame->src_phyaddr,
+ process_frame->src_phyaddr
+ + process_frame->src_chroma_plane_offset,
+ process_frame->dest_phyaddr,
+ process_frame->dest_phyaddr
+ + process_frame->dest_chroma_plane_offset);
+ vpe_start(vpe_dev);
+ do_gettimeofday(&(process_frame->in_time));
+ }
+ return 0;
+}
+
+static int msm_vpe_cfg(struct vpe_device *vpe_dev,
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr)
+{
+ int rc = 0;
+ struct msm_queue_cmd *frame_qcmd = NULL;
+ struct msm_vpe_frame_info_t *new_frame =
+ kzalloc(sizeof(struct msm_vpe_frame_info_t), GFP_KERNEL);
+ unsigned long in_phyaddr, out_phyaddr;
+ struct msm_buf_mngr_info buff_mgr_info;
+
+ if (!new_frame) {
+ pr_err("Insufficient memory. return\n");
+ return -ENOMEM;
+ }
+
+ rc = copy_from_user(new_frame, (void __user *)ioctl_ptr->ioctl_ptr,
+ sizeof(struct msm_vpe_frame_info_t));
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ rc = -EINVAL;
+ goto err_free_new_frame;
+ }
+
+ in_phyaddr = msm_vpe_fetch_buffer_info(vpe_dev,
+ &new_frame->input_buffer_info,
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF));
+ if (!in_phyaddr) {
+		pr_err("error getting input physical address\n");
+ rc = -EINVAL;
+ goto err_free_new_frame;
+ }
+
+ memset(&new_frame->output_buffer_info, 0,
+ sizeof(struct msm_vpe_buffer_info_t));
+ memset(&buff_mgr_info, 0, sizeof(struct msm_buf_mngr_info));
+ buff_mgr_info.session_id = ((new_frame->identity >> 16) & 0xFFFF);
+ buff_mgr_info.stream_id = (new_frame->identity & 0xFFFF);
+ buff_mgr_info.type = MSM_CAMERA_BUF_MNGR_BUF_PLANAR;
+ rc = msm_vpe_buffer_ops(vpe_dev, VIDIOC_MSM_BUF_MNGR_GET_BUF,
+ &buff_mgr_info);
+ if (rc < 0) {
+ pr_err("error getting buffer\n");
+ rc = -EINVAL;
+ goto err_free_new_frame;
+ }
+
+ new_frame->output_buffer_info.index = buff_mgr_info.index;
+ out_phyaddr = msm_vpe_fetch_buffer_info(vpe_dev,
+ &new_frame->output_buffer_info,
+ ((new_frame->identity >> 16) & 0xFFFF),
+ (new_frame->identity & 0xFFFF));
+ if (!out_phyaddr) {
+		pr_err("error getting output physical address\n");
+ rc = -EINVAL;
+ goto err_put_buf;
+ }
+
+ new_frame->src_phyaddr = in_phyaddr;
+ new_frame->dest_phyaddr = out_phyaddr;
+
+ frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
+ if (!frame_qcmd) {
+ pr_err("Insufficient memory. return\n");
+ rc = -ENOMEM;
+ goto err_put_buf;
+ }
+
+ atomic_set(&frame_qcmd->on_heap, 1);
+ frame_qcmd->command = new_frame;
+ rc = msm_vpe_send_frame_to_hardware(vpe_dev, frame_qcmd);
+ if (rc < 0) {
+ pr_err("error cannot send frame to hardware\n");
+ rc = -EINVAL;
+ goto err_free_frame_qcmd;
+ }
+
+ return rc;
+
+err_free_frame_qcmd:
+ kfree(frame_qcmd);
+err_put_buf:
+ msm_vpe_buffer_ops(vpe_dev, VIDIOC_MSM_BUF_MNGR_PUT_BUF,
+ &buff_mgr_info);
+err_free_new_frame:
+ kfree(new_frame);
+ return rc;
+}
+
+static long msm_vpe_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+ int rc = 0;
+
+ mutex_lock(&vpe_dev->mutex);
+ switch (cmd) {
+ case VIDIOC_MSM_VPE_TRANSACTION_SETUP: {
+ struct msm_vpe_transaction_setup_cfg *cfg;
+ VPE_DBG("VIDIOC_MSM_VPE_TRANSACTION_SETUP\n");
+ if (sizeof(*cfg) != ioctl_ptr->len) {
+ pr_err("%s: size mismatch cmd=%d, len=%zu, expected=%zu",
+ __func__, cmd, ioctl_ptr->len,
+ sizeof(*cfg));
+ rc = -EINVAL;
+ break;
+ }
+
+ cfg = kzalloc(ioctl_ptr->len, GFP_KERNEL);
+ if (!cfg) {
+ pr_err("%s:%d: malloc error\n", __func__, __LINE__);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = copy_from_user(cfg, (void __user *)ioctl_ptr->ioctl_ptr,
+ ioctl_ptr->len);
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ kfree(cfg);
+ break;
+ }
+
+ msm_vpe_transaction_setup(vpe_dev, (void *)cfg);
+ kfree(cfg);
+ break;
+ }
+ case VIDIOC_MSM_VPE_CFG: {
+ VPE_DBG("VIDIOC_MSM_VPE_CFG\n");
+ rc = msm_vpe_cfg(vpe_dev, ioctl_ptr);
+ break;
+ }
+ case VIDIOC_MSM_VPE_ENQUEUE_STREAM_BUFF_INFO: {
+ struct msm_vpe_stream_buff_info_t *u_stream_buff_info;
+ struct msm_vpe_stream_buff_info_t k_stream_buff_info;
+
+ VPE_DBG("VIDIOC_MSM_VPE_ENQUEUE_STREAM_BUFF_INFO\n");
+
+ if (sizeof(struct msm_vpe_stream_buff_info_t) !=
+ ioctl_ptr->len) {
+ pr_err("%s:%d: invalid length\n", __func__, __LINE__);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ u_stream_buff_info = kzalloc(ioctl_ptr->len, GFP_KERNEL);
+ if (!u_stream_buff_info) {
+ pr_err("%s:%d: malloc error\n", __func__, __LINE__);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = (copy_from_user(u_stream_buff_info,
+ (void __user *)ioctl_ptr->ioctl_ptr,
+ ioctl_ptr->len) ? -EFAULT : 0);
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ if ((u_stream_buff_info->num_buffs == 0) ||
+ (u_stream_buff_info->num_buffs >
+ MSM_CAMERA_MAX_STREAM_BUF)) {
+ pr_err("%s:%d: Invalid number of buffers\n", __func__,
+ __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+ k_stream_buff_info.num_buffs = u_stream_buff_info->num_buffs;
+ k_stream_buff_info.identity = u_stream_buff_info->identity;
+ k_stream_buff_info.buffer_info =
+ kzalloc(k_stream_buff_info.num_buffs *
+ sizeof(struct msm_vpe_buffer_info_t), GFP_KERNEL);
+ if (!k_stream_buff_info.buffer_info) {
+ pr_err("%s:%d: malloc error\n", __func__, __LINE__);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = (copy_from_user(k_stream_buff_info.buffer_info,
+ (void __user *)u_stream_buff_info->buffer_info,
+ k_stream_buff_info.num_buffs *
+ sizeof(struct msm_vpe_buffer_info_t)) ?
+ -EFAULT : 0);
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = msm_vpe_add_buff_queue_entry(vpe_dev,
+ ((k_stream_buff_info.identity >> 16) & 0xFFFF),
+ (k_stream_buff_info.identity & 0xFFFF));
+ if (!rc)
+ rc = msm_vpe_enqueue_buff_info_list(vpe_dev,
+ &k_stream_buff_info);
+
+ kfree(k_stream_buff_info.buffer_info);
+ kfree(u_stream_buff_info);
+ break;
+ }
+ case VIDIOC_MSM_VPE_DEQUEUE_STREAM_BUFF_INFO: {
+ uint32_t identity;
+ struct msm_vpe_buff_queue_info_t *buff_queue_info;
+
+ VPE_DBG("VIDIOC_MSM_VPE_DEQUEUE_STREAM_BUFF_INFO\n");
+ if (ioctl_ptr->len != sizeof(uint32_t)) {
+ pr_err("%s:%d Invalid len\n", __func__, __LINE__);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ rc = (copy_from_user(&identity,
+ (void __user *)ioctl_ptr->ioctl_ptr,
+ ioctl_ptr->len) ? -EFAULT : 0);
+ if (rc) {
+ pr_err("%s:%d copy from user\n", __func__, __LINE__);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ buff_queue_info = msm_vpe_get_buff_queue_entry(vpe_dev,
+ ((identity >> 16) & 0xFFFF), (identity & 0xFFFF));
+ if (buff_queue_info == NULL) {
+ pr_err("error finding buffer queue entry for identity:%d\n",
+ identity);
+ mutex_unlock(&vpe_dev->mutex);
+ return -EINVAL;
+ }
+
+ msm_vpe_dequeue_buff_info_list(vpe_dev, buff_queue_info);
+ rc = msm_vpe_free_buff_queue_entry(vpe_dev,
+ buff_queue_info->session_id,
+ buff_queue_info->stream_id);
+ break;
+ }
+ case VIDIOC_MSM_VPE_GET_EVENTPAYLOAD: {
+ struct msm_device_queue *queue = &vpe_dev->eventData_q;
+ struct msm_queue_cmd *event_qcmd;
+ struct msm_vpe_frame_info_t *process_frame;
+ VPE_DBG("VIDIOC_MSM_VPE_GET_EVENTPAYLOAD\n");
+ event_qcmd = msm_dequeue(queue, list_eventdata);
+ if (!event_qcmd) {
+ pr_err("%s: %d event_qcmd is NULL\n",
+ __func__ , __LINE__);
+ return -EINVAL;
+ }
+ process_frame = event_qcmd->command;
+ VPE_DBG("fid %d\n", process_frame->frame_id);
+ if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
+ process_frame,
+ sizeof(struct msm_vpe_frame_info_t))) {
+ mutex_unlock(&vpe_dev->mutex);
+ kfree(process_frame);
+ kfree(event_qcmd);
+ return -EINVAL;
+ }
+
+ kfree(process_frame);
+ kfree(event_qcmd);
+ break;
+ }
+ }
+ mutex_unlock(&vpe_dev->mutex);
+ return rc;
+}
+
+static int msm_vpe_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_subscribe(fh, sub, MAX_VPE_V4l2_EVENTS, NULL);
+}
+
+static int msm_vpe_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ return v4l2_event_unsubscribe(fh, sub);
+}
+
+static struct v4l2_subdev_core_ops msm_vpe_subdev_core_ops = {
+ .ioctl = msm_vpe_subdev_ioctl,
+ .subscribe_event = msm_vpe_subscribe_event,
+ .unsubscribe_event = msm_vpe_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops msm_vpe_subdev_ops = {
+ .core = &msm_vpe_subdev_core_ops,
+};
+
+static struct v4l2_file_operations msm_vpe_v4l2_subdev_fops;
+
+static long msm_vpe_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct v4l2_fh *vfh = file->private_data;
+
+ switch (cmd) {
+ case VIDIOC_DQEVENT:
+ if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
+ return -ENOIOCTLCMD;
+
+ return v4l2_event_dequeue(vfh, arg, file->f_flags & O_NONBLOCK);
+
+ case VIDIOC_SUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg);
+
+ case VIDIOC_UNSUBSCRIBE_EVENT:
+ return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg);
+ case VIDIOC_MSM_VPE_GET_INST_INFO: {
+ uint32_t i;
+ struct vpe_device *vpe_dev = v4l2_get_subdevdata(sd);
+ struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
+ struct msm_vpe_frame_info_t inst_info;
+ memset(&inst_info, 0, sizeof(struct msm_vpe_frame_info_t));
+ for (i = 0; i < MAX_ACTIVE_VPE_INSTANCE; i++) {
+ if (vpe_dev->vpe_subscribe_list[i].vfh == vfh) {
+ inst_info.inst_id = i;
+ break;
+ }
+ }
+ if (copy_to_user(
+ (void __user *)ioctl_ptr->ioctl_ptr, &inst_info,
+ sizeof(struct msm_vpe_frame_info_t))) {
+ return -EINVAL;
+		}
+		/* do not fall through to the default subdev ioctl path */
+		break;
+	}
+ default:
+ return v4l2_subdev_call(sd, core, ioctl, cmd, arg);
+ }
+
+ return 0;
+}
+
+static long msm_vpe_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_vpe_subdev_do_ioctl);
+}
+
+static int vpe_register_domain(void)
+{
+ struct msm_iova_partition vpe_iommu_partition = {
+ /* TODO: verify that these are correct? */
+ .start = SZ_128K,
+ .size = SZ_2G - SZ_128K,
+ };
+ struct msm_iova_layout vpe_iommu_layout = {
+ .partitions = &vpe_iommu_partition,
+ .npartitions = 1,
+ .client_name = "camera_vpe",
+ .domain_flags = 0,
+ };
+
+ return msm_register_domain(&vpe_iommu_layout);
+}
+
+static int vpe_probe(struct platform_device *pdev)
+{
+ struct vpe_device *vpe_dev;
+ int rc = 0;
+
+ vpe_dev = kzalloc(sizeof(struct vpe_device), GFP_KERNEL);
+ if (!vpe_dev) {
+ pr_err("not enough memory\n");
+ return -ENOMEM;
+ }
+
+ vpe_dev->vpe_clk = kzalloc(sizeof(struct clk *) *
+ ARRAY_SIZE(vpe_clk_info), GFP_KERNEL);
+ if (!vpe_dev->vpe_clk) {
+ pr_err("not enough memory\n");
+ rc = -ENOMEM;
+ goto err_free_vpe_dev;
+ }
+
+ v4l2_subdev_init(&vpe_dev->msm_sd.sd, &msm_vpe_subdev_ops);
+ vpe_dev->msm_sd.sd.internal_ops = &msm_vpe_internal_ops;
+ snprintf(vpe_dev->msm_sd.sd.name, ARRAY_SIZE(vpe_dev->msm_sd.sd.name),
+ "vpe");
+ vpe_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ vpe_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
+ v4l2_set_subdevdata(&vpe_dev->msm_sd.sd, vpe_dev);
+ platform_set_drvdata(pdev, &vpe_dev->msm_sd.sd);
+ mutex_init(&vpe_dev->mutex);
+ spin_lock_init(&vpe_dev->tasklet_lock);
+
+ vpe_dev->pdev = pdev;
+
+ vpe_dev->mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "vpe");
+ if (!vpe_dev->mem) {
+ pr_err("no mem resource?\n");
+ rc = -ENODEV;
+ goto err_free_vpe_clk;
+ }
+
+ vpe_dev->irq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "vpe");
+ if (!vpe_dev->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto err_release_mem;
+ }
+
+ vpe_dev->domain_num = vpe_register_domain();
+ if (vpe_dev->domain_num < 0) {
+ pr_err("%s: could not register domain\n", __func__);
+ rc = -ENODEV;
+ goto err_release_mem;
+ }
+
+ vpe_dev->domain =
+ msm_get_iommu_domain(vpe_dev->domain_num);
+ if (!vpe_dev->domain) {
+ pr_err("%s: cannot find domain\n", __func__);
+ rc = -ENODEV;
+ goto err_release_mem;
+ }
+
+ vpe_dev->iommu_ctx_src = msm_iommu_get_ctx("vpe_src");
+ vpe_dev->iommu_ctx_dst = msm_iommu_get_ctx("vpe_dst");
+ if (!vpe_dev->iommu_ctx_src || !vpe_dev->iommu_ctx_dst) {
+ pr_err("%s: cannot get iommu_ctx\n", __func__);
+ rc = -ENODEV;
+ goto err_release_mem;
+ }
+
+ media_entity_init(&vpe_dev->msm_sd.sd.entity, 0, NULL, 0);
+ vpe_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ vpe_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_VPE;
+ vpe_dev->msm_sd.sd.entity.name = pdev->name;
+ msm_sd_register(&vpe_dev->msm_sd);
+ msm_cam_copy_v4l2_subdev_fops(&msm_vpe_v4l2_subdev_fops);
+ vpe_dev->msm_sd.sd.devnode->fops = &msm_vpe_v4l2_subdev_fops;
+ vpe_dev->msm_sd.sd.entity.revision = vpe_dev->msm_sd.sd.devnode->num;
+ vpe_dev->state = VPE_STATE_BOOT;
+ rc = vpe_init_hardware(vpe_dev);
+ if (rc < 0) {
+ pr_err("%s: Couldn't init vpe hardware\n", __func__);
+ goto err_unregister_sd;
+ }
+ vpe_reset(vpe_dev);
+ vpe_release_hardware(vpe_dev);
+ vpe_dev->state = VPE_STATE_OFF;
+
+ rc = iommu_attach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
+ if (rc < 0) {
+ pr_err("Couldn't attach to vpe_src context bank\n");
+ rc = -ENODEV;
+ goto err_unregister_sd;
+ }
+ rc = iommu_attach_device(vpe_dev->domain, vpe_dev->iommu_ctx_dst);
+ if (rc < 0) {
+ pr_err("Couldn't attach to vpe_dst context bank\n");
+ rc = -ENODEV;
+ goto err_detach_src;
+ }
+
+ vpe_dev->state = VPE_STATE_OFF;
+
+ msm_queue_init(&vpe_dev->eventData_q, "vpe-eventdata");
+ msm_queue_init(&vpe_dev->processing_q, "vpe-frame");
+ INIT_LIST_HEAD(&vpe_dev->tasklet_q);
+ tasklet_init(&vpe_dev->vpe_tasklet, msm_vpe_do_tasklet,
+ (unsigned long)vpe_dev);
+ vpe_dev->vpe_open_cnt = 0;
+
+ return rc;
+
+err_detach_src:
+ iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
+err_unregister_sd:
+ msm_sd_unregister(&vpe_dev->msm_sd);
+err_release_mem:
+ release_mem_region(vpe_dev->mem->start, resource_size(vpe_dev->mem));
+err_free_vpe_clk:
+ kfree(vpe_dev->vpe_clk);
+err_free_vpe_dev:
+ kfree(vpe_dev);
+ return rc;
+}
+
+static int vpe_device_remove(struct platform_device *dev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(dev);
+ struct vpe_device *vpe_dev;
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ vpe_dev = (struct vpe_device *)v4l2_get_subdevdata(sd);
+ if (!vpe_dev) {
+ pr_err("%s: vpe device is NULL\n", __func__);
+ return 0;
+ }
+
+ iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_dst);
+ iommu_detach_device(vpe_dev->domain, vpe_dev->iommu_ctx_src);
+ msm_sd_unregister(&vpe_dev->msm_sd);
+ release_mem_region(vpe_dev->mem->start, resource_size(vpe_dev->mem));
+ mutex_destroy(&vpe_dev->mutex);
+ kfree(vpe_dev);
+ return 0;
+}
+
+static struct platform_driver vpe_driver = {
+ .probe = vpe_probe,
+ .remove = vpe_device_remove,
+ .driver = {
+ .name = MSM_VPE_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_vpe_init_module(void)
+{
+ return platform_driver_register(&vpe_driver);
+}
+
+static void __exit msm_vpe_exit_module(void)
+{
+ platform_driver_unregister(&vpe_driver);
+}
+
+module_init(msm_vpe_init_module);
+module_exit(msm_vpe_exit_module);
+MODULE_DESCRIPTION("MSM VPE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.h b/drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.h
new file mode 100644
index 000000000000..f1869a2b9776
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/pproc/vpe/msm_vpe.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_VPE_H__
+#define __MSM_VPE_H__
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-subdev.h>
+#include "msm_sd.h"
+
+/*********** start of register offset *********************/
+#define VPE_INTR_ENABLE_OFFSET 0x0020
+#define VPE_INTR_STATUS_OFFSET 0x0024
+#define VPE_INTR_CLEAR_OFFSET 0x0028
+#define VPE_DL0_START_OFFSET 0x0030
+#define VPE_HW_VERSION_OFFSET 0x0070
+#define VPE_SW_RESET_OFFSET 0x0074
+#define VPE_AXI_RD_ARB_CONFIG_OFFSET 0x0078
+#define VPE_SEL_CLK_OR_HCLK_TEST_BUS_OFFSET 0x007C
+#define VPE_CGC_EN_OFFSET 0x0100
+#define VPE_CMD_STATUS_OFFSET 0x10008
+#define VPE_PROFILE_EN_OFFSET 0x10010
+#define VPE_PROFILE_COUNT_OFFSET 0x10014
+#define VPE_CMD_MODE_OFFSET 0x10060
+#define VPE_SRC_SIZE_OFFSET 0x10108
+#define VPE_SRCP0_ADDR_OFFSET 0x1010C
+#define VPE_SRCP1_ADDR_OFFSET 0x10110
+#define VPE_SRC_YSTRIDE1_OFFSET 0x1011C
+#define VPE_SRC_FORMAT_OFFSET 0x10124
+#define VPE_SRC_UNPACK_PATTERN1_OFFSET 0x10128
+#define VPE_OP_MODE_OFFSET 0x10138
+#define VPE_SCALE_PHASEX_INIT_OFFSET 0x1013C
+#define VPE_SCALE_PHASEY_INIT_OFFSET 0x10140
+#define VPE_SCALE_PHASEX_STEP_OFFSET 0x10144
+#define VPE_SCALE_PHASEY_STEP_OFFSET 0x10148
+#define VPE_OUT_FORMAT_OFFSET 0x10150
+#define VPE_OUT_PACK_PATTERN1_OFFSET 0x10154
+#define VPE_OUT_SIZE_OFFSET 0x10164
+#define VPE_OUTP0_ADDR_OFFSET 0x10168
+#define VPE_OUTP1_ADDR_OFFSET 0x1016C
+#define VPE_OUT_YSTRIDE1_OFFSET 0x10178
+#define VPE_OUT_XY_OFFSET 0x1019C
+#define VPE_SRC_XY_OFFSET 0x10200
+#define VPE_SRC_IMAGE_SIZE_OFFSET 0x10208
+#define VPE_SCALE_CONFIG_OFFSET 0x10230
+#define VPE_DEINT_STATUS_OFFSET 0x30000
+#define VPE_DEINT_DECISION_OFFSET 0x30004
+#define VPE_DEINT_COEFF0_OFFSET 0x30010
+#define VPE_SCALE_STATUS_OFFSET 0x50000
+#define VPE_SCALE_SVI_PARAM_OFFSET 0x50010
+#define VPE_SCALE_SHARPEN_CFG_OFFSET 0x50020
+#define VPE_SCALE_COEFF_LSP_0_OFFSET 0x50400
+#define VPE_SCALE_COEFF_MSP_0_OFFSET 0x50404
+
+#define VPE_AXI_ARB_1_OFFSET 0x00408
+#define VPE_AXI_ARB_2_OFFSET 0x0040C
+
+#define VPE_SCALE_COEFF_LSBn(n) (0x50400 + 8 * (n))
+#define VPE_SCALE_COEFF_MSBn(n) (0x50404 + 8 * (n))
+#define VPE_SCALE_COEFF_NUM 32
+
+/*********** end of register offset ********************/
+
+
+#define VPE_HARDWARE_VERSION 0x00080308
+#define VPE_SW_RESET_VALUE 0x00000010 /* bit 4 for PPP*/
+#define VPE_AXI_RD_ARB_CONFIG_VALUE 0x124924
+#define VPE_CMD_MODE_VALUE 0x1
+#define VPE_DEFAULT_OP_MODE_VALUE 0x40FC0004
+#define VPE_CGC_ENABLE_VALUE 0xffff
+#define VPE_DEFAULT_SCALE_CONFIG 0x3c
+
+#define VPE_NORMAL_MODE_CLOCK_RATE 150000000
+#define VPE_TURBO_MODE_CLOCK_RATE 200000000
+#define VPE_SUBDEV_MAX_EVENTS 30
+
+#define SCALER_PHASE_BITS 29
+#define HAL_MDP_PHASE_STEP_2P50 0x50000000
+#define HAL_MDP_PHASE_STEP_1P66 0x35555555
+#define HAL_MDP_PHASE_STEP_1P25 0x28000000
+
+
+#define MAX_ACTIVE_VPE_INSTANCE 8
+#define MAX_VPE_PROCESSING_FRAME 2
+#define MAX_VPE_V4l2_EVENTS 30
+
+#define MSM_VPE_TASKLETQ_SIZE 16
+
+/**
+ * The format of the msm_vpe_transaction_setup_cfg is as follows:
+ *
+ * - vpe_update_scale_coef (65*4 uint32_t's)
+ * - Each table is 65 uint32_t's long
+ * - 1st uint32_t in each table indicates offset
+ * - Following 64 uint32_t's are the data
+ *
+ * - vpe_input_plane_config (6 uint32_t's)
+ * - VPE_SRC_FORMAT_OFFSET
+ * - VPE_SRC_UNPACK_PATTERN1_OFFSET
+ * - VPE_SRC_IMAGE_SIZE_OFFSET
+ * - VPE_SRC_YSTRIDE1_OFFSET
+ * - VPE_SRC_SIZE_OFFSET
+ * - VPE_SRC_XY_OFFSET
+ *
+ * - vpe_output_plane_config (5 uint32_t's)
+ * - VPE_OUT_FORMAT_OFFSET
+ * - VPE_OUT_PACK_PATTERN1_OFFSET
+ * - VPE_OUT_YSTRIDE1_OFFSET
+ * - VPE_OUT_SIZE_OFFSET
+ * - VPE_OUT_XY_OFFSET
+ *
+ * - vpe_operation_config (1 uint32_t)
+ * - VPE_OP_MODE_OFFSET
+ *
+ */
+
+#define VPE_SCALER_CONFIG_LEN 260
+#define VPE_INPUT_PLANE_CFG_LEN 24
+#define VPE_OUTPUT_PLANE_CFG_LEN 20
+#define VPE_OPERATION_MODE_CFG_LEN 4
+#define VPE_NUM_SCALER_TABLES 4
+
+#define VPE_TRANSACTION_SETUP_CONFIG_LEN ( \
+ (VPE_SCALER_CONFIG_LEN * VPE_NUM_SCALER_TABLES) \
+ + VPE_INPUT_PLANE_CFG_LEN \
+ + VPE_OUTPUT_PLANE_CFG_LEN \
+ + VPE_OPERATION_MODE_CFG_LEN)
+/* VPE_TRANSACTION_SETUP_CONFIG_LEN = 1088 */
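+/* i.e. (260 * 4) + 24 + 20 + 4 = 1088 bytes; see the layout comment above */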
+
+struct msm_vpe_transaction_setup_cfg {
+ uint8_t scaler_cfg[VPE_TRANSACTION_SETUP_CONFIG_LEN];
+};
+
+struct vpe_subscribe_info {
+ struct v4l2_fh *vfh;
+ uint32_t active;
+};
+
+enum vpe_state {
+ VPE_STATE_BOOT,
+ VPE_STATE_IDLE,
+ VPE_STATE_ACTIVE,
+ VPE_STATE_OFF,
+};
+
+struct msm_queue_cmd {
+ struct list_head list_config;
+ struct list_head list_control;
+ struct list_head list_frame;
+ struct list_head list_pict;
+ struct list_head list_vpe_frame;
+ struct list_head list_eventdata;
+ void *command;
+ atomic_t on_heap;
+ struct timespec ts;
+ uint32_t error_code;
+ uint32_t trans_code;
+};
+
+struct msm_device_queue {
+ struct list_head list;
+ spinlock_t lock;
+ wait_queue_head_t wait;
+ int max;
+ int len;
+ const char *name;
+};
+
+struct msm_vpe_tasklet_queue_cmd {
+ struct list_head list;
+ uint32_t irq_status;
+ uint8_t cmd_used;
+};
+
+struct msm_vpe_buffer_map_info_t {
+ unsigned long len;
+ dma_addr_t phy_addr;
+ struct dma_buf *dbuf;
+ struct dma_buf_attachment *attachment;
+ struct sg_table *table;
+ struct msm_vpe_buffer_info_t buff_info;
+};
+
+struct msm_vpe_buffer_map_list_t {
+ struct msm_vpe_buffer_map_info_t map_info;
+ struct list_head entry;
+};
+
+struct msm_vpe_buff_queue_info_t {
+ uint32_t used;
+ uint16_t session_id;
+ uint16_t stream_id;
+ struct list_head vb2_buff_head;
+ struct list_head native_buff_head;
+};
+
+struct vpe_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct v4l2_subdev subdev;
+ struct resource *mem;
+ struct resource *irq;
+ void __iomem *base;
+ struct clk **vpe_clk;
+ struct regulator *fs_vpe;
+ struct mutex mutex;
+ enum vpe_state state;
+
+ int domain_num;
+ struct iommu_domain *domain;
+ struct device *iommu_ctx_src;
+ struct device *iommu_ctx_dst;
+ struct ion_client *client;
+ struct kref refcount;
+
+ /* Reusing proven tasklet from msm isp */
+ atomic_t irq_cnt;
+ uint8_t taskletq_idx;
+ spinlock_t tasklet_lock;
+ struct list_head tasklet_q;
+ struct tasklet_struct vpe_tasklet;
+ struct msm_vpe_tasklet_queue_cmd
+ tasklet_queue_cmd[MSM_VPE_TASKLETQ_SIZE];
+
+ struct vpe_subscribe_info vpe_subscribe_list[MAX_ACTIVE_VPE_INSTANCE];
+ uint32_t vpe_open_cnt;
+
+ struct msm_device_queue eventData_q; /* V4L2 Event Payload Queue */
+
+ /*
+ * Processing Queue: store frame info for frames sent to
+ * microcontroller
+ */
+ struct msm_device_queue processing_q;
+
+ struct msm_vpe_buff_queue_info_t *buff_queue;
+ uint32_t num_buffq;
+ struct v4l2_subdev *buf_mgr_subdev;
+};
+
+#endif /* __MSM_VPE_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/sensor/Makefile b/drivers/media/platform/msm/camera_v2/sensor/Makefile
new file mode 100644
index 000000000000..539ba24e109b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/msm_vb2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/camera
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSMB_CAMERA) += cci/ io/ csiphy/ csid/ actuator/ eeprom/ ois/ flash/
+obj-$(CONFIG_MSM_CAMERA_SENSOR) += msm_sensor_init.o msm_sensor_driver.o msm_sensor.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/Makefile b/drivers/media/platform/msm/camera_v2/sensor/actuator/Makefile
new file mode 100644
index 000000000000..37dda0f3089f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSMB_CAMERA) += msm_actuator.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
new file mode 100644
index 000000000000..6d1d3a4f601e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -0,0 +1,1974 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include "msm_sd.h"
+#include "msm_actuator.h"
+#include "msm_cci.h"
+
+DEFINE_MSM_MUTEX(msm_actuator_mutex);
+
+#undef CDBG
+#ifdef MSM_ACTUATOR_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define PARK_LENS_LONG_STEP 7
+#define PARK_LENS_MID_STEP 5
+#define PARK_LENS_SMALL_STEP 3
+#define MAX_QVALUE 4096
+
+static struct v4l2_file_operations msm_actuator_v4l2_subdev_fops;
+static int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl);
+static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl);
+
+static struct msm_actuator msm_vcm_actuator_table;
+static struct msm_actuator msm_piezo_actuator_table;
+static struct msm_actuator msm_hvcm_actuator_table;
+static struct msm_actuator msm_bivcm_actuator_table;
+
+static struct i2c_driver msm_actuator_i2c_driver;
+static struct msm_actuator *actuators[] = {
+ &msm_vcm_actuator_table,
+ &msm_piezo_actuator_table,
+ &msm_hvcm_actuator_table,
+ &msm_bivcm_actuator_table,
+};
+
+static int32_t msm_actuator_piezo_set_default_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t rc = 0;
+ struct msm_camera_i2c_reg_setting reg_setting;
+ CDBG("Enter\n");
+
+ if (a_ctrl->curr_step_pos != 0) {
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ a_ctrl->initial_code, 0, 0);
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ a_ctrl->initial_code, 0, 0);
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ rc = a_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("%s: i2c write error:%d\n",
+ __func__, rc);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->curr_step_pos = 0;
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
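+/*
+ * Fill a_ctrl->i2c_reg_tbl with the register writes needed to move the lens
+ * to next_lens_position: each reg_tbl entry either carries the DAC value
+ * (optionally split low byte/high byte across two consecutive registers) or
+ * a masked field of hw_params. Callers flush the table to the hardware with
+ * i2c_write_table_w_microdelay().
+ */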
+static void msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl,
+ int16_t next_lens_position, uint32_t hw_params, uint16_t delay)
+{
+ struct msm_actuator_reg_params_t *write_arr = NULL;
+ uint32_t hw_dword = hw_params;
+ uint16_t i2c_byte1 = 0, i2c_byte2 = 0;
+ uint16_t value = 0;
+ uint32_t size = 0, i = 0;
+ struct msm_camera_i2c_reg_array *i2c_tbl = NULL;
+ CDBG("Enter\n");
+
+ if (a_ctrl == NULL) {
+ pr_err("failed. actuator ctrl is NULL");
+ return;
+ }
+
+ size = a_ctrl->reg_tbl_size;
+ write_arr = a_ctrl->reg_tbl;
+ i2c_tbl = a_ctrl->i2c_reg_tbl;
+
+ for (i = 0; i < size; i++) {
+		/*
+		 * Check that the index into i2c_tbl cannot grow larger
+		 * than the allocated size of i2c_tbl.
+		 */
+ if ((a_ctrl->total_steps + 1) < (a_ctrl->i2c_tbl_index))
+ break;
+
+ if (write_arr[i].reg_write_type == MSM_ACTUATOR_WRITE_DAC) {
+ value = (next_lens_position <<
+ write_arr[i].data_shift) |
+ ((hw_dword & write_arr[i].hw_mask) >>
+ write_arr[i].hw_shift);
+
+ if (write_arr[i].reg_addr != 0xFFFF) {
+ i2c_byte1 = write_arr[i].reg_addr;
+ i2c_byte2 = value;
+ if (size != (i+1)) {
+ i2c_byte2 = value & 0xFF;
+ CDBG("byte1:0x%x, byte2:0x%x\n",
+ i2c_byte1, i2c_byte2);
+ i2c_tbl[a_ctrl->i2c_tbl_index].
+ reg_addr = i2c_byte1;
+ i2c_tbl[a_ctrl->i2c_tbl_index].
+ reg_data = i2c_byte2;
+ i2c_tbl[a_ctrl->i2c_tbl_index].
+ delay = 0;
+ a_ctrl->i2c_tbl_index++;
+ i++;
+ i2c_byte1 = write_arr[i].reg_addr;
+ i2c_byte2 = (value & 0xFF00) >> 8;
+ }
+ } else {
+ i2c_byte1 = (value & 0xFF00) >> 8;
+ i2c_byte2 = value & 0xFF;
+ }
+ } else {
+ i2c_byte1 = write_arr[i].reg_addr;
+ i2c_byte2 = (hw_dword & write_arr[i].hw_mask) >>
+ write_arr[i].hw_shift;
+ }
+ CDBG("i2c_byte1:0x%x, i2c_byte2:0x%x\n", i2c_byte1, i2c_byte2);
+ i2c_tbl[a_ctrl->i2c_tbl_index].reg_addr = i2c_byte1;
+ i2c_tbl[a_ctrl->i2c_tbl_index].reg_data = i2c_byte2;
+ i2c_tbl[a_ctrl->i2c_tbl_index].delay = delay;
+ a_ctrl->i2c_tbl_index++;
+ }
+ CDBG("Exit\n");
+}
+
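+/*
+ * bivcm variant of the above: instead of batching entries into i2c_reg_tbl,
+ * every reg_tbl entry is translated and issued immediately (write, poll or
+ * read-then-write), and the saved i2c addr_type is restored after each
+ * entry.
+ */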
+static int msm_actuator_bivcm_handle_i2c_ops(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ int16_t next_lens_position, uint32_t hw_params, uint16_t delay)
+{
+ struct msm_actuator_reg_params_t *write_arr = a_ctrl->reg_tbl;
+ uint32_t hw_dword = hw_params;
+ uint16_t i2c_byte1 = 0, i2c_byte2 = 0;
+ uint16_t value = 0, reg_data = 0;
+ uint32_t size = a_ctrl->reg_tbl_size, i = 0, j = 0;
+ int32_t rc = 0;
+ struct msm_camera_i2c_reg_array i2c_tbl;
+ struct msm_camera_i2c_reg_setting reg_setting;
+ enum msm_camera_i2c_reg_addr_type save_addr_type =
+ a_ctrl->i2c_client.addr_type;
+
+ for (i = 0; i < size; i++) {
+ reg_setting.size = 1;
+ switch (write_arr[i].reg_write_type) {
+ case MSM_ACTUATOR_WRITE_DAC:
+ value = (next_lens_position <<
+ write_arr[i].data_shift) |
+ ((hw_dword & write_arr[i].hw_mask) >>
+ write_arr[i].hw_shift);
+ if (write_arr[i].reg_addr != 0xFFFF) {
+ i2c_byte1 = write_arr[i].reg_addr;
+ i2c_byte2 = value;
+ } else {
+ i2c_byte1 = (value & 0xFF00) >> 8;
+ i2c_byte2 = value & 0xFF;
+ }
+ i2c_tbl.reg_addr = i2c_byte1;
+ i2c_tbl.reg_data = i2c_byte2;
+ i2c_tbl.delay = delay;
+ a_ctrl->i2c_tbl_index++;
+
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ case MSM_ACTUATOR_WRITE:
+ i2c_tbl.reg_data = write_arr[i].reg_data;
+ i2c_tbl.reg_addr = write_arr[i].reg_addr;
+ i2c_tbl.delay = write_arr[i].delay;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = write_arr[i].data_type;
+ switch (write_arr[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+				pr_err("Unsupported addr type: %d\n",
+ write_arr[i].addr_type);
+ break;
+ }
+
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ case MSM_ACTUATOR_WRITE_DIR_REG:
+ i2c_tbl.reg_data = hw_dword & 0xFFFF;
+ i2c_tbl.reg_addr = write_arr[i].reg_addr;
+ i2c_tbl.delay = write_arr[i].delay;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = write_arr[i].data_type;
+ switch (write_arr[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+				pr_err("Unsupported addr type: %d\n",
+ write_arr[i].addr_type);
+ break;
+ }
+
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ case MSM_ACTUATOR_POLL:
+ switch (write_arr[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+				pr_err("Unsupported addr type: %d\n",
+ write_arr[i].addr_type);
+ break;
+ }
+ for (j = 0; j < ACTUATOR_MAX_POLL_COUNT; j++) {
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
+ &a_ctrl->i2c_client,
+ write_arr[i].reg_addr,
+ write_arr[i].reg_data,
+ write_arr[i].data_type);
+ if (rc == 1)
+ continue;
+ if (rc < 0) {
+ pr_err("i2c poll error:%d\n", rc);
+ return rc;
+ }
+ break;
+ }
+ if (j == ACTUATOR_MAX_POLL_COUNT)
+ CDBG("%s:%d Poll register not as expected\n",
+ __func__, __LINE__);
+ break;
+ case MSM_ACTUATOR_READ_WRITE:
+ i2c_tbl.reg_addr = write_arr[i].reg_addr;
+ i2c_tbl.delay = write_arr[i].delay;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = write_arr[i].data_type;
+
+ switch (write_arr[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type =
+ MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+				pr_err("Unsupported addr type: %d\n",
+ write_arr[i].addr_type);
+ break;
+ }
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_read(
+ &a_ctrl->i2c_client,
+ write_arr[i].reg_addr,
+ &reg_data,
+ write_arr[i].data_type);
+ if (rc < 0) {
+				pr_err("i2c read error:%d\n", rc);
+ return rc;
+ }
+
+ i2c_tbl.reg_addr = write_arr[i].reg_data;
+ i2c_tbl.reg_data = reg_data;
+ i2c_tbl.delay = write_arr[i].delay;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = write_arr[i].data_type;
+
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ case MSM_ACTUATOR_WRITE_HW_DAMP:
+ i2c_tbl.reg_addr = write_arr[i].reg_addr;
+ i2c_tbl.reg_data = (hw_dword & write_arr[i].hw_mask) >>
+ write_arr[i].hw_shift;
+ i2c_tbl.delay = 0;
+ reg_setting.reg_setting = &i2c_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+
+ rc = a_ctrl->i2c_client.
+ i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ break;
+ default:
+ pr_err("%s:%d Invalid selection\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ a_ctrl->i2c_client.addr_type = save_addr_type;
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_init_focus(struct msm_actuator_ctrl_t *a_ctrl,
+ uint16_t size, struct reg_settings_t *settings)
+{
+ int32_t rc = -EFAULT;
+ int32_t i = 0;
+ enum msm_camera_i2c_reg_addr_type save_addr_type;
+ CDBG("Enter\n");
+
+ save_addr_type = a_ctrl->i2c_client.addr_type;
+ for (i = 0; i < size; i++) {
+
+ switch (settings[i].addr_type) {
+ case MSM_ACTUATOR_BYTE_ADDR:
+ a_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_BYTE_ADDR;
+ break;
+ case MSM_ACTUATOR_WORD_ADDR:
+ a_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_WORD_ADDR;
+ break;
+ default:
+			pr_err("Unsupported addr type: %d\n",
+ settings[i].addr_type);
+ break;
+ }
+
+ switch (settings[i].i2c_operation) {
+ case MSM_ACT_WRITE:
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &a_ctrl->i2c_client,
+ settings[i].reg_addr,
+ settings[i].reg_data,
+ settings[i].data_type);
+ break;
+ case MSM_ACT_POLL:
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
+ &a_ctrl->i2c_client,
+ settings[i].reg_addr,
+ settings[i].reg_data,
+ settings[i].data_type);
+ break;
+		default:
+			pr_err("Unsupported i2c_operation: %d\n",
+				settings[i].i2c_operation);
+			break;
+		}
+
+		if (0 != settings[i].delay)
+			msleep(settings[i].delay);
+
+		if (rc < 0)
+			break;
+	}
+
+ a_ctrl->curr_step_pos = 0;
+ /*
+ * Recover register addr_type after the init
+ * settings are written.
+ */
+ a_ctrl->i2c_client.addr_type = save_addr_type;
+ CDBG("Exit\n");
+ return rc;
+}
+
+static void msm_actuator_write_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ uint16_t curr_lens_pos,
+ struct damping_params_t *damping_params,
+ int8_t sign_direction,
+ int16_t code_boundary)
+{
+ int16_t next_lens_pos = 0;
+ uint16_t damping_code_step = 0;
+ uint16_t wait_time = 0;
+ CDBG("Enter\n");
+
+ damping_code_step = damping_params->damping_step;
+ wait_time = damping_params->damping_delay;
+
+ /* Write code based on damping_code_step in a loop */
+ for (next_lens_pos =
+ curr_lens_pos + (sign_direction * damping_code_step);
+ (sign_direction * next_lens_pos) <=
+ (sign_direction * code_boundary);
+ next_lens_pos =
+ (next_lens_pos +
+ (sign_direction * damping_code_step))) {
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ next_lens_pos, damping_params->hw_params, wait_time);
+ curr_lens_pos = next_lens_pos;
+ }
+
+ if (curr_lens_pos != code_boundary) {
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ code_boundary, damping_params->hw_params, wait_time);
+ }
+ CDBG("Exit\n");
+}
+
+static int msm_actuator_bivcm_write_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ uint16_t curr_lens_pos,
+ struct damping_params_t *damping_params,
+ int8_t sign_direction,
+ int16_t code_boundary)
+{
+ int16_t next_lens_pos = 0;
+ uint16_t damping_code_step = 0;
+ uint16_t wait_time = 0;
+ int32_t rc = 0;
+ CDBG("Enter\n");
+
+ damping_code_step = damping_params->damping_step;
+ wait_time = damping_params->damping_delay;
+
+ /* Write code based on damping_code_step in a loop */
+ for (next_lens_pos =
+ curr_lens_pos + (sign_direction * damping_code_step);
+ (sign_direction * next_lens_pos) <=
+ (sign_direction * code_boundary);
+ next_lens_pos =
+ (next_lens_pos +
+ (sign_direction * damping_code_step))) {
+ rc = msm_actuator_bivcm_handle_i2c_ops(a_ctrl,
+ next_lens_pos, damping_params->hw_params, wait_time);
+ if (rc < 0) {
+ pr_err("%s:%d msm_actuator_bivcm_handle_i2c_ops failed\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ curr_lens_pos = next_lens_pos;
+ }
+
+ if (curr_lens_pos != code_boundary) {
+ rc = msm_actuator_bivcm_handle_i2c_ops(a_ctrl,
+ code_boundary, damping_params->hw_params, wait_time);
+ if (rc < 0) {
+ pr_err("%s:%d msm_actuator_bivcm_handle_i2c_ops failed\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_piezo_move_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t dest_step_position = move_params->dest_step_pos;
+ struct damping_params_t ringing_params_kernel;
+ int32_t rc = 0;
+ int32_t num_steps = move_params->num_steps;
+ struct msm_camera_i2c_reg_setting reg_setting;
+ CDBG("Enter\n");
+
+ if (copy_from_user(&ringing_params_kernel,
+ &(move_params->ringing_params[0]),
+ sizeof(struct damping_params_t))) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ if (num_steps <= 0 || num_steps > MAX_NUMBER_OF_STEPS) {
+ pr_err("num_steps out of range = %d\n",
+ num_steps);
+ return -EFAULT;
+ }
+
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ (num_steps *
+ a_ctrl->region_params[0].code_per_step),
+ ringing_params_kernel.hw_params, 0);
+
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->curr_step_pos = dest_step_position;
+ CDBG("Exit\n");
+ return rc;
+}
+
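+/*
+ * Move the lens from curr_step_pos to dest_step_pos, walking the region
+ * boundaries and using the per-region damping (ringing) parameters copied
+ * from user space; the queued writes are flushed in one batch at the end.
+ */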
+static int32_t msm_actuator_move_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t rc = 0;
+ struct damping_params_t *ringing_params_kernel = NULL;
+ int8_t sign_dir = move_params->sign_dir;
+ uint16_t step_boundary = 0;
+ uint16_t target_step_pos = 0;
+ uint16_t target_lens_pos = 0;
+ int16_t dest_step_pos = move_params->dest_step_pos;
+ uint16_t curr_lens_pos = 0;
+ int dir = move_params->dir;
+ int32_t num_steps = move_params->num_steps;
+ struct msm_camera_i2c_reg_setting reg_setting;
+
+ CDBG("called, dir %d, num_steps %d\n", dir, num_steps);
+
+ if (dest_step_pos == a_ctrl->curr_step_pos)
+ return rc;
+
+ if ((sign_dir > MSM_ACTUATOR_MOVE_SIGNED_NEAR) ||
+ (sign_dir < MSM_ACTUATOR_MOVE_SIGNED_FAR)) {
+ pr_err("Invalid sign_dir = %d\n", sign_dir);
+ return -EFAULT;
+ }
+ if ((dir > MOVE_FAR) || (dir < MOVE_NEAR)) {
+ pr_err("Invalid direction = %d\n", dir);
+ return -EFAULT;
+ }
+ if (dest_step_pos > a_ctrl->total_steps) {
+ pr_err("Step pos greater than total steps = %d\n",
+ dest_step_pos);
+ return -EFAULT;
+ }
+ if ((a_ctrl->region_size <= 0) ||
+ (a_ctrl->region_size > MAX_ACTUATOR_REGION) ||
+ (!move_params->ringing_params)) {
+		pr_err("Invalid region size = %d, ringing_params = %p\n",
+ a_ctrl->region_size, move_params->ringing_params);
+ return -EFAULT;
+ }
+ /*Allocate memory for damping parameters of all regions*/
+ ringing_params_kernel = kmalloc(
+ sizeof(struct damping_params_t)*(a_ctrl->region_size),
+ GFP_KERNEL);
+ if (!ringing_params_kernel) {
+ pr_err("kmalloc for damping parameters failed\n");
+ return -EFAULT;
+ }
+ if (copy_from_user(ringing_params_kernel,
+ &(move_params->ringing_params[0]),
+ (sizeof(struct damping_params_t))*(a_ctrl->region_size))) {
+ pr_err("copy_from_user failed\n");
+ /*Free the allocated memory for damping parameters*/
+ kfree(ringing_params_kernel);
+ return -EFAULT;
+ }
+ curr_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
+ a_ctrl->i2c_tbl_index = 0;
+ CDBG("curr_step_pos =%d dest_step_pos =%d curr_lens_pos=%d\n",
+ a_ctrl->curr_step_pos, dest_step_pos, curr_lens_pos);
+
+ while (a_ctrl->curr_step_pos != dest_step_pos) {
+ step_boundary =
+ a_ctrl->region_params[a_ctrl->curr_region_index].
+ step_bound[dir];
+ if ((dest_step_pos * sign_dir) <=
+ (step_boundary * sign_dir)) {
+
+ target_step_pos = dest_step_pos;
+ target_lens_pos =
+ a_ctrl->step_position_table[target_step_pos];
+ a_ctrl->func_tbl->actuator_write_focus(a_ctrl,
+ curr_lens_pos,
+ &ringing_params_kernel
+ [a_ctrl->curr_region_index],
+ sign_dir,
+ target_lens_pos);
+ curr_lens_pos = target_lens_pos;
+
+ } else {
+ target_step_pos = step_boundary;
+ target_lens_pos =
+ a_ctrl->step_position_table[target_step_pos];
+ a_ctrl->func_tbl->actuator_write_focus(a_ctrl,
+ curr_lens_pos,
+ &ringing_params_kernel
+ [a_ctrl->curr_region_index],
+ sign_dir,
+ target_lens_pos);
+ curr_lens_pos = target_lens_pos;
+
+ a_ctrl->curr_region_index += sign_dir;
+ }
+ a_ctrl->curr_step_pos = target_step_pos;
+ }
+ /*Free the memory allocated for damping parameters*/
+ kfree(ringing_params_kernel);
+
+ move_params->curr_lens_pos = curr_lens_pos;
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("i2c write error:%d\n", rc);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ CDBG("Exit\n");
+
+ return rc;
+}
+
+static int32_t msm_actuator_bivcm_move_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t rc = 0;
+ struct damping_params_t *ringing_params_kernel = NULL;
+ int8_t sign_dir = move_params->sign_dir;
+ uint16_t step_boundary = 0;
+ uint16_t target_step_pos = 0;
+ uint16_t target_lens_pos = 0;
+ int16_t dest_step_pos = move_params->dest_step_pos;
+ uint16_t curr_lens_pos = 0;
+ int dir = move_params->dir;
+ int32_t num_steps = move_params->num_steps;
+
+ if (a_ctrl->step_position_table == NULL) {
+ pr_err("Step Position Table is NULL");
+ return -EFAULT;
+ }
+
+ CDBG("called, dir %d, num_steps %d\n", dir, num_steps);
+
+ if (dest_step_pos == a_ctrl->curr_step_pos)
+ return rc;
+
+ if ((sign_dir > MSM_ACTUATOR_MOVE_SIGNED_NEAR) ||
+ (sign_dir < MSM_ACTUATOR_MOVE_SIGNED_FAR)) {
+ pr_err("Invalid sign_dir = %d\n", sign_dir);
+ return -EFAULT;
+ }
+ if ((dir > MOVE_FAR) || (dir < MOVE_NEAR)) {
+ pr_err("Invalid direction = %d\n", dir);
+ return -EFAULT;
+ }
+ if (dest_step_pos > a_ctrl->total_steps) {
+ pr_err("Step pos greater than total steps = %d\n",
+ dest_step_pos);
+ return -EFAULT;
+ }
+ if ((a_ctrl->region_size <= 0) ||
+ (a_ctrl->region_size > MAX_ACTUATOR_REGION) ||
+ (!move_params->ringing_params)) {
+		pr_err("Invalid region size = %d, ringing_params = %p\n",
+ a_ctrl->region_size, move_params->ringing_params);
+ return -EFAULT;
+ }
+ /*Allocate memory for damping parameters of all regions*/
+ ringing_params_kernel = kmalloc(
+ sizeof(struct damping_params_t)*(a_ctrl->region_size),
+ GFP_KERNEL);
+ if (!ringing_params_kernel) {
+ pr_err("kmalloc for damping parameters failed\n");
+ return -EFAULT;
+ }
+ if (copy_from_user(ringing_params_kernel,
+ &(move_params->ringing_params[0]),
+ (sizeof(struct damping_params_t))*(a_ctrl->region_size))) {
+ pr_err("copy_from_user failed\n");
+ /*Free the allocated memory for damping parameters*/
+ kfree(ringing_params_kernel);
+ return -EFAULT;
+ }
+ curr_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
+ a_ctrl->i2c_tbl_index = 0;
+ CDBG("curr_step_pos =%d dest_step_pos =%d curr_lens_pos=%d\n",
+ a_ctrl->curr_step_pos, dest_step_pos, curr_lens_pos);
+
+ while (a_ctrl->curr_step_pos != dest_step_pos) {
+ step_boundary =
+ a_ctrl->region_params[a_ctrl->curr_region_index].
+ step_bound[dir];
+ if ((dest_step_pos * sign_dir) <=
+ (step_boundary * sign_dir)) {
+
+ target_step_pos = dest_step_pos;
+ target_lens_pos =
+ a_ctrl->step_position_table[target_step_pos];
+ rc = msm_actuator_bivcm_write_focus(a_ctrl,
+ curr_lens_pos,
+ &ringing_params_kernel
+ [a_ctrl->curr_region_index],
+ sign_dir,
+ target_lens_pos);
+ if (rc < 0) {
+ kfree(ringing_params_kernel);
+ return rc;
+ }
+ curr_lens_pos = target_lens_pos;
+ } else {
+ target_step_pos = step_boundary;
+ target_lens_pos =
+ a_ctrl->step_position_table[target_step_pos];
+ rc = msm_actuator_bivcm_write_focus(a_ctrl,
+ curr_lens_pos,
+ &ringing_params_kernel
+ [a_ctrl->curr_region_index],
+ sign_dir,
+ target_lens_pos);
+ if (rc < 0) {
+ kfree(ringing_params_kernel);
+ return rc;
+ }
+ curr_lens_pos = target_lens_pos;
+
+ a_ctrl->curr_region_index += sign_dir;
+ }
+ a_ctrl->curr_step_pos = target_step_pos;
+ }
+ /*Free the memory allocated for damping parameters*/
+ kfree(ringing_params_kernel);
+
+ move_params->curr_lens_pos = curr_lens_pos;
+ a_ctrl->i2c_tbl_index = 0;
+ CDBG("Exit\n");
+ return rc;
+}
+
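+/*
+ * Step the lens back to position 0 before power down, in progressively
+ * smaller jumps (PARK_LENS_LONG/MID/SMALL_STEP multiples of
+ * park_lens.max_step), with a damping delay between writes to avoid an
+ * audible tick.
+ */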
+static int32_t msm_actuator_park_lens(struct msm_actuator_ctrl_t *a_ctrl)
+{
+ int32_t rc = 0;
+ uint16_t next_lens_pos = 0;
+ struct msm_camera_i2c_reg_setting reg_setting;
+
+ a_ctrl->i2c_tbl_index = 0;
+ if ((a_ctrl->curr_step_pos > a_ctrl->total_steps) ||
+ (!a_ctrl->park_lens.max_step) ||
+ (!a_ctrl->step_position_table) ||
+ (!a_ctrl->i2c_reg_tbl) ||
+ (!a_ctrl->func_tbl) ||
+ (!a_ctrl->func_tbl->actuator_parse_i2c_params)) {
+ pr_err("%s:%d Failed to park lens.\n",
+ __func__, __LINE__);
+ return 0;
+ }
+
+ if (a_ctrl->park_lens.max_step > a_ctrl->max_code_size)
+ a_ctrl->park_lens.max_step = a_ctrl->max_code_size;
+
+ next_lens_pos = a_ctrl->step_position_table[a_ctrl->curr_step_pos];
+ while (next_lens_pos) {
+ /* conditions which help to reduce park lens time */
+ if (next_lens_pos > (a_ctrl->park_lens.max_step *
+ PARK_LENS_LONG_STEP)) {
+ next_lens_pos = next_lens_pos -
+ (a_ctrl->park_lens.max_step *
+ PARK_LENS_LONG_STEP);
+ } else if (next_lens_pos > (a_ctrl->park_lens.max_step *
+ PARK_LENS_MID_STEP)) {
+ next_lens_pos = next_lens_pos -
+ (a_ctrl->park_lens.max_step *
+ PARK_LENS_MID_STEP);
+ } else if (next_lens_pos > (a_ctrl->park_lens.max_step *
+ PARK_LENS_SMALL_STEP)) {
+ next_lens_pos = next_lens_pos -
+ (a_ctrl->park_lens.max_step *
+ PARK_LENS_SMALL_STEP);
+ } else {
+ next_lens_pos = (next_lens_pos >
+ a_ctrl->park_lens.max_step) ?
+ (next_lens_pos - a_ctrl->park_lens.
+ max_step) : 0;
+ }
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ next_lens_pos, a_ctrl->park_lens.hw_params,
+ a_ctrl->park_lens.damping_delay);
+
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+
+ rc = a_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("%s Failed I2C write Line %d\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ /* Use typical damping time delay to avoid tick sound */
+ usleep_range(10000, 12000);
+ }
+
+ return 0;
+}
+
+static int32_t msm_actuator_bivcm_init_step_table(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_set_info_t *set_info)
+{
+ int16_t code_per_step = 0;
+ int16_t cur_code = 0;
+ int16_t step_index = 0, region_index = 0;
+ uint16_t step_boundary = 0;
+ uint32_t max_code_size = 1;
+ uint16_t data_size = set_info->actuator_params.data_size;
+ uint16_t mask = 0, i = 0;
+ uint32_t qvalue = 0;
+ CDBG("Enter\n");
+
+ for (; data_size > 0; data_size--) {
+ max_code_size *= 2;
+ mask |= (1 << i++);
+ }
+
+ a_ctrl->max_code_size = max_code_size;
+ kfree(a_ctrl->step_position_table);
+ a_ctrl->step_position_table = NULL;
+
+ if (set_info->af_tuning_params.total_steps
+ > MAX_ACTUATOR_AF_TOTAL_STEPS) {
+ pr_err("Max actuator totalsteps exceeded = %d\n",
+ set_info->af_tuning_params.total_steps);
+ return -EFAULT;
+ }
+ /* Fill step position table */
+ a_ctrl->step_position_table =
+ kmalloc(sizeof(uint16_t) *
+ (set_info->af_tuning_params.total_steps + 1), GFP_KERNEL);
+
+ if (a_ctrl->step_position_table == NULL)
+ return -ENOMEM;
+
+ cur_code = set_info->af_tuning_params.initial_code;
+ a_ctrl->step_position_table[step_index++] = cur_code;
+ for (region_index = 0;
+ region_index < a_ctrl->region_size;
+ region_index++) {
+ code_per_step =
+ a_ctrl->region_params[region_index].code_per_step;
+ step_boundary =
+ a_ctrl->region_params[region_index].
+ step_bound[MOVE_NEAR];
+ qvalue = a_ctrl->region_params[region_index].qvalue;
+ for (; step_index <= step_boundary;
+ step_index++) {
+ if (qvalue > 1 && qvalue <= MAX_QVALUE)
+ cur_code = step_index * code_per_step / qvalue;
+ else
+ cur_code = step_index * code_per_step;
+ cur_code = (set_info->af_tuning_params.initial_code +
+ cur_code) & mask;
+ if (cur_code < max_code_size)
+ a_ctrl->step_position_table[step_index] =
+ cur_code;
+ else {
+ for (; step_index <
+ set_info->af_tuning_params.total_steps;
+ step_index++)
+ a_ctrl->
+ step_position_table[
+ step_index] =
+ max_code_size;
+ }
+ CDBG("step_position_table[%d] = %d\n", step_index,
+ a_ctrl->step_position_table[step_index]);
+ }
+ }
+ CDBG("Exit\n");
+ return 0;
+}
+
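+/*
+ * Build step_position_table[]: map every AF step index to a lens (DAC) code,
+ * starting from initial_code and adding code_per_step per step (divided by
+ * qvalue when 1 < qvalue <= MAX_QVALUE); entries that would exceed
+ * max_code_size are set to max_code_size.
+ */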
+static int32_t msm_actuator_init_step_table(struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_set_info_t *set_info)
+{
+ int16_t code_per_step = 0;
+ uint32_t qvalue = 0;
+ int16_t cur_code = 0;
+ int16_t step_index = 0, region_index = 0;
+ uint16_t step_boundary = 0;
+ uint32_t max_code_size = 1;
+ uint16_t data_size = set_info->actuator_params.data_size;
+ CDBG("Enter\n");
+
+ for (; data_size > 0; data_size--)
+ max_code_size *= 2;
+
+ a_ctrl->max_code_size = max_code_size;
+ if ((a_ctrl->actuator_state == ACT_OPS_ACTIVE) &&
+ (a_ctrl->step_position_table != NULL)) {
+ kfree(a_ctrl->step_position_table);
+ }
+ a_ctrl->step_position_table = NULL;
+
+ if (set_info->af_tuning_params.total_steps
+ > MAX_ACTUATOR_AF_TOTAL_STEPS) {
+ pr_err("Max actuator totalsteps exceeded = %d\n",
+ set_info->af_tuning_params.total_steps);
+ return -EFAULT;
+ }
+ /* Fill step position table */
+ a_ctrl->step_position_table =
+ kmalloc(sizeof(uint16_t) *
+ (set_info->af_tuning_params.total_steps + 1), GFP_KERNEL);
+
+ if (a_ctrl->step_position_table == NULL)
+ return -ENOMEM;
+
+ cur_code = set_info->af_tuning_params.initial_code;
+ a_ctrl->step_position_table[step_index++] = cur_code;
+ for (region_index = 0;
+ region_index < a_ctrl->region_size;
+ region_index++) {
+ code_per_step =
+ a_ctrl->region_params[region_index].code_per_step;
+ qvalue =
+ a_ctrl->region_params[region_index].qvalue;
+ step_boundary =
+ a_ctrl->region_params[region_index].
+ step_bound[MOVE_NEAR];
+ for (; step_index <= step_boundary;
+ step_index++) {
+ if (qvalue > 1 && qvalue <= MAX_QVALUE)
+ cur_code = step_index * code_per_step / qvalue;
+ else
+ cur_code = step_index * code_per_step;
+ cur_code += set_info->af_tuning_params.initial_code;
+ if (cur_code < max_code_size)
+ a_ctrl->step_position_table[step_index] =
+ cur_code;
+ else {
+ for (; step_index <
+ set_info->af_tuning_params.total_steps;
+ step_index++)
+ a_ctrl->
+ step_position_table[
+ step_index] =
+ max_code_size;
+ }
+ CDBG("step_position_table[%d] = %d\n", step_index,
+ a_ctrl->step_position_table[step_index]);
+ }
+ }
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_actuator_set_default_focus(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_move_params_t *move_params)
+{
+ int32_t rc = 0;
+ CDBG("Enter\n");
+
+ if (a_ctrl->curr_step_pos != 0)
+ rc = a_ctrl->func_tbl->actuator_move_focus(a_ctrl, move_params);
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_vreg_control(struct msm_actuator_ctrl_t *a_ctrl,
+ int config)
+{
+ int rc = 0, i, cnt;
+ struct msm_actuator_vreg *vreg_cfg;
+
+ vreg_cfg = &a_ctrl->vreg_cfg;
+ cnt = vreg_cfg->num_vreg;
+ if (!cnt)
+ return 0;
+
+ if (cnt >= MSM_ACTUATOT_MAX_VREGS) {
+ pr_err("%s failed %d cnt %d\n", __func__, __LINE__, cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ rc = msm_camera_config_single_vreg(&(a_ctrl->pdev->dev),
+ &vreg_cfg->cam_vreg[i],
+ (struct regulator **)&vreg_cfg->data[i],
+ config);
+ } else if (a_ctrl->act_device_type ==
+ MSM_CAMERA_I2C_DEVICE) {
+ rc = msm_camera_config_single_vreg(
+ &(a_ctrl->i2c_client.client->dev),
+ &vreg_cfg->cam_vreg[i],
+ (struct regulator **)&vreg_cfg->data[i],
+ config);
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_actuator_power_down(struct msm_actuator_ctrl_t *a_ctrl)
+{
+ int32_t rc = 0;
+ CDBG("Enter\n");
+ if (a_ctrl->actuator_state != ACT_DISABLE_STATE) {
+
+ if (a_ctrl->func_tbl && a_ctrl->func_tbl->actuator_park_lens) {
+ rc = a_ctrl->func_tbl->actuator_park_lens(a_ctrl);
+ if (rc < 0)
+ pr_err("%s:%d Lens park failed.\n",
+ __func__, __LINE__);
+ }
+
+ rc = msm_actuator_vreg_control(a_ctrl, 0);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
+ kfree(a_ctrl->step_position_table);
+ a_ctrl->step_position_table = NULL;
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ a_ctrl->i2c_tbl_index = 0;
+ a_ctrl->actuator_state = ACT_OPS_INACTIVE;
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_set_position(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_set_position_t *set_pos)
+{
+ int32_t rc = 0;
+ int32_t index;
+ uint16_t next_lens_position;
+ uint16_t delay;
+ uint32_t hw_params = 0;
+ struct msm_camera_i2c_reg_setting reg_setting;
+ CDBG("%s Enter %d\n", __func__, __LINE__);
+ if (set_pos->number_of_steps <= 0 ||
+ set_pos->number_of_steps > MAX_NUMBER_OF_STEPS) {
+ pr_err("num_steps out of range = %d\n",
+ set_pos->number_of_steps);
+ return -EFAULT;
+ }
+
+ if (!a_ctrl || !a_ctrl->func_tbl ||
+ !a_ctrl->func_tbl->actuator_parse_i2c_params) {
+ pr_err("failed. NULL actuator pointers.");
+ return -EFAULT;
+ }
+
+ if (a_ctrl->actuator_state != ACT_OPS_ACTIVE) {
+ pr_err("failed. Invalid actuator state.");
+ return -EFAULT;
+ }
+
+ a_ctrl->i2c_tbl_index = 0;
+ for (index = 0; index < set_pos->number_of_steps; index++) {
+ next_lens_position = set_pos->pos[index];
+ delay = set_pos->delay[index];
+ a_ctrl->func_tbl->actuator_parse_i2c_params(a_ctrl,
+ next_lens_position, hw_params, delay);
+
+ reg_setting.reg_setting = a_ctrl->i2c_reg_tbl;
+ reg_setting.size = a_ctrl->i2c_tbl_index;
+ reg_setting.data_type = a_ctrl->i2c_data_type;
+
+ rc = a_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_table_w_microdelay(
+ &a_ctrl->i2c_client, &reg_setting);
+ if (rc < 0) {
+ pr_err("%s Failed I2C write Line %d\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ a_ctrl->i2c_tbl_index = 0;
+ }
+ CDBG("%s exit %d\n", __func__, __LINE__);
+ return rc;
+}
+
+static int32_t msm_actuator_bivcm_set_position(
+ struct msm_actuator_ctrl_t *a_ctrl,
+ struct msm_actuator_set_position_t *set_pos)
+{
+ int32_t rc = 0;
+ int32_t index;
+ uint16_t next_lens_position;
+ uint16_t delay;
+ uint32_t hw_params = 0;
+ CDBG("%s Enter %d\n", __func__, __LINE__);
+ if (set_pos->number_of_steps <= 0 ||
+ set_pos->number_of_steps > MAX_NUMBER_OF_STEPS) {
+ pr_err("num_steps out of range = %d\n",
+ set_pos->number_of_steps);
+ return -EFAULT;
+ }
+
+ if (!a_ctrl) {
+ pr_err("failed. NULL actuator pointers.");
+ return -EFAULT;
+ }
+
+ if (a_ctrl->actuator_state != ACT_OPS_ACTIVE) {
+ pr_err("failed. Invalid actuator state.");
+ return -EFAULT;
+ }
+
+ a_ctrl->i2c_tbl_index = 0;
+ hw_params = set_pos->hw_params;
+ for (index = 0; index < set_pos->number_of_steps; index++) {
+ next_lens_position = set_pos->pos[index];
+ delay = set_pos->delay[index];
+ rc = msm_actuator_bivcm_handle_i2c_ops(a_ctrl,
+ next_lens_position, hw_params, delay);
+ a_ctrl->i2c_tbl_index = 0;
+ }
+ CDBG("%s exit %d\n", __func__, __LINE__);
+ return rc;
+}
+
+static int32_t msm_actuator_set_param(struct msm_actuator_ctrl_t *a_ctrl,
+	struct msm_actuator_set_info_t *set_info)
+{
+ struct reg_settings_t *init_settings = NULL;
+ int32_t rc = -EFAULT;
+ uint16_t i = 0;
+ struct msm_camera_cci_client *cci_client = NULL;
+ CDBG("Enter\n");
+
+ for (i = 0; i < ARRAY_SIZE(actuators); i++) {
+ if (set_info->actuator_params.act_type ==
+ actuators[i]->act_type) {
+ a_ctrl->func_tbl = &actuators[i]->func_tbl;
+ rc = 0;
+ }
+ }
+
+ if (rc < 0) {
+ pr_err("Actuator function table not found\n");
+ return rc;
+ }
+ if (set_info->af_tuning_params.total_steps
+ > MAX_ACTUATOR_AF_TOTAL_STEPS) {
+ pr_err("Max actuator totalsteps exceeded = %d\n",
+ set_info->af_tuning_params.total_steps);
+ return -EFAULT;
+ }
+ if (set_info->af_tuning_params.region_size
+ > MAX_ACTUATOR_REGION) {
+ pr_err("MAX_ACTUATOR_REGION is exceeded.\n");
+ return -EFAULT;
+ }
+
+ a_ctrl->region_size = set_info->af_tuning_params.region_size;
+ a_ctrl->pwd_step = set_info->af_tuning_params.pwd_step;
+ a_ctrl->total_steps = set_info->af_tuning_params.total_steps;
+
+ if (copy_from_user(&a_ctrl->region_params,
+ (void *)set_info->af_tuning_params.region_params,
+ a_ctrl->region_size * sizeof(struct region_params_t)))
+ return -EFAULT;
+
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ cci_client = a_ctrl->i2c_client.cci_client;
+ cci_client->sid =
+ set_info->actuator_params.i2c_addr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->cci_i2c_master = a_ctrl->cci_master;
+ cci_client->i2c_freq_mode =
+ set_info->actuator_params.i2c_freq_mode;
+ } else {
+ a_ctrl->i2c_client.client->addr =
+ set_info->actuator_params.i2c_addr;
+ }
+
+ a_ctrl->i2c_data_type = set_info->actuator_params.i2c_data_type;
+ a_ctrl->i2c_client.addr_type = set_info->actuator_params.i2c_addr_type;
+ if (set_info->actuator_params.reg_tbl_size <=
+ MAX_ACTUATOR_REG_TBL_SIZE) {
+ a_ctrl->reg_tbl_size = set_info->actuator_params.reg_tbl_size;
+ } else {
+ a_ctrl->reg_tbl_size = 0;
+ pr_err("MAX_ACTUATOR_REG_TBL_SIZE is exceeded.\n");
+ return -EFAULT;
+ }
+
+ if ((a_ctrl->actuator_state == ACT_OPS_ACTIVE) &&
+ (a_ctrl->i2c_reg_tbl != NULL)) {
+ kfree(a_ctrl->i2c_reg_tbl);
+ }
+ a_ctrl->i2c_reg_tbl = NULL;
+ a_ctrl->i2c_reg_tbl =
+ kmalloc(sizeof(struct msm_camera_i2c_reg_array) *
+ (set_info->af_tuning_params.total_steps + 1), GFP_KERNEL);
+ if (!a_ctrl->i2c_reg_tbl) {
+ pr_err("kmalloc fail\n");
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(&a_ctrl->reg_tbl,
+ (void *)set_info->actuator_params.reg_tbl_params,
+ a_ctrl->reg_tbl_size *
+ sizeof(struct msm_actuator_reg_params_t))) {
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ return -EFAULT;
+ }
+
+ if (set_info->actuator_params.init_setting_size &&
+ set_info->actuator_params.init_setting_size
+ <= MAX_ACTUATOR_INIT_SET) {
+ if (a_ctrl->func_tbl->actuator_init_focus) {
+ init_settings = kmalloc(sizeof(struct reg_settings_t) *
+ (set_info->actuator_params.init_setting_size),
+ GFP_KERNEL);
+ if (init_settings == NULL) {
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ pr_err("Error allocating memory for init_settings\n");
+ return -EFAULT;
+ }
+ if (copy_from_user(init_settings,
+ (void *)set_info->actuator_params.init_settings,
+ set_info->actuator_params.init_setting_size *
+ sizeof(struct reg_settings_t))) {
+ kfree(init_settings);
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ pr_err("Error copying init_settings\n");
+ return -EFAULT;
+ }
+ rc = a_ctrl->func_tbl->actuator_init_focus(a_ctrl,
+ set_info->actuator_params.init_setting_size,
+ init_settings);
+ kfree(init_settings);
+ if (rc < 0) {
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ pr_err("Error actuator_init_focus\n");
+ return -EFAULT;
+ }
+ }
+ }
+
+ /* Park lens data */
+ a_ctrl->park_lens = set_info->actuator_params.park_lens;
+ a_ctrl->initial_code = set_info->af_tuning_params.initial_code;
+ if (a_ctrl->func_tbl->actuator_init_step_table)
+ rc = a_ctrl->func_tbl->
+ actuator_init_step_table(a_ctrl, set_info);
+
+ a_ctrl->curr_step_pos = 0;
+ a_ctrl->curr_region_index = 0;
+ CDBG("Exit\n");
+
+ return rc;
+}
+
+static int msm_actuator_init(struct msm_actuator_ctrl_t *a_ctrl)
+{
+ int rc = 0;
+ CDBG("Enter\n");
+ if (!a_ctrl) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &a_ctrl->i2c_client, MSM_CCI_INIT);
+ if (rc < 0)
+ pr_err("cci_init failed\n");
+ }
+ a_ctrl->actuator_state = ACT_OPS_ACTIVE;
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_config(struct msm_actuator_ctrl_t *a_ctrl,
+ void __user *argp)
+{
+ struct msm_actuator_cfg_data *cdata =
+ (struct msm_actuator_cfg_data *)argp;
+ int32_t rc = 0;
+ mutex_lock(a_ctrl->actuator_mutex);
+ CDBG("Enter\n");
+ CDBG("%s type %d\n", __func__, cdata->cfgtype);
+
+ if (cdata->cfgtype != CFG_ACTUATOR_INIT &&
+ cdata->cfgtype != CFG_ACTUATOR_POWERUP &&
+ a_ctrl->actuator_state == ACT_DISABLE_STATE) {
+ pr_err("actuator disabled %d\n", rc);
+ mutex_unlock(a_ctrl->actuator_mutex);
+ return -EINVAL;
+ }
+
+ switch (cdata->cfgtype) {
+ case CFG_ACTUATOR_INIT:
+ rc = msm_actuator_init(a_ctrl);
+ if (rc < 0)
+ pr_err("msm_actuator_init failed %d\n", rc);
+ break;
+ case CFG_GET_ACTUATOR_INFO:
+ cdata->is_af_supported = 1;
+ cdata->cfg.cam_name = a_ctrl->cam_name;
+ break;
+
+ case CFG_SET_ACTUATOR_INFO:
+ rc = msm_actuator_set_param(a_ctrl, &cdata->cfg.set_info);
+ if (rc < 0)
+ pr_err("init table failed %d\n", rc);
+ break;
+
+ case CFG_SET_DEFAULT_FOCUS:
+ rc = a_ctrl->func_tbl->actuator_set_default_focus(a_ctrl,
+ &cdata->cfg.move);
+ if (rc < 0)
+ pr_err("move focus failed %d\n", rc);
+ break;
+
+ case CFG_MOVE_FOCUS:
+ rc = a_ctrl->func_tbl->actuator_move_focus(a_ctrl,
+ &cdata->cfg.move);
+ if (rc < 0)
+ pr_err("move focus failed %d\n", rc);
+ break;
+ case CFG_ACTUATOR_POWERDOWN:
+ rc = msm_actuator_power_down(a_ctrl);
+ if (rc < 0)
+ pr_err("msm_actuator_power_down failed %d\n", rc);
+ break;
+
+ case CFG_SET_POSITION:
+ rc = a_ctrl->func_tbl->actuator_set_position(a_ctrl,
+ &cdata->cfg.setpos);
+ if (rc < 0)
+ pr_err("actuator_set_position failed %d\n", rc);
+ break;
+
+ case CFG_ACTUATOR_POWERUP:
+ rc = msm_actuator_power_up(a_ctrl);
+ if (rc < 0)
+			pr_err("Failed actuator power up %d\n", rc);
+ break;
+
+ default:
+ break;
+ }
+ mutex_unlock(a_ctrl->actuator_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
+static int32_t msm_actuator_get_subdev_id(struct msm_actuator_ctrl_t *a_ctrl,
+ void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ *subdev_id = a_ctrl->pdev->id;
+ else
+ *subdev_id = a_ctrl->subdev_id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("Exit\n");
+ return 0;
+}
+
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_poll = msm_camera_cci_i2c_poll,
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_qup_func_tbl = {
+ .i2c_read = msm_camera_qup_i2c_read,
+ .i2c_read_seq = msm_camera_qup_i2c_read_seq,
+ .i2c_write = msm_camera_qup_i2c_write,
+ .i2c_write_table = msm_camera_qup_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_qup_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_qup_i2c_write_table_w_microdelay,
+ .i2c_poll = msm_camera_qup_i2c_poll,
+};
+
+static int msm_actuator_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd);
+ CDBG("Enter\n");
+ if (!a_ctrl) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ mutex_lock(a_ctrl->actuator_mutex);
+ if (a_ctrl->act_device_type == MSM_CAMERA_PLATFORM_DEVICE &&
+ a_ctrl->actuator_state != ACT_DISABLE_STATE) {
+ rc = a_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &a_ctrl->i2c_client, MSM_CCI_RELEASE);
+ if (rc < 0)
+			pr_err("cci release failed\n");
+ }
+ kfree(a_ctrl->i2c_reg_tbl);
+ a_ctrl->i2c_reg_tbl = NULL;
+ a_ctrl->actuator_state = ACT_DISABLE_STATE;
+ mutex_unlock(a_ctrl->actuator_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
+static const struct v4l2_subdev_internal_ops msm_actuator_internal_ops = {
+ .close = msm_actuator_close,
+};
+
+static long msm_actuator_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc;
+ struct msm_actuator_ctrl_t *a_ctrl = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+ CDBG("Enter\n");
+ CDBG("%s:%d a_ctrl %p argp %p\n", __func__, __LINE__, a_ctrl, argp);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_actuator_get_subdev_id(a_ctrl, argp);
+ case VIDIOC_MSM_ACTUATOR_CFG:
+ return msm_actuator_config(a_ctrl, argp);
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ if (!a_ctrl->i2c_client.i2c_func_tbl) {
+ pr_err("a_ctrl->i2c_client.i2c_func_tbl NULL\n");
+ return -EINVAL;
+ }
+ rc = msm_actuator_power_down(a_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d Actuator Power down failed\n",
+ __func__, __LINE__);
+ }
+ return msm_actuator_close(sd, NULL);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMPAT
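+/*
+ * 32-bit compat path: unpack msm_actuator_cfg_data32 from user space into a
+ * native msm_actuator_cfg_data, call the regular ioctl handler, then copy
+ * curr_lens_pos back for the default-focus and move-focus configs.
+ */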
+static long msm_actuator_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct msm_actuator_cfg_data32 *u32 =
+ (struct msm_actuator_cfg_data32 *)arg;
+ struct msm_actuator_cfg_data actuator_data;
+ void *parg = arg;
+ long rc;
+
+ switch (cmd) {
+ case VIDIOC_MSM_ACTUATOR_CFG32:
+ cmd = VIDIOC_MSM_ACTUATOR_CFG;
+ switch (u32->cfgtype) {
+ case CFG_SET_ACTUATOR_INFO:
+ actuator_data.cfgtype = u32->cfgtype;
+ actuator_data.is_af_supported = u32->is_af_supported;
+ actuator_data.cfg.set_info.actuator_params.act_type =
+ u32->cfg.set_info.actuator_params.act_type;
+
+ actuator_data.cfg.set_info.actuator_params
+ .reg_tbl_size =
+ u32->cfg.set_info.actuator_params.reg_tbl_size;
+
+ actuator_data.cfg.set_info.actuator_params.data_size =
+ u32->cfg.set_info.actuator_params.data_size;
+
+ actuator_data.cfg.set_info.actuator_params
+ .init_setting_size =
+ u32->cfg.set_info.actuator_params
+ .init_setting_size;
+
+ actuator_data.cfg.set_info.actuator_params.i2c_addr =
+ u32->cfg.set_info.actuator_params.i2c_addr;
+
+ actuator_data.cfg.set_info.actuator_params.
+ i2c_freq_mode =
+ u32->cfg.set_info.actuator_params.i2c_freq_mode;
+
+ actuator_data.cfg.set_info.actuator_params
+ .i2c_addr_type =
+ u32->cfg.set_info.actuator_params.i2c_addr_type;
+
+ actuator_data.cfg.set_info.actuator_params
+ .i2c_data_type =
+ u32->cfg.set_info.actuator_params.i2c_data_type;
+
+ actuator_data.cfg.set_info.actuator_params
+ .reg_tbl_params =
+ compat_ptr(
+ u32->cfg.set_info.actuator_params
+ .reg_tbl_params);
+
+ actuator_data.cfg.set_info.actuator_params
+ .init_settings =
+ compat_ptr(
+ u32->cfg.set_info.actuator_params
+ .init_settings);
+
+ actuator_data.cfg.set_info.af_tuning_params
+ .initial_code =
+ u32->cfg.set_info.af_tuning_params.initial_code;
+
+ actuator_data.cfg.set_info.af_tuning_params.pwd_step =
+ u32->cfg.set_info.af_tuning_params.pwd_step;
+
+ actuator_data.cfg.set_info.af_tuning_params
+ .region_size =
+ u32->cfg.set_info.af_tuning_params.region_size;
+
+ actuator_data.cfg.set_info.af_tuning_params
+ .total_steps =
+ u32->cfg.set_info.af_tuning_params.total_steps;
+
+ actuator_data.cfg.set_info.af_tuning_params
+ .region_params = compat_ptr(
+ u32->cfg.set_info.af_tuning_params
+ .region_params);
+
+ actuator_data.cfg.set_info.actuator_params.park_lens =
+ u32->cfg.set_info.actuator_params.park_lens;
+
+ parg = &actuator_data;
+ break;
+ case CFG_SET_DEFAULT_FOCUS:
+ case CFG_MOVE_FOCUS:
+ actuator_data.cfgtype = u32->cfgtype;
+ actuator_data.is_af_supported = u32->is_af_supported;
+ actuator_data.cfg.move.dir = u32->cfg.move.dir;
+
+ actuator_data.cfg.move.sign_dir =
+ u32->cfg.move.sign_dir;
+
+ actuator_data.cfg.move.dest_step_pos =
+ u32->cfg.move.dest_step_pos;
+
+ actuator_data.cfg.move.num_steps =
+ u32->cfg.move.num_steps;
+
+ actuator_data.cfg.move.curr_lens_pos =
+ u32->cfg.move.curr_lens_pos;
+
+ actuator_data.cfg.move.ringing_params =
+ compat_ptr(u32->cfg.move.ringing_params);
+ parg = &actuator_data;
+ break;
+ case CFG_SET_POSITION:
+ actuator_data.cfgtype = u32->cfgtype;
+ actuator_data.is_af_supported = u32->is_af_supported;
+ memcpy(&actuator_data.cfg.setpos, &(u32->cfg.setpos),
+ sizeof(struct msm_actuator_set_position_t));
+			parg = &actuator_data;
+			break;
+ default:
+ actuator_data.cfgtype = u32->cfgtype;
+ parg = &actuator_data;
+ break;
+ }
+ }
+
+ rc = msm_actuator_subdev_ioctl(sd, cmd, parg);
+
+ switch (cmd) {
+
+ case VIDIOC_MSM_ACTUATOR_CFG:
+
+ switch (u32->cfgtype) {
+
+ case CFG_SET_DEFAULT_FOCUS:
+ case CFG_MOVE_FOCUS:
+ u32->cfg.move.curr_lens_pos =
+ actuator_data.cfg.move.curr_lens_pos;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static long msm_actuator_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_actuator_subdev_do_ioctl);
+}
+#endif
+
+static int32_t msm_actuator_power_up(struct msm_actuator_ctrl_t *a_ctrl)
+{
+ int rc = 0;
+ CDBG("%s called\n", __func__);
+
+ rc = msm_actuator_vreg_control(a_ctrl, 1);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
+ a_ctrl->actuator_state = ACT_ENABLE_STATE;
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_actuator_subdev_core_ops = {
+ .ioctl = msm_actuator_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_actuator_subdev_ops = {
+ .core = &msm_actuator_subdev_core_ops,
+};
+
+static const struct i2c_device_id msm_actuator_i2c_id[] = {
+ {"qcom,actuator", (kernel_ulong_t)NULL},
+ { }
+};
+
+static int32_t msm_actuator_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct msm_actuator_ctrl_t *act_ctrl_t = NULL;
+ struct msm_actuator_vreg *vreg_cfg = NULL;
+ CDBG("Enter\n");
+
+ if (client == NULL) {
+ pr_err("msm_actuator_i2c_probe: client is null\n");
+ return -EINVAL;
+ }
+
+ act_ctrl_t = kzalloc(sizeof(struct msm_actuator_ctrl_t),
+ GFP_KERNEL);
+ if (!act_ctrl_t) {
+ pr_err("%s:%d failed no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("i2c_check_functionality failed\n");
+ goto probe_failure;
+ }
+
+ CDBG("client = 0x%p\n", client);
+
+ rc = of_property_read_u32(client->dev.of_node, "cell-index",
+ &act_ctrl_t->subdev_id);
+ CDBG("cell-index %d, rc %d\n", act_ctrl_t->subdev_id, rc);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto probe_failure;
+ }
+
+ if (of_find_property(client->dev.of_node,
+ "qcom,cam-vreg-name", NULL)) {
+ vreg_cfg = &act_ctrl_t->vreg_cfg;
+ rc = msm_camera_get_dt_vreg_data(client->dev.of_node,
+ &vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto probe_failure;
+ }
+ }
+
+ act_ctrl_t->i2c_driver = &msm_actuator_i2c_driver;
+ act_ctrl_t->i2c_client.client = client;
+	act_ctrl_t->curr_step_pos = 0;
+	act_ctrl_t->curr_region_index = 0;
+ /* Set device type as I2C */
+ act_ctrl_t->act_device_type = MSM_CAMERA_I2C_DEVICE;
+ act_ctrl_t->i2c_client.i2c_func_tbl = &msm_sensor_qup_func_tbl;
+ act_ctrl_t->act_v4l2_subdev_ops = &msm_actuator_subdev_ops;
+ act_ctrl_t->actuator_mutex = &msm_actuator_mutex;
+ act_ctrl_t->cam_name = act_ctrl_t->subdev_id;
+ CDBG("act_ctrl_t->cam_name: %d", act_ctrl_t->cam_name);
+ /* Assign name for sub device */
+ snprintf(act_ctrl_t->msm_sd.sd.name, sizeof(act_ctrl_t->msm_sd.sd.name),
+ "%s", act_ctrl_t->i2c_driver->driver.name);
+
+ /* Initialize sub device */
+ v4l2_i2c_subdev_init(&act_ctrl_t->msm_sd.sd,
+ act_ctrl_t->i2c_client.client,
+ act_ctrl_t->act_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&act_ctrl_t->msm_sd.sd, act_ctrl_t);
+ act_ctrl_t->msm_sd.sd.internal_ops = &msm_actuator_internal_ops;
+ act_ctrl_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&act_ctrl_t->msm_sd.sd.entity, 0, NULL, 0);
+ act_ctrl_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ act_ctrl_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_ACTUATOR;
+ act_ctrl_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2;
+ msm_sd_register(&act_ctrl_t->msm_sd);
+ msm_cam_copy_v4l2_subdev_fops(&msm_actuator_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_actuator_v4l2_subdev_fops.compat_ioctl32 =
+ msm_actuator_subdev_fops_ioctl;
+#endif
+ act_ctrl_t->msm_sd.sd.devnode->fops =
+ &msm_actuator_v4l2_subdev_fops;
+ act_ctrl_t->actuator_state = ACT_DISABLE_STATE;
+ pr_info("msm_actuator_i2c_probe: succeeded\n");
+ CDBG("Exit\n");
+
+ return 0;
+
+probe_failure:
+ kfree(act_ctrl_t);
+ return rc;
+}
+
+static int32_t msm_actuator_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_actuator_ctrl_t *msm_actuator_t = NULL;
+ struct msm_actuator_vreg *vreg_cfg;
+ CDBG("Enter\n");
+
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ msm_actuator_t = kzalloc(sizeof(struct msm_actuator_ctrl_t),
+ GFP_KERNEL);
+ if (!msm_actuator_t) {
+ pr_err("%s:%d failed no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32((&pdev->dev)->of_node, "cell-index",
+ &pdev->id);
+ CDBG("cell-index %d, rc %d\n", pdev->id, rc);
+ if (rc < 0) {
+ kfree(msm_actuator_t);
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node, "qcom,cci-master",
+ &msm_actuator_t->cci_master);
+ CDBG("qcom,cci-master %d, rc %d\n", msm_actuator_t->cci_master, rc);
+ if (rc < 0 || msm_actuator_t->cci_master >= MASTER_MAX) {
+ kfree(msm_actuator_t);
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+
+ if (of_find_property((&pdev->dev)->of_node,
+ "qcom,cam-vreg-name", NULL)) {
+ vreg_cfg = &msm_actuator_t->vreg_cfg;
+ rc = msm_camera_get_dt_vreg_data((&pdev->dev)->of_node,
+ &vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+ if (rc < 0) {
+ kfree(msm_actuator_t);
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+ }
+
+ msm_actuator_t->act_v4l2_subdev_ops = &msm_actuator_subdev_ops;
+ msm_actuator_t->actuator_mutex = &msm_actuator_mutex;
+ msm_actuator_t->cam_name = pdev->id;
+
+ /* Set platform device handle */
+ msm_actuator_t->pdev = pdev;
+ /* Set device type as platform device */
+ msm_actuator_t->act_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ msm_actuator_t->i2c_client.i2c_func_tbl = &msm_sensor_cci_func_tbl;
+ msm_actuator_t->i2c_client.cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+ if (!msm_actuator_t->i2c_client.cci_client) {
+ kfree(msm_actuator_t->vreg_cfg.cam_vreg);
+ kfree(msm_actuator_t);
+ pr_err("failed no memory\n");
+ return -ENOMEM;
+ }
+
+ cci_client = msm_actuator_t->i2c_client.cci_client;
+ cci_client->cci_subdev = msm_cci_get_subdev();
+ cci_client->cci_i2c_master = msm_actuator_t->cci_master;
+ v4l2_subdev_init(&msm_actuator_t->msm_sd.sd,
+ msm_actuator_t->act_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&msm_actuator_t->msm_sd.sd, msm_actuator_t);
+ msm_actuator_t->msm_sd.sd.internal_ops = &msm_actuator_internal_ops;
+ msm_actuator_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(msm_actuator_t->msm_sd.sd.name,
+ ARRAY_SIZE(msm_actuator_t->msm_sd.sd.name), "msm_actuator");
+ media_entity_init(&msm_actuator_t->msm_sd.sd.entity, 0, NULL, 0);
+ msm_actuator_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ msm_actuator_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_ACTUATOR;
+ msm_actuator_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2;
+ msm_sd_register(&msm_actuator_t->msm_sd);
+ msm_actuator_t->actuator_state = ACT_DISABLE_STATE;
+ msm_cam_copy_v4l2_subdev_fops(&msm_actuator_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_actuator_v4l2_subdev_fops.compat_ioctl32 =
+ msm_actuator_subdev_fops_ioctl;
+#endif
+ msm_actuator_t->msm_sd.sd.devnode->fops =
+ &msm_actuator_v4l2_subdev_fops;
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static const struct of_device_id msm_actuator_i2c_dt_match[] = {
+ {.compatible = "qcom,actuator"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_actuator_i2c_dt_match);
+
+static struct i2c_driver msm_actuator_i2c_driver = {
+ .id_table = msm_actuator_i2c_id,
+ .probe = msm_actuator_i2c_probe,
+ .remove = __exit_p(msm_actuator_i2c_remove),
+ .driver = {
+ .name = "qcom,actuator",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_actuator_i2c_dt_match,
+ },
+};
+
+static const struct of_device_id msm_actuator_dt_match[] = {
+ {.compatible = "qcom,actuator", .data = NULL},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_actuator_dt_match);
+
+static struct platform_driver msm_actuator_platform_driver = {
+ .probe = msm_actuator_platform_probe,
+ .driver = {
+ .name = "qcom,actuator",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_actuator_dt_match,
+ },
+};
+
+static int __init msm_actuator_init_module(void)
+{
+ int32_t rc = 0;
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_actuator_platform_driver);
+ if (!rc)
+ return rc;
+
+ CDBG("%s:%d rc %d\n", __func__, __LINE__, rc);
+ return i2c_add_driver(&msm_actuator_i2c_driver);
+}
+
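+/*
+ * Function tables for the supported actuator types (VCM, piezo, HVCM
+ * and bi-directional VCM); a NULL entry means the operation is not
+ * implemented for that type.
+ */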
+static struct msm_actuator msm_vcm_actuator_table = {
+ .act_type = ACTUATOR_VCM,
+ .func_tbl = {
+ .actuator_init_step_table = msm_actuator_init_step_table,
+ .actuator_move_focus = msm_actuator_move_focus,
+ .actuator_write_focus = msm_actuator_write_focus,
+ .actuator_set_default_focus = msm_actuator_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+ .actuator_set_position = msm_actuator_set_position,
+ .actuator_park_lens = msm_actuator_park_lens,
+ },
+};
+
+static struct msm_actuator msm_piezo_actuator_table = {
+ .act_type = ACTUATOR_PIEZO,
+ .func_tbl = {
+ .actuator_init_step_table = NULL,
+ .actuator_move_focus = msm_actuator_piezo_move_focus,
+ .actuator_write_focus = NULL,
+ .actuator_set_default_focus =
+ msm_actuator_piezo_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+ .actuator_park_lens = NULL,
+ },
+};
+
+static struct msm_actuator msm_hvcm_actuator_table = {
+ .act_type = ACTUATOR_HVCM,
+ .func_tbl = {
+ .actuator_init_step_table = msm_actuator_init_step_table,
+ .actuator_move_focus = msm_actuator_move_focus,
+ .actuator_write_focus = msm_actuator_write_focus,
+ .actuator_set_default_focus = msm_actuator_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = msm_actuator_parse_i2c_params,
+ .actuator_set_position = msm_actuator_set_position,
+ .actuator_park_lens = msm_actuator_park_lens,
+ },
+};
+
+static struct msm_actuator msm_bivcm_actuator_table = {
+ .act_type = ACTUATOR_BIVCM,
+ .func_tbl = {
+ .actuator_init_step_table = msm_actuator_bivcm_init_step_table,
+ .actuator_move_focus = msm_actuator_bivcm_move_focus,
+ .actuator_write_focus = NULL,
+ .actuator_set_default_focus = msm_actuator_set_default_focus,
+ .actuator_init_focus = msm_actuator_init_focus,
+ .actuator_parse_i2c_params = NULL,
+ .actuator_set_position = msm_actuator_bivcm_set_position,
+ .actuator_park_lens = NULL,
+ },
+};
+
+module_init(msm_actuator_init_module);
+MODULE_DESCRIPTION("MSM ACTUATOR");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h
new file mode 100644
index 000000000000..fb819a7935cb
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_ACTUATOR_H
+#define MSM_ACTUATOR_H
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <soc/qcom/camera2.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_camera.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define MSM_ACTUATOT_MAX_VREGS (10)
+#define ACTUATOR_MAX_POLL_COUNT 10
+
+struct msm_actuator_ctrl_t;
+
+enum msm_actuator_state_t {
+ ACT_ENABLE_STATE,
+ ACT_OPS_ACTIVE,
+ ACT_OPS_INACTIVE,
+ ACT_DISABLE_STATE,
+};
+
+struct msm_actuator_func_tbl {
+ int32_t (*actuator_i2c_write_b_af)(struct msm_actuator_ctrl_t *,
+ uint8_t,
+ uint8_t);
+ int32_t (*actuator_init_step_table)(struct msm_actuator_ctrl_t *,
+ struct msm_actuator_set_info_t *);
+ int32_t (*actuator_init_focus)(struct msm_actuator_ctrl_t *,
+ uint16_t, struct reg_settings_t *);
+ int32_t (*actuator_set_default_focus)(struct msm_actuator_ctrl_t *,
+ struct msm_actuator_move_params_t *);
+ int32_t (*actuator_move_focus)(struct msm_actuator_ctrl_t *,
+ struct msm_actuator_move_params_t *);
+ void (*actuator_parse_i2c_params)(struct msm_actuator_ctrl_t *,
+ int16_t, uint32_t, uint16_t);
+ void (*actuator_write_focus)(struct msm_actuator_ctrl_t *,
+ uint16_t,
+ struct damping_params_t *,
+ int8_t,
+ int16_t);
+ int32_t (*actuator_set_position)(struct msm_actuator_ctrl_t *,
+ struct msm_actuator_set_position_t *);
+ int32_t (*actuator_park_lens)(struct msm_actuator_ctrl_t *);
+};
+
+struct msm_actuator {
+ enum actuator_type act_type;
+ struct msm_actuator_func_tbl func_tbl;
+};
+
+struct msm_actuator_vreg {
+ struct camera_vreg_t *cam_vreg;
+ void *data[MSM_ACTUATOT_MAX_VREGS];
+ int num_vreg;
+};
+
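+/*
+ * Per-actuator device state, shared by the I2C and platform (CCI)
+ * probe paths.
+ */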
+struct msm_actuator_ctrl_t {
+ struct i2c_driver *i2c_driver;
+ struct platform_driver *pdriver;
+ struct platform_device *pdev;
+ struct msm_camera_i2c_client i2c_client;
+ enum msm_camera_device_type_t act_device_type;
+ struct msm_sd_subdev msm_sd;
+ enum af_camera_name cam_name;
+ struct mutex *actuator_mutex;
+ struct msm_actuator_func_tbl *func_tbl;
+ enum msm_actuator_data_type i2c_data_type;
+ struct v4l2_subdev sdev;
+ struct v4l2_subdev_ops *act_v4l2_subdev_ops;
+
+ int16_t curr_step_pos;
+ uint16_t curr_region_index;
+ uint16_t *step_position_table;
+ struct region_params_t region_params[MAX_ACTUATOR_REGION];
+ uint16_t reg_tbl_size;
+ struct msm_actuator_reg_params_t reg_tbl[MAX_ACTUATOR_REG_TBL_SIZE];
+ uint16_t region_size;
+ void *user_data;
+ uint32_t total_steps;
+ uint16_t pwd_step;
+ uint16_t initial_code;
+ struct msm_camera_i2c_reg_array *i2c_reg_tbl;
+ uint16_t i2c_tbl_index;
+ enum cci_i2c_master_t cci_master;
+ uint32_t subdev_id;
+ enum msm_actuator_state_t actuator_state;
+ struct msm_actuator_vreg vreg_cfg;
+ struct park_lens_data_t park_lens;
+ uint32_t max_code_size;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/Makefile b/drivers/media/platform/msm/camera_v2/sensor/cci/Makefile
new file mode 100644
index 000000000000..5815bbea2a0f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSM_CCI) += msm_cci.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cam_cci_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cam_cci_hwreg.h
new file mode 100644
index 000000000000..d70b8e3452b7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cam_cci_hwreg.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAM_CCI_HWREG__
+#define __MSM_CAM_CCI_HWREG__
+
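+/*
+ * Only the MASTER_0/QUEUE_0 command-queue and read registers are listed;
+ * msm_cci.c derives the other instances by adding a 0x200 per-master and
+ * 0x100 per-queue stride (0x100 per master for the read-data registers)
+ * to these base addresses.
+ */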
+#define CCI_HW_VERSION_ADDR 0x00000000
+#define CCI_RESET_CMD_ADDR 0x00000004
+#define CCI_RESET_CMD_RMSK 0x0f73f3f7
+#define CCI_M0_RESET_RMSK 0x3F1
+#define CCI_M1_RESET_RMSK 0x3F001
+#define CCI_QUEUE_START_ADDR 0x00000008
+#define CCI_SET_CID_SYNC_TIMER_ADDR 0x00000010
+#define CCI_SET_CID_SYNC_TIMER_OFFSET 0x00000004
+#define CCI_I2C_M0_SCL_CTL_ADDR 0x00000100
+#define CCI_I2C_M0_SDA_CTL_0_ADDR 0x00000104
+#define CCI_I2C_M0_SDA_CTL_1_ADDR 0x00000108
+#define CCI_I2C_M0_SDA_CTL_2_ADDR 0x0000010c
+#define CCI_I2C_M0_READ_DATA_ADDR 0x00000118
+#define CCI_I2C_M0_MISC_CTL_ADDR 0x00000110
+#define CCI_I2C_M0_READ_BUF_LEVEL_ADDR 0x0000011C
+#define CCI_HALT_REQ_ADDR 0x00000034
+#define CCI_M0_HALT_REQ_RMSK 0x1
+#define CCI_M1_HALT_REQ_RMSK 0x2
+#define CCI_I2C_M1_SCL_CTL_ADDR 0x00000200
+#define CCI_I2C_M1_SDA_CTL_0_ADDR 0x00000204
+#define CCI_I2C_M1_SDA_CTL_1_ADDR 0x00000208
+#define CCI_I2C_M1_SDA_CTL_2_ADDR 0x0000020c
+#define CCI_I2C_M1_MISC_CTL_ADDR 0x00000210
+#define CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR 0x00000304
+#define CCI_I2C_M0_Q0_CUR_CMD_ADDR 0x00000308
+#define CCI_I2C_M0_Q0_REPORT_STATUS_ADDR 0x0000030c
+#define CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR 0x00000300
+#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x00000310
+#define CCI_IRQ_MASK_0_ADDR 0x00000c04
+#define CCI_IRQ_MASK_0_RMSK 0x7fff7ff7
+#define CCI_IRQ_CLEAR_0_ADDR 0x00000c08
+#define CCI_IRQ_STATUS_0_ADDR 0x00000c0c
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK 0x4000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK 0x2000000
+#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK 0x1000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK 0x100000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK 0x10000
+#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK 0x1000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK 0x100
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK 0x10
+#define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK 0x18000EE6
+#define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK 0x60EE6000
+#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK 0x1
+#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR 0x00000c00
+
+#define DEBUG_TOP_REG_START 0x0
+#define DEBUG_TOP_REG_COUNT 14
+#define DEBUG_MASTER_REG_START 0x100
+#define DEBUG_MASTER_REG_COUNT 8
+#define DEBUG_MASTER_QUEUE_REG_START 0x300
+#define DEBUG_MASTER_QUEUE_REG_COUNT 6
+#define DEBUG_INTR_REG_START 0xC00
+#define DEBUG_INTR_REG_COUNT 7
+#endif /* __MSM_CAM_CCI_HWREG__ */
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
new file mode 100644
index 000000000000..d9bab85cc75c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -0,0 +1,2172 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <media/msm_isp.h>
+#include "msm_sd.h"
+#include "msm_cci.h"
+#include "msm_cam_cci_hwreg.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_dt_util.h"
+#include "cam_hw_ops.h"
+
+#define V4L2_IDENT_CCI 50005
+#define CCI_I2C_QUEUE_0_SIZE 64
+#define CCI_I2C_QUEUE_1_SIZE 16
+#define CYCLES_PER_MICRO_SEC_DEFAULT 4915
+#define CCI_MAX_DELAY 1000000
+
+#define CCI_TIMEOUT msecs_to_jiffies(100)
+
+/* TODO move this somewhere else */
+#define MSM_CCI_DRV_NAME "msm_cci"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#undef CCI_DBG
+#ifdef MSM_CCI_DEBUG
+#define CCI_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CCI_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+/* Max bytes that can be read per CCI read transaction */
+#define CCI_READ_MAX 12
+#define CCI_I2C_READ_MAX_RETRIES 3
+#define CCI_I2C_MAX_READ 8192
+#define CCI_I2C_MAX_WRITE 8192
+
+#define PRIORITY_QUEUE (QUEUE_0)
+#define SYNC_QUEUE (QUEUE_1)
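+
+/*
+ * Synchronized writes (MSM_CCI_I2C_WRITE_SYNC*) go through the sync
+ * queue, async and plain writes through the priority queue (plain
+ * writes grab whichever queue is free first), and reads always use
+ * QUEUE_1.
+ */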
+
+static struct v4l2_subdev *g_cci_subdev;
+
+static struct msm_cam_clk_info cci_clk_info[CCI_NUM_CLK_CASES][CCI_NUM_CLK_MAX];
+
+static void msm_cci_dump_registers(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master, enum cci_i2c_queue_t queue)
+{
+ uint32_t read_val = 0;
+ uint32_t i = 0;
+ uint32_t reg_offset = 0;
+
+ /* CCI Top Registers */
+ CCI_DBG(" **** %s : %d CCI TOP Registers ****\n", __func__, __LINE__);
+ for (i = 0; i < DEBUG_TOP_REG_COUNT; i++) {
+ reg_offset = DEBUG_TOP_REG_START + i * 4;
+ read_val = msm_camera_io_r_mb(cci_dev->base + reg_offset);
+ CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+ __func__, __LINE__, reg_offset, read_val);
+ }
+
+ /* CCI Master registers */
+ CCI_DBG(" **** %s : %d CCI MASTER%d Registers ****\n",
+ __func__, __LINE__, master);
+ for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
+ if (i == 6)
+ continue;
+ reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
+ read_val = msm_camera_io_r_mb(cci_dev->base + reg_offset);
+ CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+ __func__, __LINE__, reg_offset, read_val);
+ }
+
+ /* CCI Master Queue registers */
+ CCI_DBG(" **** %s : %d CCI MASTER%d QUEUE%d Registers ****\n",
+ __func__, __LINE__, master, queue);
+ for (i = 0; i < DEBUG_MASTER_QUEUE_REG_COUNT; i++) {
+ reg_offset = DEBUG_MASTER_QUEUE_REG_START + master*0x200 +
+ queue*0x100 + i * 4;
+ read_val = msm_camera_io_r_mb(cci_dev->base + reg_offset);
+ CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+ __func__, __LINE__, reg_offset, read_val);
+ }
+
+ /* CCI Interrupt registers */
+ CCI_DBG(" **** %s : %d CCI Interrupt Registers ****\n",
+ __func__, __LINE__);
+ for (i = 0; i < DEBUG_INTR_REG_COUNT; i++) {
+ reg_offset = DEBUG_INTR_REG_START + i * 4;
+ read_val = msm_camera_io_r_mb(cci_dev->base + reg_offset);
+ CCI_DBG("%s : %d offset = 0x%X value = 0x%X\n",
+ __func__, __LINE__, reg_offset, read_val);
+ }
+}
+
+static int32_t msm_cci_set_clk_param(struct cci_device *cci_dev,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ struct msm_cci_clk_params_t *clk_params = NULL;
+ enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+ enum i2c_freq_mode_t i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+
+	if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
+		pr_err("%s:%d invalid i2c_freq_mode = %d\n",
+			__func__, __LINE__, i2c_freq_mode);
+		return -EINVAL;
+	}
+
+	clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
+ if (cci_dev->i2c_freq_mode[master] == i2c_freq_mode)
+ return 0;
+ if (MASTER_0 == master) {
+ msm_camera_io_w_mb(clk_params->hw_thigh << 16 |
+ clk_params->hw_tlow,
+ cci_dev->base + CCI_I2C_M0_SCL_CTL_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_tsu_sto << 16 |
+ clk_params->hw_tsu_sta,
+ cci_dev->base + CCI_I2C_M0_SDA_CTL_0_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_thd_dat << 16 |
+ clk_params->hw_thd_sta,
+ cci_dev->base + CCI_I2C_M0_SDA_CTL_1_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_tbuf,
+ cci_dev->base + CCI_I2C_M0_SDA_CTL_2_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+ clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+ cci_dev->base + CCI_I2C_M0_MISC_CTL_ADDR);
+ } else if (MASTER_1 == master) {
+ msm_camera_io_w_mb(clk_params->hw_thigh << 16 |
+ clk_params->hw_tlow,
+ cci_dev->base + CCI_I2C_M1_SCL_CTL_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_tsu_sto << 16 |
+ clk_params->hw_tsu_sta,
+ cci_dev->base + CCI_I2C_M1_SDA_CTL_0_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_thd_dat << 16 |
+ clk_params->hw_thd_sta,
+ cci_dev->base + CCI_I2C_M1_SDA_CTL_1_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_tbuf,
+ cci_dev->base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+ msm_camera_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+ clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+ cci_dev->base + CCI_I2C_M1_MISC_CTL_ADDR);
+ }
+ cci_dev->i2c_freq_mode[master] = i2c_freq_mode;
+ return 0;
+}
+
+static void msm_cci_flush_queue(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master)
+{
+ int32_t rc = 0;
+
+ msm_camera_io_w_mb(1 << master, cci_dev->base + CCI_HALT_REQ_ADDR);
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
+ if (rc < 0) {
+ pr_err("%s:%d wait failed\n", __func__, __LINE__);
+ } else if (rc == 0) {
+ pr_err("%s:%d wait timeout\n", __func__, __LINE__);
+
+ /* Set reset pending flag to TRUE */
+ cci_dev->cci_master_info[master].reset_pending = TRUE;
+
+ /* Set proper mask to RESET CMD address based on MASTER */
+ if (master == MASTER_0)
+ msm_camera_io_w_mb(CCI_M0_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ else
+ msm_camera_io_w_mb(CCI_M1_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+
+ /* wait for reset done irq */
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].reset_complete,
+ CCI_TIMEOUT);
+ if (rc <= 0)
+ pr_err("%s:%d wait failed %d\n", __func__, __LINE__,
+ rc);
+ }
+ return;
+}
+
+static int32_t msm_cci_validate_queue(struct cci_device *cci_dev,
+ uint32_t len,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+ uint32_t read_val = 0;
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d\n",
+ __func__, __LINE__, read_val, len,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+	if ((read_val + len + 1) >
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size) {
+ uint32_t reg_val = 0;
+ uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+ CDBG("%s:%d CCI_I2C_REPORT_CMD\n", __func__, __LINE__);
+ msm_camera_io_w_mb(report_val,
+ cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ read_val++;
+ CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d\n",
+ __func__, __LINE__, read_val, queue);
+ msm_camera_io_w_mb(read_val, cci_dev->base +
+ CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+ reg_val = 1 << ((master * 2) + queue);
+ CDBG("%s:%d CCI_QUEUE_START_ADDR\n", __func__, __LINE__);
+ atomic_set(&cci_dev->cci_master_info[master].
+ done_pending[queue], 1);
+ msm_camera_io_w_mb(reg_val, cci_dev->base +
+ CCI_QUEUE_START_ADDR);
+ CDBG("%s line %d wait_for_completion_timeout\n",
+ __func__, __LINE__);
+ atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+ rc = wait_for_completion_timeout(&cci_dev->
+ cci_master_info[master].report_q[queue], CCI_TIMEOUT);
+ if (rc <= 0) {
+ pr_err("%s: wait_for_completion_timeout %d\n",
+ __func__, __LINE__);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ msm_cci_flush_queue(cci_dev, master);
+ return rc;
+ }
+ rc = cci_dev->cci_master_info[master].status;
+ if (rc < 0)
+ pr_err("%s failed rc %d\n", __func__, rc);
+ }
+ return rc;
+}
+
+static int32_t msm_cci_write_i2c_queue(struct cci_device *cci_dev,
+ uint32_t val,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+
+ if (!cci_dev) {
+ pr_err("%s: failed %d", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ rc = msm_cci_validate_queue(cci_dev, 1, master, queue);
+ if (rc < 0) {
+ pr_err("%s: failed %d", __func__, __LINE__);
+ return rc;
+ }
+ CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
+ __func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset, val);
+ msm_camera_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ return rc;
+}
+
+static int32_t msm_cci_wait(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+
+ if (!cci_dev) {
+ pr_err("%s: failed %d", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ rc = wait_for_completion_timeout(&cci_dev->
+ cci_master_info[master].report_q[queue], CCI_TIMEOUT);
+ CDBG("%s line %d wait DONE_for_completion_timeout\n",
+ __func__, __LINE__);
+
+ if (rc <= 0) {
+ msm_cci_dump_registers(cci_dev, master, queue);
+ pr_err("%s: %d wait for queue: %d\n",
+ __func__, __LINE__, queue);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ msm_cci_flush_queue(cci_dev, master);
+ return rc;
+ }
+ rc = cci_dev->cci_master_info[master].status;
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int32_t msm_cci_addr_to_num_bytes(
+ enum msm_camera_i2c_reg_addr_type addr_type)
+{
+ int32_t retVal;
+
+ switch (addr_type) {
+ case MSM_CAMERA_I2C_BYTE_ADDR:
+ retVal = 1;
+ break;
+ case MSM_CAMERA_I2C_WORD_ADDR:
+ retVal = 2;
+ break;
+ case MSM_CAMERA_I2C_3B_ADDR:
+ retVal = 3;
+ break;
+ default:
+ pr_err("%s: %d failed: %d\n", __func__, __LINE__, addr_type);
+ retVal = 1;
+ break;
+ }
+ return retVal;
+}
+
+static int32_t msm_cci_data_to_num_bytes(
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t retVal;
+
+ switch (data_type) {
+ case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ retVal = 1;
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ retVal = 2;
+ break;
+ case MSM_CAMERA_I2C_DWORD_DATA:
+ retVal = 4;
+ break;
+ default:
+ pr_err("%s: %d failed: %d\n", __func__, __LINE__, data_type);
+ retVal = 1;
+ break;
+ }
+ return retVal;
+}
+
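+/*
+ * Compute the number of 32-bit queue words needed for the next write
+ * command. For register writes, entries with consecutive addresses and
+ * no inter-command delay are packed into one CCI write command up to
+ * the hardware payload size; *pack returns the number of extra data
+ * bytes packed this way.
+ */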
+static int32_t msm_cci_calc_cmd_len(struct cci_device *cci_dev,
+ struct msm_camera_cci_ctrl *c_ctrl, uint32_t cmd_size,
+ struct msm_camera_i2c_reg_array *i2c_cmd, uint32_t *pack)
+{
+ uint8_t i;
+ uint32_t len = 0;
+ uint8_t data_len = 0, addr_len = 0;
+ uint8_t pack_max_len;
+ struct msm_camera_i2c_reg_setting *msg;
+ struct msm_camera_i2c_reg_array *cmd = i2c_cmd;
+ uint32_t size = cmd_size;
+
+ if (!cci_dev || !c_ctrl) {
+ pr_err("%s: failed %d", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ msg = &c_ctrl->cfg.cci_i2c_write_cfg;
+ *pack = 0;
+
+ if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) {
+ addr_len = msm_cci_addr_to_num_bytes(msg->addr_type);
+ len = (size + addr_len) <= (cci_dev->payload_size) ?
+ (size + addr_len):cci_dev->payload_size;
+ } else {
+ addr_len = msm_cci_addr_to_num_bytes(msg->addr_type);
+ data_len = msm_cci_data_to_num_bytes(msg->data_type);
+ len = data_len + addr_len;
+ pack_max_len = size < (cci_dev->payload_size-len) ?
+ size : (cci_dev->payload_size-len);
+ for (i = 0; i < pack_max_len;) {
+ if (cmd->delay || ((cmd - i2c_cmd) >= (cmd_size - 1)))
+ break;
+ if (cmd->reg_addr + 1 ==
+ (cmd+1)->reg_addr) {
+ len += data_len;
+ *pack += data_len;
+ } else
+ break;
+ i += data_len;
+ cmd++;
+ }
+ }
+
+ if (len > cci_dev->payload_size) {
+ pr_err("Len error: %d", len);
+ return -EINVAL;
+ }
+
+ len += 1; /*add i2c WR command*/
+ len = len/4 + 1;
+
+ return len;
+}
+
+static void msm_cci_load_report_cmd(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ uint32_t read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+
+ CDBG("%s:%d CCI_I2C_REPORT_CMD curr_w_cnt: %d\n",
+ __func__, __LINE__, read_val);
+ msm_camera_io_w_mb(report_val,
+ cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ read_val++;
+
+ CDBG("%s:%d CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d\n",
+ __func__, __LINE__, read_val);
+ msm_camera_io_w_mb(read_val, cci_dev->base +
+ CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+}
+
+static int32_t msm_cci_wait_report_cmd(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ uint32_t reg_val = 1 << ((master * 2) + queue);
+ msm_cci_load_report_cmd(cci_dev, master, queue);
+ atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+ atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1);
+ msm_camera_io_w_mb(reg_val, cci_dev->base +
+ CCI_QUEUE_START_ADDR);
+ return msm_cci_wait(cci_dev, master, queue);
+}
+
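+/*
+ * Queue watermark handling: when a queue crosses the half-full mark a
+ * REPORT command is appended and execution is kicked off; when it would
+ * overflow, the caller waits for the pending REPORT interrupt before
+ * loading more words.
+ */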
+static void msm_cci_process_half_q(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ uint32_t reg_val = 1 << ((master * 2) + queue);
+ if (0 == atomic_read(&cci_dev->cci_master_info[master].q_free[queue])) {
+ msm_cci_load_report_cmd(cci_dev, master, queue);
+ atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+ msm_camera_io_w_mb(reg_val, cci_dev->base +
+ CCI_QUEUE_START_ADDR);
+ }
+}
+
+static int32_t msm_cci_process_full_q(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+ if (1 == atomic_read(&cci_dev->cci_master_info[master].q_free[queue])) {
+ atomic_set(&cci_dev->cci_master_info[master].
+ done_pending[queue], 1);
+ rc = msm_cci_wait(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ } else {
+ rc = msm_cci_wait_report_cmd(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ }
+ return rc;
+}
+
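+/*
+ * Only the priority queue is bracketed with CCI_I2C_LOCK_CMD /
+ * CCI_I2C_UNLOCK_CMD; msm_cci_transfer_end() drops the lock before
+ * queuing the final report command.
+ */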
+static int32_t msm_cci_lock_queue(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue, uint32_t en)
+{
+ uint32_t val;
+
+ if (queue != PRIORITY_QUEUE)
+ return 0;
+
+ val = en ? CCI_I2C_LOCK_CMD : CCI_I2C_UNLOCK_CMD;
+ return msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+}
+
+static int32_t msm_cci_transfer_end(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ int32_t rc = 0;
+
+ if (0 == atomic_read(&cci_dev->cci_master_info[master].q_free[queue])) {
+ rc = msm_cci_lock_queue(cci_dev, master, queue, 0);
+ if (rc < 0) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return rc;
+ }
+ rc = msm_cci_wait_report_cmd(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ } else {
+ atomic_set(&cci_dev->cci_master_info[master].
+ done_pending[queue], 1);
+ rc = msm_cci_wait(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = msm_cci_lock_queue(cci_dev, master, queue, 0);
+ if (rc < 0) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return rc;
+ }
+ rc = msm_cci_wait_report_cmd(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+static int32_t msm_cci_get_queue_free_size(struct cci_device *cci_dev,
+ enum cci_i2c_master_t master,
+ enum cci_i2c_queue_t queue)
+{
+ uint32_t read_val = 0;
+ uint32_t reg_offset = master * 0x200 + queue * 0x100;
+ read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ CDBG("%s line %d CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d\n",
+ __func__, __LINE__, read_val,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+	return cci_dev->cci_i2c_queue_info[master][queue].max_queue_size -
+		read_val;
+}
+
+static int32_t msm_cci_data_queue(struct cci_device *cci_dev,
+ struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+ enum cci_i2c_sync sync_en)
+{
+ uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
+ int32_t rc = 0, free_size = 0, en_seq_write = 0;
+ uint32_t cmd = 0, delay = 0;
+ uint8_t data[12];
+ uint16_t reg_addr = 0;
+ struct msm_camera_i2c_reg_setting *i2c_msg =
+ &c_ctrl->cfg.cci_i2c_write_cfg;
+ uint16_t cmd_size = i2c_msg->size;
+ struct msm_camera_i2c_reg_array *i2c_cmd = i2c_msg->reg_setting;
+ enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+
+ uint32_t read_val = 0;
+ uint32_t reg_offset;
+ uint32_t val = 0;
+ uint32_t max_queue_size;
+
+ if (i2c_cmd == NULL) {
+		pr_err("%s:%d failed: i2c_cmd is NULL\n",
+			__func__, __LINE__);
+ return -EINVAL;
+ }
+
+ if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
+ pr_err("%s:%d failed: invalid cmd_size %d\n",
+ __func__, __LINE__, cmd_size);
+ return -EINVAL;
+ }
+
+ CDBG("%s addr type %d data type %d cmd_size %d\n", __func__,
+ i2c_msg->addr_type, i2c_msg->data_type, cmd_size);
+
+ if (i2c_msg->addr_type >= MSM_CAMERA_I2C_ADDR_TYPE_MAX) {
+ pr_err("%s:%d failed: invalid addr_type 0x%X\n",
+ __func__, __LINE__, i2c_msg->addr_type);
+ return -EINVAL;
+ }
+ if (i2c_msg->data_type >= MSM_CAMERA_I2C_DATA_TYPE_MAX) {
+ pr_err("%s:%d failed: invalid data_type 0x%X\n",
+ __func__, __LINE__, i2c_msg->data_type);
+ return -EINVAL;
+ }
+ reg_offset = master * 0x200 + queue * 0x100;
+
+ msm_camera_io_w_mb(cci_dev->cci_wait_sync_cfg.cid,
+ cci_dev->base + CCI_SET_CID_SYNC_TIMER_ADDR +
+ cci_dev->cci_wait_sync_cfg.csid *
+ CCI_SET_CID_SYNC_TIMER_OFFSET);
+
+ val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+ c_ctrl->cci_info->retries << 16 |
+ c_ctrl->cci_info->id_map << 18;
+
+ CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x\n",
+ __func__, CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset, val);
+ msm_camera_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+
+ atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
+
+ max_queue_size = cci_dev->cci_i2c_queue_info[master][queue].
+ max_queue_size;
+ reg_addr = i2c_cmd->reg_addr;
+
+ if (sync_en == MSM_SYNC_ENABLE && cci_dev->valid_sync &&
+ cmd_size < max_queue_size) {
+ val = CCI_I2C_WAIT_SYNC_CMD |
+ ((cci_dev->cci_wait_sync_cfg.line) << 4);
+ msm_camera_io_w_mb(val,
+ cci_dev->base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ reg_offset);
+ }
+
+ rc = msm_cci_lock_queue(cci_dev, master, queue, 1);
+ if (rc < 0) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return rc;
+ }
+
+ while (cmd_size) {
+ uint32_t pack = 0;
+ len = msm_cci_calc_cmd_len(cci_dev, c_ctrl, cmd_size,
+ i2c_cmd, &pack);
+ if (len <= 0) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ CDBG("%s line %d CUR_WORD_CNT_ADDR %d len %d max %d\n",
+ __func__, __LINE__, read_val, len, max_queue_size);
+		/* + 1 is the space allocation for the Report CMD */
+ if ((read_val + len + 1) > max_queue_size/2) {
+ if ((read_val + len + 1) > max_queue_size) {
+ rc = msm_cci_process_full_q(cci_dev,
+ master, queue);
+ if (rc < 0) {
+ pr_err("%s failed line %d\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ continue;
+ }
+ msm_cci_process_half_q(cci_dev, master, queue);
+ }
+
+ CDBG("%s cmd_size %d addr 0x%x data 0x%x\n", __func__,
+ cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
+ delay = i2c_cmd->delay;
+ i = 0;
+ data[i++] = CCI_I2C_WRITE_CMD;
+
+		/*
+		 * In case of multiple commands:
+		 * MSM_CCI_I2C_WRITE: addresses are not contiguous, so reload
+		 *	the address for each new packet.
+		 * MSM_CCI_I2C_WRITE_SEQ: addresses are contiguous, so keep
+		 *	the incremented address for the new packet.
+		 */
+ if (c_ctrl->cmd == MSM_CCI_I2C_WRITE ||
+ c_ctrl->cmd == MSM_CCI_I2C_WRITE_ASYNC ||
+ c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC ||
+ c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC_BLOCK)
+ reg_addr = i2c_cmd->reg_addr;
+
+ if (en_seq_write == 0) {
+ /* either byte or word addr */
+ if (i2c_msg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
+ data[i++] = reg_addr;
+ else {
+ data[i++] = (reg_addr & 0xFF00) >> 8;
+ data[i++] = reg_addr & 0x00FF;
+ }
+ }
+ /* max of 10 data bytes */
+ do {
+ if (i2c_msg->data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+ data[i++] = i2c_cmd->reg_data;
+ reg_addr++;
+ } else {
+ if ((i + 1) <= cci_dev->payload_size) {
+ data[i++] = (i2c_cmd->reg_data &
+ 0xFF00) >> 8; /* MSB */
+ data[i++] = i2c_cmd->reg_data &
+ 0x00FF; /* LSB */
+ reg_addr++;
+ } else
+ break;
+ }
+ i2c_cmd++;
+ --cmd_size;
+ } while (((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) || pack--) &&
+ (cmd_size > 0) && (i <= cci_dev->payload_size));
+ free_size = msm_cci_get_queue_free_size(cci_dev, master,
+ queue);
+ if ((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ) &&
+ ((i-1) == MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11) &&
+ cci_dev->support_seq_write && cmd_size > 0 &&
+ free_size > BURST_MIN_FREE_SIZE) {
+ data[0] |= 0xF0;
+ en_seq_write = 1;
+ } else {
+ data[0] |= ((i-1) << 4);
+ en_seq_write = 0;
+ }
+ len = ((i-1)/4) + 1;
+
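+		/*
+		 * Pack the assembled command bytes four per 32-bit word,
+		 * low byte first, load each word into the queue and bump
+		 * the exec word count after every word.
+		 */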
+ read_val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+ for (h = 0, k = 0; h < len; h++) {
+ cmd = 0;
+ for (j = 0; (j < 4 && k < i); j++)
+ cmd |= (data[k++] << (j * 8));
+ CDBG("%s LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d\n",
+ __func__, cmd, queue, len, read_val);
+ msm_camera_io_w_mb(cmd, cci_dev->base +
+ CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ master * 0x200 + queue * 0x100);
+
+ read_val += 1;
+ msm_camera_io_w_mb(read_val, cci_dev->base +
+ CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+ }
+
+ if ((delay > 0) && (delay < CCI_MAX_DELAY) &&
+ en_seq_write == 0) {
+ cmd = (uint32_t)((delay * cci_dev->cycles_per_us) /
+ 0x100);
+ cmd <<= 4;
+ cmd |= CCI_I2C_WAIT_CMD;
+ CDBG("%s CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x\n",
+ __func__, cmd);
+ msm_camera_io_w_mb(cmd, cci_dev->base +
+ CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+ master * 0x200 + queue * 0x100);
+ read_val += 1;
+ msm_camera_io_w_mb(read_val, cci_dev->base +
+ CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+ }
+ }
+
+ rc = msm_cci_transfer_end(cci_dev, master, queue);
+ if (rc < 0) {
+ pr_err("%s: %d failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ return rc;
+}
+
+static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ int32_t rc = 0;
+ uint32_t val = 0;
+ int32_t read_words = 0, exp_words = 0;
+ int32_t index = 0, first_byte = 0;
+ uint32_t i = 0;
+ enum cci_i2c_master_t master;
+ enum cci_i2c_queue_t queue = QUEUE_1;
+ struct cci_device *cci_dev = NULL;
+ struct msm_camera_cci_i2c_read_cfg *read_cfg = NULL;
+ CDBG("%s line %d\n", __func__, __LINE__);
+ cci_dev = v4l2_get_subdevdata(sd);
+ master = c_ctrl->cci_info->cci_i2c_master;
+ read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+ mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+ /* Set the I2C Frequency */
+ rc = msm_cci_set_clk_param(cci_dev, c_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_cci_set_clk_param failed rc = %d\n",
+ __func__, __LINE__, rc);
+		goto ERROR;
+ }
+
+ /*
+ * Call validate queue to make sure queue is empty before starting.
+ * If this call fails, don't proceed with i2c_read call. This is to
+ * avoid overflow / underflow of queue
+ */
+ rc = msm_cci_validate_queue(cci_dev,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+ master, queue);
+ if (rc < 0) {
+		pr_err("%s:%d Initial validation failed rc %d\n", __func__,
+ __LINE__, rc);
+ goto ERROR;
+ }
+
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		pr_err("%s:%d More than max retries\n", __func__,
+			__LINE__);
+		rc = -EINVAL;
+		goto ERROR;
+	}
+
+	if (read_cfg->data == NULL) {
+		pr_err("%s:%d Data ptr is NULL\n", __func__,
+			__LINE__);
+		rc = -EINVAL;
+		goto ERROR;
+	}
+
+ CDBG("%s master %d, queue %d\n", __func__, master, queue);
+ CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+ c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+ c_ctrl->cci_info->id_map);
+ val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+ c_ctrl->cci_info->retries << 16 |
+ c_ctrl->cci_info->id_map << 18;
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ val = CCI_I2C_LOCK_CMD;
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+	if (read_cfg->addr_type >= MSM_CAMERA_I2C_ADDR_TYPE_MAX) {
+		pr_err("%s:%d failed: invalid addr_type %d\n",
+			__func__, __LINE__, read_cfg->addr_type);
+		rc = -EINVAL;
+		goto ERROR;
+	}
+
+ if (read_cfg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
+ val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4) |
+ ((read_cfg->addr & 0xFF) << 8);
+ if (read_cfg->addr_type == MSM_CAMERA_I2C_WORD_ADDR)
+ val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4) |
+ (((read_cfg->addr & 0xFF00) >> 8) << 8) |
+ ((read_cfg->addr & 0xFF) << 16);
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ val = CCI_I2C_UNLOCK_CMD;
+ rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ val = msm_camera_io_r_mb(cci_dev->base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+ + master * 0x200 + queue * 0x100);
+ CDBG("%s cur word cnt 0x%x\n", __func__, val);
+ msm_camera_io_w_mb(val, cci_dev->base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+ + master * 0x200 + queue * 0x100);
+
+ val = 1 << ((master * 2) + queue);
+ msm_camera_io_w_mb(val, cci_dev->base + CCI_QUEUE_START_ADDR);
+ CDBG("%s:%d E wait_for_completion_timeout\n", __func__,
+ __LINE__);
+
+ rc = wait_for_completion_timeout(&cci_dev->
+ cci_master_info[master].reset_complete, CCI_TIMEOUT);
+ if (rc <= 0) {
+ msm_cci_dump_registers(cci_dev, master, queue);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ pr_err("%s: %d wait_for_completion_timeout rc = %d\n",
+ __func__, __LINE__, rc);
+ msm_cci_flush_queue(cci_dev, master);
+ goto ERROR;
+ } else {
+ rc = 0;
+ }
+
+ read_words = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+ exp_words = ((read_cfg->num_byte / 4) + 1);
+ if (read_words != exp_words) {
+ pr_err("%s:%d read_words = %d, exp words = %d\n", __func__,
+ __LINE__, read_words, exp_words);
+ memset(read_cfg->data, 0, read_cfg->num_byte);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ index = 0;
+ CDBG("%s index %d num_type %d\n", __func__, index,
+ read_cfg->num_byte);
+ first_byte = 0;
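+	/*
+	 * Drain the read FIFO: the first byte returned is the slave id and
+	 * is discarded, the remaining bytes are copied out four per word,
+	 * low byte first.
+	 */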
+ do {
+ val = msm_camera_io_r_mb(cci_dev->base +
+ CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+ CDBG("%s read val 0x%x\n", __func__, val);
+ for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
+ CDBG("%s i %d index %d\n", __func__, i, index);
+ if (!first_byte) {
+ CDBG("%s sid 0x%x\n", __func__, val & 0xFF);
+ first_byte++;
+ } else {
+ read_cfg->data[index] =
+ (val >> (i * 8)) & 0xFF;
+ CDBG("%s data[%d] 0x%x\n", __func__, index,
+ read_cfg->data[index]);
+ index++;
+ }
+ }
+ } while (--read_words > 0);
+ERROR:
+ mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+ return rc;
+}
+
+static int32_t msm_cci_i2c_read_bytes(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ int32_t rc = 0;
+ struct cci_device *cci_dev = NULL;
+ enum cci_i2c_master_t master;
+ struct msm_camera_cci_i2c_read_cfg *read_cfg = NULL;
+ uint16_t read_bytes = 0;
+
+ if (!sd || !c_ctrl) {
+ pr_err("%s:%d sd %p c_ctrl %p\n", __func__,
+ __LINE__, sd, c_ctrl);
+ return -EINVAL;
+ }
+ if (!c_ctrl->cci_info) {
+ pr_err("%s:%d cci_info NULL\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev) {
+ pr_err("%s:%d cci_dev NULL\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+ pr_err("%s invalid cci state %d\n",
+ __func__, cci_dev->cci_state);
+ return -EINVAL;
+ }
+
+ if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+ || c_ctrl->cci_info->cci_i2c_master < 0) {
+ pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ master = c_ctrl->cci_info->cci_i2c_master;
+ read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+ if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
+		pr_err("%s:%d invalid read num_byte %d\n",
+			__func__, __LINE__, read_cfg->num_byte);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ read_bytes = read_cfg->num_byte;
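+	/*
+	 * Split the request into CCI_READ_MAX (12 byte) chunks, advancing
+	 * the register address and destination pointer between chunks.
+	 */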
+ do {
+ if (read_bytes > CCI_READ_MAX)
+ read_cfg->num_byte = CCI_READ_MAX;
+ else
+ read_cfg->num_byte = read_bytes;
+ rc = msm_cci_i2c_read(sd, c_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ if (read_bytes > CCI_READ_MAX) {
+ read_cfg->addr += CCI_READ_MAX;
+ read_cfg->data += CCI_READ_MAX;
+ read_bytes -= CCI_READ_MAX;
+ } else {
+ read_bytes = 0;
+ }
+ } while (read_bytes);
+ERROR:
+ return rc;
+}
+
+static int32_t msm_cci_i2c_write(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+ enum cci_i2c_sync sync_en)
+{
+ int32_t rc = 0;
+ struct cci_device *cci_dev;
+ enum cci_i2c_master_t master;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+ || c_ctrl->cci_info->cci_i2c_master < 0) {
+ pr_err("%s:%d Invalid I2C master addr\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+ pr_err("%s invalid cci state %d\n",
+ __func__, cci_dev->cci_state);
+ return -EINVAL;
+ }
+ master = c_ctrl->cci_info->cci_i2c_master;
+ CDBG("%s set param sid 0x%x retries %d id_map %d\n", __func__,
+ c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+ c_ctrl->cci_info->id_map);
+
+ /* Set the I2C Frequency */
+ rc = msm_cci_set_clk_param(cci_dev, c_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_cci_set_clk_param failed rc = %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ /*
+ * Call validate queue to make sure queue is empty before starting.
+ * If this call fails, don't proceed with i2c_write call. This is to
+ * avoid overflow / underflow of queue
+ */
+ rc = msm_cci_validate_queue(cci_dev,
+ cci_dev->cci_i2c_queue_info[master][queue].max_queue_size-1,
+ master, queue);
+ if (rc < 0) {
+		pr_err("%s:%d Initial validation failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ }
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		pr_err("%s:%d More than max retries\n", __func__,
+			__LINE__);
+		rc = -EINVAL;
+		goto ERROR;
+	}
+ rc = msm_cci_data_queue(cci_dev, c_ctrl, queue, sync_en);
+ if (rc < 0) {
+ CDBG("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ERROR:
+ return rc;
+}
+
+static void msm_cci_write_async_helper(struct work_struct *work)
+{
+ int rc;
+ struct cci_device *cci_dev;
+ struct cci_write_async *write_async =
+ container_of(work, struct cci_write_async, work);
+ struct msm_camera_i2c_reg_setting *i2c_msg;
+ enum cci_i2c_master_t master;
+ struct msm_camera_cci_master_info *cci_master_info;
+
+ cci_dev = write_async->cci_dev;
+ i2c_msg = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
+ master = write_async->c_ctrl.cci_info->cci_i2c_master;
+ cci_master_info = &cci_dev->cci_master_info[master];
+
+ mutex_lock(&cci_master_info->mutex_q[write_async->queue]);
+ rc = msm_cci_i2c_write(&cci_dev->msm_sd.sd,
+ &write_async->c_ctrl, write_async->queue, write_async->sync_en);
+ mutex_unlock(&cci_master_info->mutex_q[write_async->queue]);
+ if (rc < 0)
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+
+ kfree(write_async->c_ctrl.cfg.cci_i2c_write_cfg.reg_setting);
+ kfree(write_async);
+
+ CDBG("%s: %d Exit\n", __func__, __LINE__);
+}
+
+static int32_t msm_cci_i2c_write_async(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+ enum cci_i2c_sync sync_en)
+{
+ struct cci_write_async *write_async;
+ struct cci_device *cci_dev;
+ struct msm_camera_i2c_reg_setting *cci_i2c_write_cfg;
+ struct msm_camera_i2c_reg_setting *cci_i2c_write_cfg_w;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+
+ CDBG("%s: %d Enter\n", __func__, __LINE__);
+
+ write_async = kzalloc(sizeof(*write_async), GFP_KERNEL);
+ if (!write_async) {
+ pr_err("%s: %d Couldn't allocate memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&write_async->work, msm_cci_write_async_helper);
+ write_async->cci_dev = cci_dev;
+ write_async->c_ctrl = *c_ctrl;
+ write_async->queue = queue;
+ write_async->sync_en = sync_en;
+
+ cci_i2c_write_cfg = &c_ctrl->cfg.cci_i2c_write_cfg;
+ cci_i2c_write_cfg_w = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
+
+ if (cci_i2c_write_cfg->size == 0) {
+ pr_err("%s: %d Size = 0\n", __func__, __LINE__);
+ kfree(write_async);
+ return -EINVAL;
+ }
+
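+	/*
+	 * Deep-copy the register settings; the async helper frees this
+	 * copy (and the work item) once the queued write completes.
+	 */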
+ cci_i2c_write_cfg_w->reg_setting =
+ kzalloc(sizeof(struct msm_camera_i2c_reg_array)*
+ cci_i2c_write_cfg->size, GFP_KERNEL);
+ if (!cci_i2c_write_cfg_w->reg_setting) {
+ pr_err("%s: %d Couldn't allocate memory\n", __func__, __LINE__);
+ kfree(write_async);
+ return -ENOMEM;
+ }
+ memcpy(cci_i2c_write_cfg_w->reg_setting,
+ cci_i2c_write_cfg->reg_setting,
+ (sizeof(struct msm_camera_i2c_reg_array)*
+ cci_i2c_write_cfg->size));
+
+ cci_i2c_write_cfg_w->addr_type = cci_i2c_write_cfg->addr_type;
+ cci_i2c_write_cfg_w->data_type = cci_i2c_write_cfg->data_type;
+ cci_i2c_write_cfg_w->size = cci_i2c_write_cfg->size;
+ cci_i2c_write_cfg_w->delay = cci_i2c_write_cfg->delay;
+
+ queue_work(cci_dev->write_wq[write_async->queue], &write_async->work);
+
+ CDBG("%s: %d Exit\n", __func__, __LINE__);
+
+ return 0;
+}
+
+static int32_t msm_cci_pinctrl_init(struct cci_device *cci_dev)
+{
+ struct msm_pinctrl_info *cci_pctrl = NULL;
+
+ cci_pctrl = &cci_dev->cci_pinctrl;
+ cci_pctrl->pinctrl = devm_pinctrl_get(&cci_dev->pdev->dev);
+ if (IS_ERR_OR_NULL(cci_pctrl->pinctrl)) {
+ pr_err("%s:%d devm_pinctrl_get cci_pinctrl failed\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ cci_pctrl->gpio_state_active = pinctrl_lookup_state(
+ cci_pctrl->pinctrl,
+ CCI_PINCTRL_STATE_DEFAULT);
+ if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_active)) {
+ pr_err("%s:%d look up state for active state failed\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ cci_pctrl->gpio_state_suspend = pinctrl_lookup_state(
+ cci_pctrl->pinctrl,
+ CCI_PINCTRL_STATE_SLEEP);
+ if (IS_ERR_OR_NULL(cci_pctrl->gpio_state_suspend)) {
+ pr_err("%s:%d look up state for suspend state failed\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
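+/*
+ * Despite the _ms suffix this returns CCI clock cycles per microsecond,
+ * scaled up by 256 for integer precision; msm_cci_data_queue() divides
+ * by 0x100 again when it builds the CCI_I2C_WAIT_CMD delay word.
+ */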
+static uint32_t msm_cci_cycles_per_ms(unsigned long clk)
+{
+ uint32_t cycles_per_us;
+
+ if (clk)
+ cycles_per_us = ((clk/1000)*256)/1000;
+ else {
+		pr_err("%s:%d failed: clk is 0, using default: %d\n",
+			__func__, __LINE__, CYCLES_PER_MICRO_SEC_DEFAULT);
+ cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
+ }
+ return cycles_per_us;
+}
+
+static struct msm_cam_clk_info *msm_cci_get_clk(struct cci_device *cci_dev,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ uint32_t j;
+ int32_t idx;
+ uint32_t cci_clk_src;
+ unsigned long clk;
+
+ struct msm_cci_clk_params_t *clk_params = NULL;
+ enum i2c_freq_mode_t i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+ struct device_node *of_node = cci_dev->pdev->dev.of_node;
+ clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
+ cci_clk_src = clk_params->cci_clk_src;
+
+ idx = of_property_match_string(of_node,
+ "clock-names", CCI_CLK_SRC_NAME);
+ if (idx < 0) {
+ cci_dev->cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
+ return &cci_clk_info[0][0];
+ }
+
+ if (cci_clk_src == 0) {
+ clk = cci_clk_info[0][idx].clk_rate;
+ cci_dev->cycles_per_us = msm_cci_cycles_per_ms(clk);
+ return &cci_clk_info[0][0];
+ }
+
+ for (j = 0; j < cci_dev->num_clk_cases; j++) {
+ clk = cci_clk_info[j][idx].clk_rate;
+ if (clk == cci_clk_src) {
+ cci_dev->cycles_per_us = msm_cci_cycles_per_ms(clk);
+ cci_dev->cci_clk_src = cci_clk_src;
+ return &cci_clk_info[j][0];
+ }
+ }
+
+ return NULL;
+}
+
+static int32_t msm_cci_i2c_set_sync_prms(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ int32_t rc = 0;
+ struct cci_device *cci_dev;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev || !c_ctrl) {
+ pr_err("%s:%d failed: invalid params %p %p\n", __func__,
+ __LINE__, cci_dev, c_ctrl);
+ rc = -EINVAL;
+ return rc;
+ }
+ cci_dev->cci_wait_sync_cfg = c_ctrl->cfg.cci_wait_sync_cfg;
+ cci_dev->valid_sync = cci_dev->cci_wait_sync_cfg.csid < 0 ? 0 : 1;
+
+ return rc;
+}
+
+static int32_t msm_cci_init(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ uint8_t i = 0;
+ int32_t rc = 0, ret = 0;
+ struct cci_device *cci_dev;
+ enum cci_i2c_master_t master = MASTER_0;
+ struct msm_cam_clk_info *clk_info = NULL;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev || !c_ctrl) {
+ pr_err("%s:%d failed: invalid params %p %p\n", __func__,
+ __LINE__, cci_dev, c_ctrl);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_CCI, CAMERA_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
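+	/*
+	 * If another client already powered up the CCI block, just reset
+	 * the requested master and return; full hardware init is done only
+	 * on the first reference.
+	 */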
+ if (cci_dev->ref_count++) {
+ CDBG("%s ref_count %d\n", __func__, cci_dev->ref_count);
+ master = c_ctrl->cci_info->cci_i2c_master;
+ CDBG("%s:%d master %d\n", __func__, __LINE__, master);
+ if (master < MASTER_MAX && master >= 0) {
+ mutex_lock(&cci_dev->cci_master_info[master].mutex);
+ flush_workqueue(cci_dev->write_wq[master]);
+ /* Re-initialize the completion */
+ reinit_completion(&cci_dev->
+ cci_master_info[master].reset_complete);
+ for (i = 0; i < NUM_QUEUES; i++)
+ reinit_completion(&cci_dev->
+ cci_master_info[master].report_q[i]);
+ /* Set reset pending flag to TRUE */
+ cci_dev->cci_master_info[master].reset_pending = TRUE;
+ /* Set proper mask to RESET CMD address */
+ if (master == MASTER_0)
+ msm_camera_io_w_mb(CCI_M0_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ else
+ msm_camera_io_w_mb(CCI_M1_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ /* wait for reset done irq */
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[master].
+ reset_complete,
+ CCI_TIMEOUT);
+ if (rc <= 0)
+ pr_err("%s:%d wait failed %d\n", __func__,
+ __LINE__, rc);
+ mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+ }
+ return 0;
+ }
+ ret = msm_cci_pinctrl_init(cci_dev);
+ if (ret < 0) {
+ pr_err("%s:%d Initialization of pinctrl failed\n",
+ __func__, __LINE__);
+ cci_dev->cci_pinctrl_status = 0;
+ } else {
+ cci_dev->cci_pinctrl_status = 1;
+ }
+ rc = msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
+ cci_dev->cci_gpio_tbl_size, 1);
+ if (cci_dev->cci_pinctrl_status) {
+ ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
+ cci_dev->cci_pinctrl.gpio_state_active);
+ if (ret)
+ pr_err("%s:%d cannot set pin to active state\n",
+ __func__, __LINE__);
+ }
+ if (rc < 0) {
+ CDBG("%s: request gpio failed\n", __func__);
+ goto request_gpio_failed;
+ }
+
+ rc = msm_camera_config_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d cci config_vreg failed\n", __func__, __LINE__);
+ goto clk_enable_failed;
+ }
+
+ rc = msm_camera_enable_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d cci enable_vreg failed\n", __func__, __LINE__);
+ goto reg_enable_failed;
+ }
+
+ clk_info = msm_cci_get_clk(cci_dev, c_ctrl);
+ if (!clk_info) {
+ pr_err("%s: clk enable failed\n", __func__);
+ goto reg_enable_failed;
+ }
+
+ rc = msm_cam_clk_enable(&cci_dev->pdev->dev, clk_info,
+ cci_dev->cci_clk, cci_dev->num_clk, 1);
+ if (rc < 0) {
+ CDBG("%s: clk enable failed\n", __func__);
+ goto reg_enable_failed;
+ }
+ /* Re-initialize the completion */
+ reinit_completion(&cci_dev->cci_master_info[master].reset_complete);
+ for (i = 0; i < NUM_QUEUES; i++)
+ reinit_completion(&cci_dev->cci_master_info[master].
+ report_q[i]);
+ enable_irq(cci_dev->irq->start);
+ cci_dev->hw_version = msm_camera_io_r_mb(cci_dev->base +
+ CCI_HW_VERSION_ADDR);
+ pr_info("%s:%d: hw_version = 0x%x\n", __func__, __LINE__,
+ cci_dev->hw_version);
+ cci_dev->payload_size =
+ MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_10;
+ cci_dev->support_seq_write = 0;
+ if (cci_dev->hw_version >= 0x10020000) {
+ cci_dev->payload_size =
+ MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11;
+ cci_dev->support_seq_write = 1;
+ }
+ cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+ msm_camera_io_w_mb(CCI_RESET_CMD_RMSK, cci_dev->base +
+ CCI_RESET_CMD_ADDR);
+ msm_camera_io_w_mb(0x1, cci_dev->base + CCI_RESET_CMD_ADDR);
+ rc = wait_for_completion_timeout(
+ &cci_dev->cci_master_info[MASTER_0].reset_complete,
+ CCI_TIMEOUT);
+ if (rc <= 0) {
+ pr_err("%s: wait_for_completion_timeout %d\n",
+ __func__, __LINE__);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ goto reset_complete_failed;
+ }
+ for (i = 0; i < MASTER_MAX; i++)
+ cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+ msm_camera_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+ cci_dev->base + CCI_IRQ_MASK_0_ADDR);
+ msm_camera_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+ cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+ msm_camera_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+ for (i = 0; i < MASTER_MAX; i++) {
+ if (!cci_dev->write_wq[i]) {
+			pr_err("%s: write wq %d is NULL\n", __func__, i);
+ rc = -ENOMEM;
+ goto reset_complete_failed;
+ } else {
+ flush_workqueue(cci_dev->write_wq[i]);
+ }
+ }
+ cci_dev->cci_state = CCI_STATE_ENABLED;
+
+ return 0;
+
+reset_complete_failed:
+ disable_irq(cci_dev->irq->start);
+ msm_cam_clk_enable(&cci_dev->pdev->dev, clk_info,
+ cci_dev->cci_clk, cci_dev->num_clk, 0);
+reg_enable_failed:
+ msm_camera_config_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 0);
+clk_enable_failed:
+ if (cci_dev->cci_pinctrl_status) {
+ ret = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
+ cci_dev->cci_pinctrl.gpio_state_suspend);
+ if (ret)
+ pr_err("%s:%d cannot set pin to suspend state\n",
+ __func__, __LINE__);
+ }
+ msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
+ cci_dev->cci_gpio_tbl_size, 0);
+request_gpio_failed:
+ cci_dev->ref_count--;
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_CCI,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return rc;
+}
+
+static int32_t msm_cci_release(struct v4l2_subdev *sd)
+{
+	uint8_t i = 0;
+	int32_t rc = 0;
+ struct cci_device *cci_dev;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
+ pr_err("%s invalid ref count %d / cci state %d\n",
+ __func__, cci_dev->ref_count, cci_dev->cci_state);
+ rc = -EINVAL;
+ goto ahb_vote_suspend;
+ }
+ if (--cci_dev->ref_count) {
+ CDBG("%s ref_count Exit %d\n", __func__, cci_dev->ref_count);
+ rc = 0;
+ goto ahb_vote_suspend;
+ }
+ for (i = 0; i < MASTER_MAX; i++)
+ if (cci_dev->write_wq[i])
+ flush_workqueue(cci_dev->write_wq[i]);
+
+ disable_irq(cci_dev->irq->start);
+ msm_cam_clk_enable(&cci_dev->pdev->dev, &cci_clk_info[0][0],
+ cci_dev->cci_clk, cci_dev->num_clk, 0);
+
+ rc = msm_camera_enable_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 0);
+ if (rc < 0)
+ pr_err("%s:%d cci disable_vreg failed\n", __func__, __LINE__);
+
+ rc = msm_camera_config_vreg(&cci_dev->pdev->dev, cci_dev->cci_vreg,
+ cci_dev->regulator_count, NULL, 0, &cci_dev->cci_reg_ptr[0], 0);
+ if (rc < 0)
+ pr_err("%s:%d cci unconfig_vreg failed\n", __func__, __LINE__);
+
+ if (cci_dev->cci_pinctrl_status) {
+ rc = pinctrl_select_state(cci_dev->cci_pinctrl.pinctrl,
+ cci_dev->cci_pinctrl.gpio_state_suspend);
+ if (rc)
+			pr_err("%s:%d cannot set pin to suspend state\n",
+ __func__, __LINE__);
+ }
+ cci_dev->cci_pinctrl_status = 0;
+ msm_camera_request_gpio_table(cci_dev->cci_gpio_tbl,
+ cci_dev->cci_gpio_tbl_size, 0);
+ for (i = 0; i < MASTER_MAX; i++)
+ cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+ cci_dev->cci_state = CCI_STATE_DISABLED;
+ cci_dev->cycles_per_us = 0;
+ cci_dev->cci_clk_src = 0;
+
+ahb_vote_suspend:
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_CCI,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return rc;
+}
+
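+/*
+ * msm_cci_write() picks a hardware queue per command type:
+ * - WRITE_SYNC_BLOCK holds the sync-queue lock and writes synchronously;
+ * - WRITE_SYNC issues an asynchronous write on the sync queue;
+ * - WRITE/WRITE_SEQ opportunistically grab whichever queue is free via
+ *   mutex_trylock() and otherwise block on the priority queue;
+ * - WRITE_ASYNC hands the transfer to msm_cci_i2c_write_async(), which
+ *   completes it from the per-master write workqueue.
+ */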
+static int32_t msm_cci_write(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *c_ctrl)
+{
+ int32_t rc = 0;
+ struct cci_device *cci_dev;
+ enum cci_i2c_master_t master;
+ struct msm_camera_cci_master_info *cci_master_info;
+ uint32_t i;
+
+ cci_dev = v4l2_get_subdevdata(sd);
+ if (!cci_dev || !c_ctrl) {
+ pr_err("%s:%d failed: invalid params %p %p\n", __func__,
+ __LINE__, cci_dev, c_ctrl);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ master = c_ctrl->cci_info->cci_i2c_master;
+ cci_master_info = &cci_dev->cci_master_info[master];
+
+ switch (c_ctrl->cmd) {
+ case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+ mutex_lock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+ rc = msm_cci_i2c_write(sd, c_ctrl,
+ SYNC_QUEUE, MSM_SYNC_ENABLE);
+ mutex_unlock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+ break;
+ case MSM_CCI_I2C_WRITE_SYNC:
+ rc = msm_cci_i2c_write_async(sd, c_ctrl,
+ SYNC_QUEUE, MSM_SYNC_ENABLE);
+ break;
+ case MSM_CCI_I2C_WRITE:
+ case MSM_CCI_I2C_WRITE_SEQ:
+ for (i = 0; i < NUM_QUEUES; i++) {
+ if (mutex_trylock(&cci_master_info->mutex_q[i])) {
+ rc = msm_cci_i2c_write(sd, c_ctrl, i,
+ MSM_SYNC_DISABLE);
+ mutex_unlock(&cci_master_info->mutex_q[i]);
+ return rc;
+ }
+ }
+ mutex_lock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+ rc = msm_cci_i2c_write(sd, c_ctrl,
+ PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+ mutex_unlock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+ break;
+ case MSM_CCI_I2C_WRITE_ASYNC:
+ rc = msm_cci_i2c_write_async(sd, c_ctrl,
+ PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ }
+ return rc;
+}
+
+static int32_t msm_cci_config(struct v4l2_subdev *sd,
+ struct msm_camera_cci_ctrl *cci_ctrl)
+{
+ int32_t rc = 0;
+ CDBG("%s line %d cmd %d\n", __func__, __LINE__,
+ cci_ctrl->cmd);
+ switch (cci_ctrl->cmd) {
+ case MSM_CCI_INIT:
+ rc = msm_cci_init(sd, cci_ctrl);
+ break;
+ case MSM_CCI_RELEASE:
+ rc = msm_cci_release(sd);
+ break;
+ case MSM_CCI_I2C_READ:
+ rc = msm_cci_i2c_read_bytes(sd, cci_ctrl);
+ break;
+ case MSM_CCI_I2C_WRITE:
+ case MSM_CCI_I2C_WRITE_SEQ:
+ case MSM_CCI_I2C_WRITE_SYNC:
+ case MSM_CCI_I2C_WRITE_ASYNC:
+ case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+ rc = msm_cci_write(sd, cci_ctrl);
+ break;
+ case MSM_CCI_GPIO_WRITE:
+ break;
+ case MSM_CCI_SET_SYNC_CID:
+ rc = msm_cci_i2c_set_sync_prms(sd, cci_ctrl);
+ break;
+
+ default:
+ rc = -ENOIOCTLCMD;
+ }
+ CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+ cci_ctrl->status = rc;
+ return rc;
+}
+
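+/*
+ * Interrupt handler: read and ack CCI_IRQ_STATUS_0, then fan the bits
+ * out.  RST_DONE wakes whichever master has a reset pending, the
+ * RD_DONE bits reuse the same completion to signal a finished read,
+ * the Qn_REPORT bits mark the queue free and wake any waiter, HALT_ACK
+ * kicks off a per-master reset, and the ERROR bits record -EINVAL and
+ * request a halt so the master can be recovered.
+ */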
+static irqreturn_t msm_cci_irq(int irq_num, void *data)
+{
+ uint32_t irq;
+ struct cci_device *cci_dev = data;
+ irq = msm_camera_io_r_mb(cci_dev->base + CCI_IRQ_STATUS_0_ADDR);
+ msm_camera_io_w_mb(irq, cci_dev->base + CCI_IRQ_CLEAR_0_ADDR);
+ msm_camera_io_w_mb(0x1, cci_dev->base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+ CDBG("%s CCI_I2C_M0_STATUS_ADDR = 0x%x\n", __func__, irq);
+ if (irq & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+ if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
+ cci_dev->cci_master_info[MASTER_0].reset_pending =
+ FALSE;
+ complete(&cci_dev->cci_master_info[MASTER_0].
+ reset_complete);
+ }
+ if (cci_dev->cci_master_info[MASTER_1].reset_pending == TRUE) {
+ cci_dev->cci_master_info[MASTER_1].reset_pending =
+ FALSE;
+ complete(&cci_dev->cci_master_info[MASTER_1].
+ reset_complete);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
+ cci_dev->cci_master_info[MASTER_0].status = 0;
+ complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
+ struct msm_camera_cci_master_info *cci_master_info;
+ cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+ atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+ cci_master_info->status = 0;
+ if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+ complete(&cci_master_info->report_q[QUEUE_0]);
+ atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
+ struct msm_camera_cci_master_info *cci_master_info;
+ cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+ atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+ cci_master_info->status = 0;
+ if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+ complete(&cci_master_info->report_q[QUEUE_1]);
+ atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+ cci_dev->cci_master_info[MASTER_1].status = 0;
+ complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
+ struct msm_camera_cci_master_info *cci_master_info;
+ cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+ atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+ cci_master_info->status = 0;
+ if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+ complete(&cci_master_info->report_q[QUEUE_0]);
+ atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
+ struct msm_camera_cci_master_info *cci_master_info;
+ cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+ atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+ cci_master_info->status = 0;
+ if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+ complete(&cci_master_info->report_q[QUEUE_1]);
+ atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+ }
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+ cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+ msm_camera_io_w_mb(CCI_M0_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+ cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
+ msm_camera_io_w_mb(CCI_M1_RESET_RMSK,
+ cci_dev->base + CCI_RESET_CMD_ADDR);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
+ pr_err("%s:%d MASTER_0 error 0x%x\n", __func__, __LINE__, irq);
+ cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
+ msm_camera_io_w_mb(CCI_M0_HALT_REQ_RMSK,
+ cci_dev->base + CCI_HALT_REQ_ADDR);
+ }
+ if (irq & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
+ pr_err("%s:%d MASTER_1 error 0x%x\n", __func__, __LINE__, irq);
+ cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
+ msm_camera_io_w_mb(CCI_M1_HALT_REQ_RMSK,
+ cci_dev->base + CCI_HALT_REQ_ADDR);
+ }
+ return IRQ_HANDLED;
+}
+
+static int msm_cci_irq_routine(struct v4l2_subdev *sd, u32 status,
+ bool *handled)
+{
+ struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+ irqreturn_t ret;
+ CDBG("%s line %d\n", __func__, __LINE__);
+ ret = msm_cci_irq(cci_dev->irq->start, cci_dev);
+ CDBG("%s: msm_cci_irq return %d\n", __func__, ret);
+ *handled = TRUE;
+ return 0;
+}
+
+static long msm_cci_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+ CDBG("%s line %d\n", __func__, __LINE__);
+ switch (cmd) {
+ case VIDIOC_MSM_CCI_CFG:
+ rc = msm_cci_config(sd, arg);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ break;
+ case MSM_SD_SHUTDOWN: {
+ struct msm_camera_cci_ctrl ctrl_cmd;
+ ctrl_cmd.cmd = MSM_CCI_RELEASE;
+ rc = msm_cci_config(sd, &ctrl_cmd);
+ break;
+ }
+ default:
+ rc = -ENOIOCTLCMD;
+ }
+ CDBG("%s line %d rc %d\n", __func__, __LINE__, rc);
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_cci_subdev_core_ops = {
+ .ioctl = &msm_cci_subdev_ioctl,
+ .interrupt_service_routine = msm_cci_irq_routine,
+};
+
+static const struct v4l2_subdev_ops msm_cci_subdev_ops = {
+ .core = &msm_cci_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_cci_internal_ops;
+
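+/*
+ * Set up the per-master bookkeeping: status, mutex and reset completion,
+ * plus one mutex/completion pair per command queue.  Queue 0 is sized
+ * CCI_I2C_QUEUE_0_SIZE and queue 1 CCI_I2C_QUEUE_1_SIZE.
+ */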
+static void msm_cci_init_cci_params(struct cci_device *new_cci_dev)
+{
+ uint8_t i = 0, j = 0;
+ for (i = 0; i < NUM_MASTERS; i++) {
+ new_cci_dev->cci_master_info[i].status = 0;
+ mutex_init(&new_cci_dev->cci_master_info[i].mutex);
+ init_completion(&new_cci_dev->
+ cci_master_info[i].reset_complete);
+
+ for (j = 0; j < NUM_QUEUES; j++) {
+ mutex_init(&new_cci_dev->cci_master_info[i].mutex_q[j]);
+ init_completion(&new_cci_dev->
+ cci_master_info[i].report_q[j]);
+ if (j == QUEUE_0)
+ new_cci_dev->cci_i2c_queue_info[i][j].
+ max_queue_size = CCI_I2C_QUEUE_0_SIZE;
+ else
+ new_cci_dev->cci_i2c_queue_info[i][j].
+ max_queue_size = CCI_I2C_QUEUE_1_SIZE;
+ }
+ }
+ return;
+}
+
+static int32_t msm_cci_init_gpio_params(struct cci_device *cci_dev)
+{
+ int32_t rc = 0, i = 0;
+ uint32_t *val_array = NULL;
+ uint8_t tbl_size = 0;
+ struct device_node *of_node = cci_dev->pdev->dev.of_node;
+ struct gpio *gpio_tbl = NULL;
+
+ cci_dev->cci_gpio_tbl_size = tbl_size = of_gpio_count(of_node);
+ CDBG("%s gpio count %d\n", __func__, tbl_size);
+ if (!tbl_size) {
+ pr_err("%s:%d gpio count 0\n", __func__, __LINE__);
+ return 0;
+ }
+
+ gpio_tbl = cci_dev->cci_gpio_tbl =
+ kzalloc(sizeof(struct gpio) * tbl_size, GFP_KERNEL);
+ if (!gpio_tbl) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return 0;
+ }
+
+ for (i = 0; i < tbl_size; i++) {
+ gpio_tbl[i].gpio = of_get_gpio(of_node, i);
+ CDBG("%s gpio_tbl[%d].gpio = %d\n", __func__, i,
+ gpio_tbl[i].gpio);
+ }
+
+ val_array = kzalloc(sizeof(uint32_t) * tbl_size, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,gpio-tbl-flags",
+ val_array, tbl_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < tbl_size; i++) {
+ gpio_tbl[i].flags = val_array[i];
+ CDBG("%s gpio_tbl[%d].flags = %ld\n", __func__, i,
+ gpio_tbl[i].flags);
+ }
+
+ for (i = 0; i < tbl_size; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,gpio-tbl-label", i, &gpio_tbl[i].label);
+ CDBG("%s gpio_tbl[%d].label = %s\n", __func__, i,
+ gpio_tbl[i].label);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ }
+
+ kfree(val_array);
+ return rc;
+
+ERROR2:
+ kfree(val_array);
+ERROR1:
+ kfree(cci_dev->cci_gpio_tbl);
+ cci_dev->cci_gpio_tbl = NULL;
+ cci_dev->cci_gpio_tbl_size = 0;
+ return rc;
+}
+
+static void msm_cci_init_default_clk_params(struct cci_device *cci_dev,
+ uint8_t index)
+{
+	/* default clock params are for 100 kHz */
+ cci_dev->cci_clk_params[index].hw_thigh = 78;
+ cci_dev->cci_clk_params[index].hw_tlow = 114;
+ cci_dev->cci_clk_params[index].hw_tsu_sto = 28;
+ cci_dev->cci_clk_params[index].hw_tsu_sta = 28;
+ cci_dev->cci_clk_params[index].hw_thd_dat = 10;
+ cci_dev->cci_clk_params[index].hw_thd_sta = 77;
+ cci_dev->cci_clk_params[index].hw_tbuf = 118;
+ cci_dev->cci_clk_params[index].hw_scl_stretch_en = 0;
+ cci_dev->cci_clk_params[index].hw_trdhld = 6;
+ cci_dev->cci_clk_params[index].hw_tsp = 1;
+ cci_dev->cci_clk_params[index].cci_clk_src = 19200000;
+}
+
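+/*
+ * Read the per-mode I2C timing parameters from the devicetree sub-nodes
+ * (qcom,i2c_standard_mode, qcom,i2c_fast_mode, qcom,i2c_fast_plus_mode,
+ * qcom,i2c_custom_mode).  The reads are chained on rc, so if any of the
+ * qcom,hw-* properties is missing the whole mode falls back to the
+ * 100 kHz defaults from msm_cci_init_default_clk_params().
+ */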
+static void msm_cci_init_clk_params(struct cci_device *cci_dev)
+{
+ int32_t rc = 0;
+ uint32_t val = 0;
+ uint8_t count = 0;
+ struct device_node *of_node = cci_dev->pdev->dev.of_node;
+ struct device_node *src_node = NULL;
+
+ for (count = 0; count < I2C_MAX_MODES; count++) {
+
+ if (I2C_STANDARD_MODE == count)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_standard_mode");
+ else if (I2C_FAST_MODE == count)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_fast_mode");
+ else if (I2C_FAST_PLUS_MODE == count)
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_fast_plus_mode");
+ else
+ src_node = of_find_node_by_name(of_node,
+ "qcom,i2c_custom_mode");
+
+ rc = of_property_read_u32(src_node, "qcom,hw-thigh", &val);
+ CDBG("%s qcom,hw-thigh %d, rc %d\n", __func__, val, rc);
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_thigh = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tlow",
+ &val);
+ CDBG("%s qcom,hw-tlow %d, rc %d\n", __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tlow = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tsu-sto",
+ &val);
+ CDBG("%s qcom,hw-tsu-sto %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tsu_sto = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tsu-sta",
+ &val);
+ CDBG("%s qcom,hw-tsu-sta %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tsu_sta = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-thd-dat",
+ &val);
+ CDBG("%s qcom,hw-thd-dat %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_thd_dat = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-thd-sta",
+ &val);
+ CDBG("%s qcom,hw-thd-sta %d, rc %d\n", __func__,
+ val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_thd_sta = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tbuf",
+ &val);
+ CDBG("%s qcom,hw-tbuf %d, rc %d\n", __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tbuf = val;
+ rc = of_property_read_u32(src_node,
+ "qcom,hw-scl-stretch-en", &val);
+ CDBG("%s qcom,hw-scl-stretch-en %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-trdhld",
+ &val);
+ CDBG("%s qcom,hw-trdhld %d, rc %d\n",
+ __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_trdhld = val;
+ rc = of_property_read_u32(src_node, "qcom,hw-tsp",
+ &val);
+ CDBG("%s qcom,hw-tsp %d, rc %d\n", __func__, val, rc);
+ }
+ if (!rc) {
+ cci_dev->cci_clk_params[count].hw_tsp = val;
+ val = 0;
+ rc = of_property_read_u32(src_node, "qcom,cci-clk-src",
+ &val);
+ CDBG("%s qcom,cci-clk-src %d, rc %d\n",
+ __func__, val, rc);
+ cci_dev->cci_clk_params[count].cci_clk_src = val;
+		} else {
+			msm_cci_init_default_clk_params(cci_dev, count);
+		}
+
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+ return;
+}
+
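+/*
+ * Client drivers (sensor, actuator, EEPROM, ...) obtain the CCI subdev
+ * through msm_cci_get_subdev() and drive it with VIDIOC_MSM_CCI_CFG.
+ * Illustrative sketch only (cci_client stands for a previously filled
+ * struct msm_camera_cci_client):
+ *
+ *	struct msm_camera_cci_ctrl cci_ctrl = {
+ *		.cmd = MSM_CCI_INIT,
+ *		.cci_info = cci_client,
+ *	};
+ *	v4l2_subdev_call(msm_cci_get_subdev(), core, ioctl,
+ *		VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ */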
+struct v4l2_subdev *msm_cci_get_subdev(void)
+{
+ return g_cci_subdev;
+}
+
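+/*
+ * Parse "clock-names" and "qcom,clock-rates" from the devicetree.  The
+ * flat rate table is read as num_clk_cases rows of num_clk entries; a
+ * rate of 0 is stored as -1 (no explicit rate request for that clock).
+ */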
+static int msm_cci_get_clk_info(struct cci_device *cci_dev,
+ struct platform_device *pdev)
+{
+ uint32_t count;
+ uint32_t count_r;
+ int i, j, rc;
+ const uint32_t *p;
+ int index = 0;
+
+ struct device_node *of_node;
+ of_node = pdev->dev.of_node;
+
+ count = of_property_count_strings(of_node, "clock-names");
+ cci_dev->num_clk = count;
+
+ CDBG("%s: count = %d\n", __func__, count);
+ if (count == 0) {
+		pr_err("%s: no clocks found in device tree, count=%d\n",
+ __func__, count);
+ return 0;
+ }
+
+ if (count > CCI_NUM_CLK_MAX) {
+ pr_err("%s: invalid count=%d, max is %d\n", __func__,
+ count, CCI_NUM_CLK_MAX);
+ return -EINVAL;
+ }
+
+ p = of_get_property(of_node, "qcom,clock-rates", &count_r);
+ if (!p || !count_r) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+
+ count_r /= sizeof(uint32_t);
+ cci_dev->num_clk_cases = count_r/count;
+
+ if (cci_dev->num_clk_cases > CCI_NUM_CLK_CASES) {
+ pr_err("%s: invalid count=%d, max is %d\n", __func__,
+ cci_dev->num_clk_cases, CCI_NUM_CLK_CASES);
+ return -EINVAL;
+ }
+
+ index = 0;
+ for (i = 0; i < count_r/count; i++) {
+ for (j = 0; j < count; j++) {
+ rc = of_property_read_string_index(of_node,
+ "clock-names", j,
+ &(cci_clk_info[i][j].clk_name));
+ CDBG("%s: clock-names[%d][%d] = %s\n", __func__,
+ i, j, cci_clk_info[i][j].clk_name);
+ if (rc < 0) {
+ pr_err("%s:%d, failed\n", __func__, __LINE__);
+ return rc;
+ }
+
+ cci_clk_info[i][j].clk_rate =
+ (be32_to_cpu(p[index]) == 0) ?
+ (long)-1 : be32_to_cpu(p[index]);
+ CDBG("%s: clk_rate[%d][%d] = %ld\n", __func__, i, j,
+ cci_clk_info[i][j].clk_rate);
+ index++;
+ }
+ }
+ return 0;
+}
+
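+/*
+ * Probe: allocate the cci_device, register the v4l2 subdev, pull the
+ * clock, GPIO, clock-timing and regulator descriptions out of the
+ * devicetree, map the register block, request (but keep disabled) the
+ * CCI interrupt and create one write workqueue per master.  The IRQ and
+ * clocks are only turned on later, from msm_cci_init().
+ */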
+static int msm_cci_probe(struct platform_device *pdev)
+{
+ struct cci_device *new_cci_dev;
+ int rc = 0, i = 0;
+ CDBG("%s: pdev %p device id = %d\n", __func__, pdev, pdev->id);
+ new_cci_dev = kzalloc(sizeof(struct cci_device), GFP_KERNEL);
+ if (!new_cci_dev) {
+		pr_err("%s: not enough memory\n", __func__);
+ return -ENOMEM;
+ }
+ v4l2_subdev_init(&new_cci_dev->msm_sd.sd, &msm_cci_subdev_ops);
+ new_cci_dev->msm_sd.sd.internal_ops = &msm_cci_internal_ops;
+ snprintf(new_cci_dev->msm_sd.sd.name,
+ ARRAY_SIZE(new_cci_dev->msm_sd.sd.name), "msm_cci");
+ v4l2_set_subdevdata(&new_cci_dev->msm_sd.sd, new_cci_dev);
+ platform_set_drvdata(pdev, &new_cci_dev->msm_sd.sd);
+ CDBG("%s sd %p\n", __func__, &new_cci_dev->msm_sd.sd);
+ if (pdev->dev.of_node)
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+
+ rc = msm_cci_get_clk_info(new_cci_dev, pdev);
+ if (rc < 0) {
+ pr_err("%s: msm_cci_get_clk_info() failed", __func__);
+ kfree(new_cci_dev);
+ return -EFAULT;
+ }
+
+ new_cci_dev->ref_count = 0;
+ new_cci_dev->mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "cci");
+ if (!new_cci_dev->mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto cci_no_resource;
+ }
+	new_cci_dev->irq = platform_get_resource_byname(pdev,
+					IORESOURCE_IRQ, "cci");
+	if (!new_cci_dev->irq) {
+		pr_err("%s: no irq resource?\n", __func__);
+		rc = -ENODEV;
+		goto cci_no_resource;
+	}
+	CDBG("%s line %d cci irq start %d end %d\n", __func__,
+		__LINE__,
+		(int) new_cci_dev->irq->start,
+		(int) new_cci_dev->irq->end);
+ new_cci_dev->io = request_mem_region(new_cci_dev->mem->start,
+ resource_size(new_cci_dev->mem), pdev->name);
+ if (!new_cci_dev->io) {
+ pr_err("%s: no valid mem region\n", __func__);
+ rc = -EBUSY;
+ goto cci_no_resource;
+ }
+
+ new_cci_dev->base = ioremap(new_cci_dev->mem->start,
+ resource_size(new_cci_dev->mem));
+ if (!new_cci_dev->base) {
+ rc = -ENOMEM;
+ goto cci_release_mem;
+ }
+ rc = request_irq(new_cci_dev->irq->start, msm_cci_irq,
+ IRQF_TRIGGER_RISING, "cci", new_cci_dev);
+ if (rc < 0) {
+ pr_err("%s: irq request fail\n", __func__);
+ rc = -EBUSY;
+ goto cci_release_mem;
+ }
+
+ disable_irq(new_cci_dev->irq->start);
+ new_cci_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x6;
+ msm_sd_register(&new_cci_dev->msm_sd);
+ new_cci_dev->pdev = pdev;
+ msm_cci_init_cci_params(new_cci_dev);
+ msm_cci_init_clk_params(new_cci_dev);
+ msm_cci_init_gpio_params(new_cci_dev);
+
+ rc = msm_camera_get_dt_vreg_data(new_cci_dev->pdev->dev.of_node,
+ &(new_cci_dev->cci_vreg), &(new_cci_dev->regulator_count));
+ if (rc < 0) {
+ pr_err("%s: msm_camera_get_dt_vreg_data fail\n", __func__);
+ rc = -EFAULT;
+ goto cci_release_mem;
+ }
+
+ if ((new_cci_dev->regulator_count < 0) ||
+ (new_cci_dev->regulator_count > MAX_REGULATOR)) {
+ pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
+ new_cci_dev->regulator_count, MAX_REGULATOR);
+ rc = -EFAULT;
+ goto cci_invalid_vreg_data;
+ }
+
+ rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (rc)
+ pr_err("%s: failed to add child nodes, rc=%d\n", __func__, rc);
+ new_cci_dev->cci_state = CCI_STATE_DISABLED;
+ g_cci_subdev = &new_cci_dev->msm_sd.sd;
+ for (i = 0; i < MASTER_MAX; i++) {
+ new_cci_dev->write_wq[i] = create_singlethread_workqueue(
+ "msm_cci_wq");
+ if (!new_cci_dev->write_wq[i])
+ pr_err("Failed to create write wq\n");
+ }
+ CDBG("%s cci subdev %p\n", __func__, &new_cci_dev->msm_sd.sd);
+ CDBG("%s line %d\n", __func__, __LINE__);
+ return 0;
+
+cci_invalid_vreg_data:
+ kfree(new_cci_dev->cci_vreg);
+cci_release_mem:
+ release_mem_region(new_cci_dev->mem->start,
+ resource_size(new_cci_dev->mem));
+cci_no_resource:
+ kfree(new_cci_dev);
+ return rc;
+}
+
+static int msm_cci_exit(struct platform_device *pdev)
+{
+ struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+ struct cci_device *cci_dev =
+ v4l2_get_subdevdata(subdev);
+ release_mem_region(cci_dev->mem->start, resource_size(cci_dev->mem));
+ kfree(cci_dev);
+ return 0;
+}
+
+static const struct of_device_id msm_cci_dt_match[] = {
+ {.compatible = "qcom,cci"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_cci_dt_match);
+
+static struct platform_driver cci_driver = {
+ .probe = msm_cci_probe,
+ .remove = msm_cci_exit,
+ .driver = {
+ .name = MSM_CCI_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_cci_dt_match,
+ },
+};
+
+static int __init msm_cci_init_module(void)
+{
+ return platform_driver_register(&cci_driver);
+}
+
+static void __exit msm_cci_exit_module(void)
+{
+ platform_driver_unregister(&cci_driver);
+}
+
+module_init(msm_cci_init_module);
+module_exit(msm_cci_exit_module);
+MODULE_DESCRIPTION("MSM CCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
new file mode 100644
index 000000000000..a0d409bfd413
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
@@ -0,0 +1,238 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CCI_H
+#define MSM_CCI_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <linux/workqueue.h>
+#include <media/msm_cam_sensor.h>
+#include <soc/qcom/camera2.h>
+#include "msm_sd.h"
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE 1
+#define FALSE 0
+
+#define CCI_PINCTRL_STATE_DEFAULT "cci_default"
+#define CCI_PINCTRL_STATE_SLEEP "cci_suspend"
+
+#define CCI_NUM_CLK_MAX 16
+#define CCI_NUM_CLK_CASES 5
+#define CCI_CLK_SRC_NAME "cci_src_clk"
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_10 10
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11 11
+#define BURST_MIN_FREE_SIZE 8
+
+enum cci_i2c_sync {
+ MSM_SYNC_DISABLE,
+ MSM_SYNC_ENABLE,
+};
+
+enum cci_i2c_queue_t {
+ QUEUE_0,
+ QUEUE_1,
+ QUEUE_INVALID,
+};
+
+struct msm_camera_cci_client {
+ struct v4l2_subdev *cci_subdev;
+ uint32_t freq;
+ enum i2c_freq_mode_t i2c_freq_mode;
+ enum cci_i2c_master_t cci_i2c_master;
+ uint16_t sid;
+ uint16_t cid;
+ uint32_t timeout;
+ uint16_t retries;
+ uint16_t id_map;
+};
+
+enum msm_cci_cmd_type {
+ MSM_CCI_INIT,
+ MSM_CCI_RELEASE,
+ MSM_CCI_SET_SID,
+ MSM_CCI_SET_FREQ,
+ MSM_CCI_SET_SYNC_CID,
+ MSM_CCI_I2C_READ,
+ MSM_CCI_I2C_WRITE,
+ MSM_CCI_I2C_WRITE_SEQ,
+ MSM_CCI_I2C_WRITE_ASYNC,
+ MSM_CCI_GPIO_WRITE,
+ MSM_CCI_I2C_WRITE_SYNC,
+ MSM_CCI_I2C_WRITE_SYNC_BLOCK,
+};
+
+struct msm_camera_cci_wait_sync_cfg {
+ uint16_t cid;
+ int16_t csid;
+ uint16_t line;
+ uint16_t delay;
+};
+
+struct msm_camera_cci_gpio_cfg {
+ uint16_t gpio_queue;
+ uint16_t i2c_queue;
+};
+
+struct msm_camera_cci_i2c_read_cfg {
+ uint16_t addr;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+ uint8_t *data;
+ uint16_t num_byte;
+};
+
+struct msm_camera_cci_i2c_queue_info {
+ uint32_t max_queue_size;
+ uint32_t report_id;
+ uint32_t irq_en;
+ uint32_t capture_rep_data;
+};
+
+struct msm_camera_cci_ctrl {
+ int32_t status;
+ struct msm_camera_cci_client *cci_info;
+ enum msm_cci_cmd_type cmd;
+ union {
+ struct msm_camera_i2c_reg_setting cci_i2c_write_cfg;
+ struct msm_camera_cci_i2c_read_cfg cci_i2c_read_cfg;
+ struct msm_camera_cci_wait_sync_cfg cci_wait_sync_cfg;
+ struct msm_camera_cci_gpio_cfg gpio_cfg;
+ } cfg;
+};
+
+struct msm_camera_cci_master_info {
+ uint32_t status;
+ atomic_t q_free[NUM_QUEUES];
+ uint8_t q_lock[NUM_QUEUES];
+ uint8_t reset_pending;
+ struct mutex mutex;
+ struct completion reset_complete;
+ struct mutex mutex_q[NUM_QUEUES];
+ struct completion report_q[NUM_QUEUES];
+ atomic_t done_pending[NUM_QUEUES];
+};
+
+struct msm_cci_clk_params_t {
+ uint16_t hw_thigh;
+ uint16_t hw_tlow;
+ uint16_t hw_tsu_sto;
+ uint16_t hw_tsu_sta;
+ uint16_t hw_thd_dat;
+ uint16_t hw_thd_sta;
+ uint16_t hw_tbuf;
+ uint8_t hw_scl_stretch_en;
+ uint8_t hw_trdhld;
+ uint8_t hw_tsp;
+ uint32_t cci_clk_src;
+};
+
+enum msm_cci_state_t {
+ CCI_STATE_ENABLED,
+ CCI_STATE_DISABLED,
+};
+
+struct cci_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct v4l2_subdev subdev;
+ struct resource *mem;
+ struct resource *irq;
+ struct resource *io;
+ void __iomem *base;
+
+ uint32_t hw_version;
+ uint8_t ref_count;
+ enum msm_cci_state_t cci_state;
+ uint32_t num_clk;
+ uint32_t num_clk_cases;
+
+ struct clk *cci_clk[CCI_NUM_CLK_MAX];
+ struct msm_camera_cci_i2c_queue_info
+ cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
+ struct msm_camera_cci_master_info cci_master_info[NUM_MASTERS];
+ enum i2c_freq_mode_t i2c_freq_mode[NUM_MASTERS];
+ struct msm_cci_clk_params_t cci_clk_params[I2C_MAX_MODES];
+ struct gpio *cci_gpio_tbl;
+ uint8_t cci_gpio_tbl_size;
+ struct msm_pinctrl_info cci_pinctrl;
+ uint8_t cci_pinctrl_status;
+ uint32_t cycles_per_us;
+ uint32_t cci_clk_src;
+ struct camera_vreg_t *cci_vreg;
+ struct regulator *cci_reg_ptr[MAX_REGULATOR];
+ int32_t regulator_count;
+ uint8_t payload_size;
+ uint8_t support_seq_write;
+ struct workqueue_struct *write_wq[MASTER_MAX];
+ struct msm_camera_cci_wait_sync_cfg cci_wait_sync_cfg;
+ uint8_t valid_sync;
+};
+
+enum msm_cci_i2c_cmd_type {
+ CCI_I2C_SET_PARAM_CMD = 1,
+ CCI_I2C_WAIT_CMD,
+ CCI_I2C_WAIT_SYNC_CMD,
+ CCI_I2C_WAIT_GPIO_EVENT_CMD,
+ CCI_I2C_TRIG_I2C_EVENT_CMD,
+ CCI_I2C_LOCK_CMD,
+ CCI_I2C_UNLOCK_CMD,
+ CCI_I2C_REPORT_CMD,
+ CCI_I2C_WRITE_CMD,
+ CCI_I2C_READ_CMD,
+ CCI_I2C_WRITE_DISABLE_P_CMD,
+ CCI_I2C_READ_DISABLE_P_CMD,
+ CCI_I2C_WRITE_CMD2,
+ CCI_I2C_WRITE_CMD3,
+ CCI_I2C_REPEAT_CMD,
+ CCI_I2C_INVALID_CMD,
+};
+
+enum msm_cci_gpio_cmd_type {
+ CCI_GPIO_SET_PARAM_CMD = 1,
+ CCI_GPIO_WAIT_CMD,
+ CCI_GPIO_WAIT_SYNC_CMD,
+ CCI_GPIO_WAIT_GPIO_IN_EVENT_CMD,
+ CCI_GPIO_WAIT_I2C_Q_TRIG_EVENT_CMD,
+ CCI_GPIO_OUT_CMD,
+ CCI_GPIO_TRIG_EVENT_CMD,
+ CCI_GPIO_REPORT_CMD,
+ CCI_GPIO_REPEAT_CMD,
+ CCI_GPIO_CONTINUE_CMD,
+ CCI_GPIO_INVALID_CMD,
+};
+
+struct cci_write_async {
+ struct cci_device *cci_dev;
+ struct msm_camera_cci_ctrl c_ctrl;
+ enum cci_i2c_queue_t queue;
+ struct work_struct work;
+ enum cci_i2c_sync sync_en;
+};
+
+#ifdef CONFIG_MSM_CCI
+struct v4l2_subdev *msm_cci_get_subdev(void);
+#else
+static inline struct v4l2_subdev *msm_cci_get_subdev(void)
+{
+ return NULL;
+}
+#endif
+
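+/*
+ * Private ioctl issued on the CCI v4l2 subdev; the msm_camera_cci_ctrl
+ * payload carries the msm_cci_cmd_type command together with its
+ * per-command configuration, and the driver writes the result back to
+ * the status field.
+ */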
+#define VIDIOC_MSM_CCI_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct msm_camera_cci_ctrl *)
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/Makefile b/drivers/media/platform/msm/camera_v2/sensor/csid/Makefile
new file mode 100644
index 000000000000..c51555d5be89
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSM_CSID) += msm_csid.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_2_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_2_0_hwreg.h
new file mode 100644
index 000000000000..dbe672ac2c3a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_2_0_hwreg.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_2_0_HWREG_H
+#define MSM_CSID_2_0_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v2_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
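+/*
+ * Positional initializer for struct csid_reg_parms_t (declared in
+ * msm_csid.h): the entries below must stay in the same order as the
+ * fields of that structure.
+ */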
+struct csid_reg_parms_t csid_v2_0 = {
+
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x4,
+ 0x8,
+ 0xc,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x5c,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6c,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x9C,
+ 0xA0,
+ 0xA8,
+ 0xAC,
+ 0xB0,
+ 11,
+ 0x7FFF,
+ 0x2,
+ 17,
+ 0x02000011,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_2_2_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_2_2_hwreg.h
new file mode 100644
index 000000000000..94c629570621
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_2_2_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_2_2_HWREG_H
+#define MSM_CSID_2_2_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v2_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v2_2 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x4,
+ 0x8,
+ 0xc,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x5c,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6c,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x9C,
+ 0xA0,
+ 0xA8,
+ 0xAC,
+ 0xB0,
+ 11,
+ 0x7FFF,
+ 0x2,
+ 17,
+ 0x02001000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_0_hwreg.h
new file mode 100644
index 000000000000..74f79af72b0b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_0_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_0_HWREG_H
+#define MSM_CSID_3_0_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v3_0 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30000000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_1_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_1_hwreg.h
new file mode 100644
index 000000000000..23553779ac69
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_1_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_1_HWREG_H
+#define MSM_CSID_3_1_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v3_1 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30010000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_2_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_2_hwreg.h
new file mode 100644
index 000000000000..efab52a05779
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_2_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_2_HWREG_H
+#define MSM_CSID_3_2_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_2[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v3_2 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30020000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_1_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_1_hwreg.h
new file mode 100644
index 000000000000..31a0fda0a9e3
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_1_hwreg.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_4_1_HWREG_H
+#define MSM_CSID_3_4_1_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+uint8_t csid_lane_assign_v3_4_1[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+
+struct csid_reg_parms_t csid_v3_4_1 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30040001,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_2_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_2_hwreg.h
new file mode 100644
index 000000000000..70c23edfc3ed
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_4_2_hwreg.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_4_2_HWREG_H
+#define MSM_CSID_3_4_2_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_4_2[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+struct csid_reg_parms_t csid_v3_4_2 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30040002,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_5_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_5_hwreg.h
new file mode 100644
index 000000000000..e1623e5a8662
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_5_hwreg.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_5_HWREG_H
+#define MSM_CSID_3_5_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_5[PHY_LANE_MAX] = {0, 4, 1, 2, 3};
+
+struct csid_reg_parms_t csid_v3_5 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x24,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0x9C,
+ 0xA0,
+ 0xA8,
+ 0xAC,
+ 0xB4,
+ 0xB8,
+ 0xBC,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30050000,
+ 0xC,
+ 0x84,
+ 0xA4,
+ 0x7f010800,
+ 20,
+ 17,
+ 16,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_6_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_6_0_hwreg.h
new file mode 100644
index 000000000000..879b0ba0e2ae
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/include/msm_csid_3_6_0_hwreg.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_3_6_0_HWREG_H
+#define MSM_CSID_3_6_0_HWREG_H
+
+#include <sensor/csid/msm_csid.h>
+
+uint8_t csid_lane_assign_v3_6_0[PHY_LANE_MAX] = {0, 1, 2, 3, 4};
+struct csid_reg_parms_t csid_v3_6_0 = {
+ /* MIPI CSID registers */
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x1C,
+ 0x20,
+ 0x60,
+ 0x64,
+ 0x68,
+ 0x6C,
+ 0x70,
+ 0x74,
+ 0x78,
+ 0x7C,
+ 0x80,
+ 0x84,
+ 0x88,
+ 0x8C,
+ 0x90,
+ 0x94,
+ 0x98,
+ 0xA0,
+ 0xA4,
+ 0xAC,
+ 0xB0,
+ 0xB4,
+ 11,
+ 0x7FFF,
+ 0x4,
+ 17,
+ 0x30060000,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+ 0x7f010800,
+ 20,
+ 0xFFFFFFFF,
+ 0xFFFFFFFF,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
new file mode 100644
index 000000000000..b09f392481c2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -0,0 +1,1317 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqreturn.h>
+#include "msm_csid.h"
+#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_dt_util.h"
+#include "include/msm_csid_2_0_hwreg.h"
+#include "include/msm_csid_2_2_hwreg.h"
+#include "include/msm_csid_3_0_hwreg.h"
+#include "include/msm_csid_3_1_hwreg.h"
+#include "include/msm_csid_3_2_hwreg.h"
+#include "include/msm_csid_3_5_hwreg.h"
+#include "include/msm_csid_3_4_1_hwreg.h"
+#include "include/msm_csid_3_4_2_hwreg.h"
+#include "include/msm_csid_3_6_0_hwreg.h"
+#include "cam_hw_ops.h"
+
+#define V4L2_IDENT_CSID 50002
+#define CSID_VERSION_V20 0x02000011
+#define CSID_VERSION_V22 0x02001000
+#define CSID_VERSION_V30 0x30000000
+#define CSID_VERSION_V31 0x30010000
+#define CSID_VERSION_V31_1 0x30010001
+#define CSID_VERSION_V31_3 0x30010003
+#define CSID_VERSION_V32 0x30020000
+#define CSID_VERSION_V33 0x30030000
+#define CSID_VERSION_V34 0x30040000
+#define CSID_VERSION_V34_1 0x30040001
+#define CSID_VERSION_V34_2 0x30040002
+#define CSID_VERSION_V36 0x30060000
+#define CSID_VERSION_V37 0x30070000
+#define CSID_VERSION_V35 0x30050000
+#define CSID_VERSION_V40 0x40000000
+#define MSM_CSID_DRV_NAME "msm_csid"
+
+#define DBG_CSID 0
+#define SHORT_PKT_CAPTURE 0
+#define SHORT_PKT_OFFSET 0x200
+#define ENABLE_3P_BIT 1
+#define SOF_DEBUG_ENABLE 1
+#define SOF_DEBUG_DISABLE 0
+
+#define TRUE 1
+#define FALSE 0
+
+#define CSID_TIMEOUT msecs_to_jiffies(100)
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static struct msm_cam_clk_info csid_clk_info[CSID_NUM_CLK_MAX];
+static struct msm_cam_clk_info csid_clk_src_info[CSID_NUM_CLK_MAX];
+
+static struct camera_vreg_t csid_vreg_info[] = {
+ {"qcom,mipi-csi-vdd", 0, 0, 12000},
+};
+
+static struct camera_vreg_t csid_8960_vreg_info[] = {
+ {"mipi_csi_vdd", 1200000, 1200000, 20000},
+};
+#ifdef CONFIG_COMPAT
+static struct v4l2_file_operations msm_csid_v4l2_subdev_fops;
+#endif
+
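+/*
+ * Program the CID look-up table: each CID's data type occupies one byte
+ * of the CID_LUT_VC registers (four CIDs per 32-bit register, so
+ * cid >> 2 picks the register and cid % 4 the byte within it), and the
+ * decode format, shifted into place and OR'd with 0x3, goes into the
+ * per-CID config register.
+ */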
+static int msm_csid_cid_lut(
+ struct msm_camera_csid_lut_params *csid_lut_params,
+ struct csid_device *csid_dev)
+{
+ int rc = 0, i = 0;
+ uint32_t val = 0;
+
+ if (!csid_lut_params) {
+ pr_err("%s:%d csid_lut_params NULL\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ if (csid_lut_params->num_cid > MAX_CID) {
+ pr_err("%s:%d num_cid exceeded limit num_cid = %d max = %d\n",
+ __func__, __LINE__, csid_lut_params->num_cid, MAX_CID);
+ return -EINVAL;
+ }
+ for (i = 0; i < csid_lut_params->num_cid; i++) {
+ if (csid_lut_params->vc_cfg[i]->cid >= MAX_CID) {
+ pr_err("%s: cid outside range %d\n",
+ __func__, csid_lut_params->vc_cfg[i]->cid);
+ return -EINVAL;
+ }
+ CDBG("%s lut params num_cid = %d, cid = %d\n",
+ __func__,
+ csid_lut_params->num_cid,
+ csid_lut_params->vc_cfg[i]->cid);
+ CDBG("%s lut params dt = 0x%x, df = %d\n", __func__,
+ csid_lut_params->vc_cfg[i]->dt,
+ csid_lut_params->vc_cfg[i]->decode_format);
+ if (csid_lut_params->vc_cfg[i]->dt < 0x12 ||
+ csid_lut_params->vc_cfg[i]->dt > 0x37) {
+ pr_err("%s: unsupported data type 0x%x\n",
+ __func__, csid_lut_params->vc_cfg[i]->dt);
+ return rc;
+ }
+ val = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_cid_lut_vc_0_addr +
+ (csid_lut_params->vc_cfg[i]->cid >> 2) * 4)
+ & ~(0xFF << ((csid_lut_params->vc_cfg[i]->cid % 4) *
+ 8));
+ val |= (csid_lut_params->vc_cfg[i]->dt <<
+ ((csid_lut_params->vc_cfg[i]->cid % 4) * 8));
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_cid_lut_vc_0_addr +
+ (csid_lut_params->vc_cfg[i]->cid >> 2) * 4);
+
+ val = (csid_lut_params->vc_cfg[i]->decode_format << 4) | 0x3;
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_cid_n_cfg_addr +
+ (csid_lut_params->vc_cfg[i]->cid * 4));
+ }
+ return rc;
+}
+
+#if (DBG_CSID)
+static void msm_csid_set_debug_reg(struct csid_device *csid_dev,
+ struct msm_camera_csid_params *csid_params)
+{
+ uint32_t val = 0;
+
+ if ((csid_dev->hw_dts_version == CSID_VERSION_V34_1) ||
+ (csid_dev->hw_dts_version == CSID_VERSION_V36)) {
+ val = ((1 << csid_params->lane_cnt) - 1) << 20;
+ msm_camera_io_w(0x7f010800 | val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ msm_camera_io_w(0x7f010800 | val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ } else {
+ if (csid_dev->csid_3p_enabled == 1) {
+ val = ((1 << csid_params->lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_3p;
+ } else {
+ val = ((1 << csid_params->lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_2p;
+ }
+ val |= csid_dev->ctrl_reg->csid_reg.csid_irq_mask_val;
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ }
+}
+#elif (SHORT_PKT_CAPTURE)
+static void msm_csid_set_debug_reg(struct csid_device *csid_dev,
+ struct msm_camera_csid_params *csid_params)
+{
+ uint32_t val = 0;
+
+ if ((csid_dev->hw_dts_version == CSID_VERSION_V34_1) ||
+ (csid_dev->hw_dts_version == CSID_VERSION_V36)) {
+ val = ((1 << csid_params->lane_cnt) - 1) << 20;
+ msm_camera_io_w(0x7f010a00 | val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ msm_camera_io_w(0x7f010a00 | val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ } else {
+ if (csid_dev->csid_3p_enabled == 1) {
+ val = ((1 << csid_params->lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_3p;
+ } else {
+ val = ((1 << csid_params->lane_cnt) - 1) <<
+ csid_dev->ctrl_reg->
+ csid_reg.csid_err_lane_overflow_offset_2p;
+ }
+ val |= csid_dev->ctrl_reg->csid_reg.csid_irq_mask_val;
+ val |= SHORT_PKT_OFFSET;
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+ msm_camera_io_w(val, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ }
+}
+#else
+static void msm_csid_set_debug_reg(struct csid_device *csid_dev,
+ struct msm_camera_csid_params *csid_params) {}
+#endif
+
+static int msm_csid_reset(struct csid_device *csid_dev)
+{
+ int32_t rc = 0;
+ msm_camera_io_w(csid_dev->ctrl_reg->csid_reg.csid_rst_stb_all,
+ csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_rst_cmd_addr);
+ rc = wait_for_completion_timeout(&csid_dev->reset_complete,
+ CSID_TIMEOUT);
+ if (rc <= 0) {
+ pr_err("wait_for_completion in msm_csid_reset fail rc = %d\n",
+ rc);
+ if (rc == 0)
+ rc = -ETIMEDOUT;
+ }
+ return rc;
+}
+
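+/*
+ * Configure the CSID core: soft-reset it, set the CSI clock rate
+ * (bounded by csid_max_clk), then either program the internal test
+ * generator or the lane count, lane mapping and PHY select, followed by
+ * the CID LUT and the optional debug/short-packet IRQ mask.
+ */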
+static int msm_csid_config(struct csid_device *csid_dev,
+ struct msm_camera_csid_params *csid_params)
+{
+ int rc = 0;
+ uint32_t val = 0, clk_rate = 0;
+ uint32_t round_rate = 0, input_sel;
+ uint32_t lane_assign = 0;
+ uint8_t lane_num = 0;
+ uint8_t i, j;
+ struct clk **csid_clk_ptr;
+ void __iomem *csidbase;
+ csidbase = csid_dev->base;
+ if (!csidbase || !csid_params) {
+ pr_err("%s:%d csidbase %p, csid params %p\n", __func__,
+ __LINE__, csidbase, csid_params);
+ return -EINVAL;
+ }
+
+ CDBG("%s csid_params, lane_cnt = %d, lane_assign = 0x%x\n",
+ __func__,
+ csid_params->lane_cnt,
+ csid_params->lane_assign);
+ CDBG("%s csid_params phy_sel = %d\n", __func__,
+ csid_params->phy_sel);
+
+ csid_dev->csid_lane_cnt = csid_params->lane_cnt;
+ rc = msm_csid_reset(csid_dev);
+ if (rc < 0) {
+ pr_err("%s:%d msm_csid_reset failed\n", __func__, __LINE__);
+ return rc;
+ }
+
+ csid_clk_ptr = csid_dev->csid_clk;
+ if (!csid_clk_ptr) {
+ pr_err("csi_src_clk get failed\n");
+ return -EINVAL;
+ }
+
+ clk_rate = (csid_params->csi_clk > 0) ?
+ (csid_params->csi_clk) : csid_dev->csid_max_clk;
+ round_rate = clk_round_rate(csid_clk_ptr[csid_dev->csid_clk_index],
+ clk_rate);
+ if (round_rate > csid_dev->csid_max_clk)
+ round_rate = csid_dev->csid_max_clk;
+ pr_debug("usr set rate csi_clk clk_rate = %u round_rate = %u\n",
+ clk_rate, round_rate);
+ rc = clk_set_rate(csid_clk_ptr[csid_dev->csid_clk_index],
+ round_rate);
+ if (rc < 0) {
+ pr_err("csi_src_clk set failed\n");
+ return rc;
+ }
+
+ if (csid_dev->is_testmode == 1) {
+ struct msm_camera_csid_testmode_parms *tm;
+ tm = &csid_dev->testmode_params;
+
+ /* 31:24 V blank, 23:13 H blank, 3:2 num of active DT, 1:0 VC */
+ val = ((tm->v_blanking_count & 0xFF) << 24) |
+ ((tm->h_blanking_count & 0x7FF) << 13);
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_vc_cfg_addr);
+ CDBG("[TG] CSID_TG_VC_CFG_ADDR 0x%08x\n", val);
+
+ /* 28:16 bytes per lines, 12:0 num of lines */
+ val = ((tm->num_bytes_per_line & 0x1FFF) << 16) |
+ (tm->num_lines & 0x1FFF);
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_dt_n_cfg_0_addr);
+ CDBG("[TG] CSID_TG_DT_n_CFG_0_ADDR 0x%08x\n", val);
+
+ /* 5:0 data type */
+ val = csid_params->lut_params.vc_cfg[0]->dt;
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_dt_n_cfg_1_addr);
+ CDBG("[TG] CSID_TG_DT_n_CFG_1_ADDR 0x%08x\n", val);
+
+ /* 2:0 output random */
+ msm_camera_io_w(csid_dev->testmode_params.payload_mode,
+ csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_dt_n_cfg_2_addr);
+ } else {
+ val = csid_params->lane_cnt - 1;
+
+ for (i = 0, j = 0; i < PHY_LANE_MAX; i++) {
+ if (i == PHY_LANE_CLK)
+ continue;
+ lane_num = (csid_params->lane_assign >> j) & 0xF;
+ if (lane_num >= PHY_LANE_MAX) {
+ pr_err("%s:%d invalid lane number %d\n",
+ __func__, __LINE__, lane_num);
+ return -EINVAL;
+ }
+ if (csid_dev->ctrl_reg->csid_lane_assign[lane_num] >=
+ PHY_LANE_MAX){
+ pr_err("%s:%d invalid lane map %d\n",
+ __func__, __LINE__,
+ csid_dev->ctrl_reg->
+ csid_lane_assign[lane_num]);
+ return -EINVAL;
+ }
+ lane_assign |=
+ csid_dev->ctrl_reg->csid_lane_assign[lane_num]
+ << j;
+ j += 4;
+ }
+
+ CDBG("%s csid_params calculated lane_assign = 0x%X\n",
+ __func__, lane_assign);
+
+ val |= lane_assign <<
+ csid_dev->ctrl_reg->csid_reg.csid_dl_input_sel_shift;
+ if (csid_dev->hw_version < CSID_VERSION_V30) {
+ val |= (0xF << 10);
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_core_ctrl_0_addr);
+ } else {
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_core_ctrl_0_addr);
+ val = csid_params->phy_sel <<
+ csid_dev->ctrl_reg->csid_reg.csid_phy_sel_shift;
+ val |= 0xF;
+ msm_camera_io_w(val, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_core_ctrl_1_addr);
+ }
+ if (csid_dev->hw_version == CSID_VERSION_V35 &&
+ csid_params->csi_3p_sel == 1) {
+ csid_dev->csid_3p_enabled = 1;
+ val = (csid_params->lane_cnt - 1) << ENABLE_3P_BIT;
+
+ for (i = 0; i < csid_params->lane_cnt; i++) {
+ input_sel =
+ (csid_params->lane_assign >> (4*i))
+ & 0xF;
+ val |= input_sel << (4*(i+1));
+ }
+ val |= csid_params->phy_sel <<
+ csid_dev->ctrl_reg->csid_reg.csid_phy_sel_shift_3p;
+ val |= ENABLE_3P_BIT;
+ msm_camera_io_w(val, csidbase + csid_dev->ctrl_reg
+ ->csid_reg.csid_3p_ctrl_0_addr);
+ }
+ }
+
+ rc = msm_csid_cid_lut(&csid_params->lut_params, csid_dev);
+ if (rc < 0) {
+ pr_err("%s:%d config cid lut failed\n", __func__, __LINE__);
+ return rc;
+ }
+ msm_csid_set_debug_reg(csid_dev, csid_params);
+
+ if (csid_dev->is_testmode == 1)
+ msm_camera_io_w(0x00A06437, csidbase +
+ csid_dev->ctrl_reg->csid_reg.csid_tg_ctrl_addr);
+
+ return rc;
+}
+
+#if SHORT_PKT_CAPTURE
+static irqreturn_t msm_csid_irq(int irq_num, void *data)
+{
+ uint32_t irq;
+ uint32_t short_dt = 0;
+ uint32_t count = 0, dt = 0;
+ struct csid_device *csid_dev = data;
+
+ if (!csid_dev) {
+ pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__);
+ return IRQ_HANDLED;
+ }
+ irq = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ CDBG("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
+ __func__, csid_dev->pdev->id, irq);
+ if (irq & (0x1 <<
+ csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift))
+ complete(&csid_dev->reset_complete);
+ if (irq & SHORT_PKT_OFFSET) {
+ short_dt = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->
+ csid_reg.csid_captured_short_pkt_addr);
+ count = (short_dt >> 8) & 0xffff;
+ dt = short_dt >> 24;
+ CDBG("CSID:: %s:%d core %d dt: 0x%x, count: %d\n",
+ __func__, __LINE__, csid_dev->pdev->id, dt, count);
+ msm_camera_io_w(0x101, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_rst_cmd_addr);
+ }
+ msm_camera_io_w(irq, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ return IRQ_HANDLED;
+}
+#else
+static irqreturn_t msm_csid_irq(int irq_num, void *data)
+{
+ uint32_t irq;
+ struct csid_device *csid_dev = data;
+
+ if (!csid_dev) {
+ pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__);
+ return IRQ_HANDLED;
+ }
+ irq = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ CDBG("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n",
+ __func__, csid_dev->pdev->id, irq);
+ if (irq & (0x1 <<
+ csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift))
+ complete(&csid_dev->reset_complete);
+ msm_camera_io_w(irq, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ return IRQ_HANDLED;
+}
+#endif
+
+static int msm_csid_irq_routine(struct v4l2_subdev *sd, u32 status,
+ bool *handled)
+{
+ struct csid_device *csid_dev = v4l2_get_subdevdata(sd);
+ irqreturn_t ret;
+ CDBG("%s E\n", __func__);
+ ret = msm_csid_irq(csid_dev->irq->start, csid_dev);
+ *handled = TRUE;
+ return 0;
+}
+
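+/*
+ * Power-up sequence: vote the AHB clock to SVS, map the registers,
+ * configure and enable the top-level and MIPI CSI regulators (the 8960
+ * set for pre-V2.2 cores), select the CSI clock source on V2.2, enable
+ * the clocks, latch the hardware version, enable the IRQ and issue a
+ * soft reset.  Failures unwind through the labels below in reverse
+ * order.
+ */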
+static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
+{
+ int rc = 0;
+
+ if (!csid_version) {
+ pr_err("%s:%d csid_version NULL\n", __func__, __LINE__);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ csid_dev->reg_ptr = NULL;
+
+ if (csid_dev->csid_state == CSID_POWER_UP) {
+ pr_err("%s: csid invalid state %d\n", __func__,
+ csid_dev->csid_state);
+ return -EINVAL;
+ }
+
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_CSID, CAMERA_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ csid_dev->base = ioremap(csid_dev->mem->start,
+ resource_size(csid_dev->mem));
+ if (!csid_dev->base) {
+ pr_err("%s csid_dev->base NULL\n", __func__);
+ rc = -ENOMEM;
+ goto ioremap_fail;
+ }
+
+ pr_info("%s: CSID_VERSION = 0x%x\n", __func__,
+ csid_dev->ctrl_reg->csid_reg.csid_version);
+ /* power up */
+ rc = msm_camera_config_vreg(&csid_dev->pdev->dev, csid_dev->csid_vreg,
+ csid_dev->regulator_count, NULL, 0,
+ &csid_dev->csid_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d csid config_vreg failed\n", __func__, __LINE__);
+ goto top_vreg_config_failed;
+ }
+
+ if (csid_dev->ctrl_reg->csid_reg.csid_version < CSID_VERSION_V22) {
+ rc = msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 1);
+ } else {
+ rc = msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 1);
+ }
+ if (rc < 0) {
+ pr_err("%s: regulator on failed\n", __func__);
+ goto csid_vreg_config_failed;
+ }
+
+ rc = msm_camera_enable_vreg(&csid_dev->pdev->dev, csid_dev->csid_vreg,
+ csid_dev->regulator_count, NULL, 0,
+ &csid_dev->csid_reg_ptr[0], 1);
+ if (rc < 0) {
+ pr_err("%s:%d csid enable_vreg failed\n", __func__, __LINE__);
+ goto top_vreg_enable_failed;
+ }
+
+ if (csid_dev->ctrl_reg->csid_reg.csid_version < CSID_VERSION_V22) {
+ rc = msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 1);
+ } else {
+ rc = msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 1);
+ }
+ if (rc < 0) {
+ pr_err("%s: regulator enable failed\n", __func__);
+ goto csid_vreg_enable_failed;
+ }
+
+ if (csid_dev->ctrl_reg->csid_reg.csid_version == CSID_VERSION_V22)
+ msm_cam_clk_sel_src(&csid_dev->pdev->dev,
+ &csid_clk_info[3], csid_clk_src_info,
+ csid_dev->num_clk_src_info);
+
+ rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
+ csid_clk_info, csid_dev->csid_clk,
+ csid_dev->num_clk, 1);
+ if (rc < 0) {
+ pr_err("%s:%d clock enable failed\n",
+ __func__, __LINE__);
+ goto clk_enable_failed;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ csid_dev->hw_version =
+ msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_hw_version_addr);
+ CDBG("%s:%d called csid_dev->hw_version %x\n", __func__, __LINE__,
+ csid_dev->hw_version);
+ *csid_version = csid_dev->hw_version;
+ csid_dev->csid_sof_debug = SOF_DEBUG_DISABLE;
+
+ csid_dev->is_testmode = 0;
+
+ init_completion(&csid_dev->reset_complete);
+
+ enable_irq(csid_dev->irq->start);
+
+ rc = msm_csid_reset(csid_dev);
+ if (rc < 0) {
+ pr_err("%s:%d msm_csid_reset failed\n", __func__, __LINE__);
+ goto msm_csid_reset_fail;
+ }
+
+ csid_dev->csid_state = CSID_POWER_UP;
+ return rc;
+
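+ /* Error unwind: undo each step that succeeded above, in reverse order. */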
+msm_csid_reset_fail:
+ disable_irq(csid_dev->irq->start);
+ msm_cam_clk_enable(&csid_dev->pdev->dev, csid_clk_info,
+ csid_dev->csid_clk, csid_dev->num_clk, 0);
+clk_enable_failed:
+ if (csid_dev->ctrl_reg->csid_reg.csid_version < CSID_VERSION_V22) {
+ msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+ } else {
+ msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+ }
+csid_vreg_enable_failed:
+ msm_camera_enable_vreg(&csid_dev->pdev->dev, csid_dev->csid_vreg,
+ csid_dev->regulator_count, NULL, 0,
+ &csid_dev->csid_reg_ptr[0], 0);
+top_vreg_enable_failed:
+ if (csid_dev->ctrl_reg->csid_reg.csid_version < CSID_VERSION_V22) {
+ msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+ } else {
+ msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+ }
+csid_vreg_config_failed:
+ msm_camera_config_vreg(&csid_dev->pdev->dev, csid_dev->csid_vreg,
+ csid_dev->regulator_count, NULL, 0,
+ &csid_dev->csid_reg_ptr[0], 0);
+top_vreg_config_failed:
+ iounmap(csid_dev->base);
+ csid_dev->base = NULL;
+ioremap_fail:
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_CSID,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote from AHB\n", __func__);
+ return rc;
+}
+
+static int msm_csid_release(struct csid_device *csid_dev)
+{
+ uint32_t irq;
+
+ if (csid_dev->csid_state != CSID_POWER_UP) {
+ pr_err("%s: csid invalid state %d\n", __func__,
+ csid_dev->csid_state);
+ return -EINVAL;
+ }
+
+ CDBG("%s:%d, hw_version = 0x%x\n", __func__, __LINE__,
+ csid_dev->hw_version);
+
+ irq = msm_camera_io_r(csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr);
+ msm_camera_io_w(irq, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr);
+ msm_camera_io_w(0, csid_dev->base +
+ csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr);
+
+ disable_irq(csid_dev->irq->start);
+
+ if (csid_dev->hw_version == CSID_VERSION_V20) {
+ msm_cam_clk_enable(&csid_dev->pdev->dev, csid_clk_info,
+ csid_dev->csid_clk, csid_dev->num_clk, 0);
+
+ msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+
+ msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+ } else {
+ msm_cam_clk_enable(&csid_dev->pdev->dev,
+ csid_clk_info,
+ csid_dev->csid_clk,
+ csid_dev->num_clk, 0);
+
+ msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+
+ msm_camera_enable_vreg(&csid_dev->pdev->dev,
+ csid_dev->csid_vreg, csid_dev->regulator_count, NULL,
+ 0, &csid_dev->csid_reg_ptr[0], 0);
+
+ msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
+ NULL, 0, &csid_dev->csi_vdd, 0);
+
+ msm_camera_config_vreg(&csid_dev->pdev->dev,
+ csid_dev->csid_vreg, csid_dev->regulator_count, NULL,
+ 0, &csid_dev->csid_reg_ptr[0], 0);
+ }
+
+ if (!IS_ERR_OR_NULL(csid_dev->reg_ptr)) {
+ regulator_disable(csid_dev->reg_ptr);
+ regulator_put(csid_dev->reg_ptr);
+ }
+
+ iounmap(csid_dev->base);
+ csid_dev->base = NULL;
+ csid_dev->csid_state = CSID_POWER_DOWN;
+
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_CSID,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote from AHB\n", __func__);
+ return 0;
+}
+
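+/*
+ * Handle CSID_* configuration requests from user space: INIT powers the
+ * core up, TESTMODE_CFG and CFG copy the test-mode/CSID parameters (and
+ * the per-CID vc_cfg entries) in from user space, RELEASE powers it down.
+ */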
+static int32_t msm_csid_cmd(struct csid_device *csid_dev, void __user *arg)
+{
+ int rc = 0;
+ struct csid_cfg_data *cdata = (struct csid_cfg_data *)arg;
+
+ if (!csid_dev || !cdata) {
+ pr_err("%s:%d csid_dev %p, cdata %p\n", __func__, __LINE__,
+ csid_dev, cdata);
+ return -EINVAL;
+ }
+ CDBG("%s cfgtype = %d\n", __func__, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CSID_INIT:
+ rc = msm_csid_init(csid_dev, &cdata->cfg.csid_version);
+ CDBG("%s csid version 0x%x\n", __func__,
+ cdata->cfg.csid_version);
+ break;
+ case CSID_TESTMODE_CFG: {
+ csid_dev->is_testmode = 1;
+ if (copy_from_user(&csid_dev->testmode_params,
+ (void *)cdata->cfg.csid_testmode_params,
+ sizeof(struct msm_camera_csid_testmode_parms))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+ case CSID_CFG: {
+ struct msm_camera_csid_params csid_params;
+ struct msm_camera_csid_vc_cfg *vc_cfg = NULL;
+ int i = 0;
+ if (copy_from_user(&csid_params,
+ (void *)cdata->cfg.csid_params,
+ sizeof(struct msm_camera_csid_params))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ if (csid_params.lut_params.num_cid < 1 ||
+ csid_params.lut_params.num_cid > MAX_CID) {
+ pr_err("%s: %d num_cid outside range\n",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ for (i = 0; i < csid_params.lut_params.num_cid; i++) {
+ vc_cfg = kzalloc(sizeof(struct msm_camera_csid_vc_cfg),
+ GFP_KERNEL);
+ if (!vc_cfg) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto MEM_CLEAN;
+ }
+ if (copy_from_user(vc_cfg,
+ (void *)csid_params.lut_params.vc_cfg[i],
+ sizeof(struct msm_camera_csid_vc_cfg))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ kfree(vc_cfg);
+ rc = -EFAULT;
+ goto MEM_CLEAN;
+ }
+ csid_params.lut_params.vc_cfg[i] = vc_cfg;
+ }
+ csid_dev->csid_sof_debug = SOF_DEBUG_DISABLE;
+ rc = msm_csid_config(csid_dev, &csid_params);
+MEM_CLEAN:
+ for (i--; i >= 0; i--)
+ kfree(csid_params.lut_params.vc_cfg[i]);
+ break;
+ }
+ case CSID_RELEASE:
+ rc = msm_csid_release(csid_dev);
+ break;
+ default:
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ return rc;
+}
+
+static int32_t msm_csid_get_subdev_id(struct csid_device *csid_dev, void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+ if (!subdev_id) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *subdev_id = csid_dev->pdev->id;
+ pr_debug("%s:%d subdev_id %d\n", __func__, __LINE__, *subdev_id);
+ return 0;
+}
+
+static long msm_csid_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = -ENOIOCTLCMD;
+ struct csid_device *csid_dev = v4l2_get_subdevdata(sd);
+ mutex_lock(&csid_dev->mutex);
+ CDBG("%s:%d id %d\n", __func__, __LINE__, csid_dev->pdev->id);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ rc = msm_csid_get_subdev_id(csid_dev, arg);
+ break;
+ case VIDIOC_MSM_CSID_IO_CFG:
+ rc = msm_csid_cmd(csid_dev, arg);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ if (csid_dev->csid_state != CSID_POWER_UP)
+ break;
+ csid_dev->csid_sof_debug = SOF_DEBUG_ENABLE;
+ break;
+ case VIDIOC_MSM_CSID_RELEASE:
+ case MSM_SD_SHUTDOWN:
+ rc = msm_csid_release(csid_dev);
+ break;
+ default:
+ pr_err_ratelimited("%s: command not found\n", __func__);
+ }
+ CDBG("%s:%d\n", __func__, __LINE__);
+ mutex_unlock(&csid_dev->mutex);
+ return rc;
+}
+
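+/*
+ * 32-bit compat path: translate struct csid_cfg_data32 and
+ * msm_camera_csid_params32 into the native layouts before reusing the
+ * same init/config/release helpers.
+ */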
+#ifdef CONFIG_COMPAT
+static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void __user *arg)
+{
+ int rc = 0;
+ struct csid_cfg_data *cdata;
+ struct csid_cfg_data32 *arg32 = (struct csid_cfg_data32 *) (arg);
+ struct csid_cfg_data local_arg;
+
+ if (!csid_dev || !arg32) {
+ pr_err("%s:%d csid_dev %p, arg32 %p\n", __func__, __LINE__,
+ csid_dev, arg32);
+ return -EINVAL;
+ }
+
+ local_arg.cfgtype = arg32->cfgtype;
+ cdata = &local_arg;
+
+ CDBG("%s cfgtype = %d\n", __func__, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CSID_INIT:
+ rc = msm_csid_init(csid_dev, &cdata->cfg.csid_version);
+ arg32->cfg.csid_version = local_arg.cfg.csid_version;
+ CDBG("%s csid version 0x%x\n", __func__,
+ cdata->cfg.csid_version);
+ break;
+ case CSID_TESTMODE_CFG: {
+ csid_dev->is_testmode = 1;
+ if (copy_from_user(&csid_dev->testmode_params,
+ (void *)compat_ptr(arg32->cfg.csid_testmode_params),
+ sizeof(struct msm_camera_csid_testmode_parms))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+ case CSID_CFG: {
+
+ struct msm_camera_csid_params csid_params;
+ struct msm_camera_csid_vc_cfg *vc_cfg = NULL;
+ int i = 0;
+ struct msm_camera_csid_lut_params32 lut_par32;
+ struct msm_camera_csid_params32 csid_params32;
+ struct msm_camera_csid_vc_cfg vc_cfg32;
+
+ if (copy_from_user(&csid_params32,
+ (void *)compat_ptr(arg32->cfg.csid_params),
+ sizeof(struct msm_camera_csid_params32))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ csid_params.lane_cnt = csid_params32.lane_cnt;
+ csid_params.lane_assign = csid_params32.lane_assign;
+ csid_params.phy_sel = csid_params32.phy_sel;
+ csid_params.csi_clk = csid_params32.csi_clk;
+ csid_params.csi_3p_sel = csid_params32.csi_3p_sel;
+
+ lut_par32 = csid_params32.lut_params;
+ csid_params.lut_params.num_cid = lut_par32.num_cid;
+
+ if (csid_params.lut_params.num_cid < 1 ||
+ csid_params.lut_params.num_cid > MAX_CID) {
+ pr_err("%s: %d num_cid outside range %d\n", __func__,
+ __LINE__, csid_params.lut_params.num_cid);
+ rc = -EINVAL;
+ break;
+ }
+
+ for (i = 0; i < lut_par32.num_cid; i++) {
+ vc_cfg = kzalloc(sizeof(struct msm_camera_csid_vc_cfg),
+ GFP_KERNEL);
+ if (!vc_cfg) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto MEM_CLEAN32;
+ }
+ /* msm_camera_csid_vc_cfg size
+ * does not change in COMPAT MODE
+ */
+ if (copy_from_user(&vc_cfg32,
+ (void *)compat_ptr(lut_par32.vc_cfg[i]),
+ sizeof(vc_cfg32))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ kfree(vc_cfg);
+ vc_cfg = NULL;
+ rc = -EFAULT;
+ goto MEM_CLEAN32;
+ }
+ vc_cfg->cid = vc_cfg32.cid;
+ vc_cfg->dt = vc_cfg32.dt;
+ vc_cfg->decode_format = vc_cfg32.decode_format;
+ csid_params.lut_params.vc_cfg[i] = vc_cfg;
+ }
+ rc = msm_csid_config(csid_dev, &csid_params);
+
+MEM_CLEAN32:
+ for (i--; i >= 0; i--) {
+ kfree(csid_params.lut_params.vc_cfg[i]);
+ csid_params.lut_params.vc_cfg[i] = NULL;
+ }
+ break;
+ }
+ case CSID_RELEASE:
+ rc = msm_csid_release(csid_dev);
+ break;
+ default:
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ return rc;
+}
+
+static long msm_csid_subdev_ioctl32(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = -ENOIOCTLCMD;
+ struct csid_device *csid_dev = v4l2_get_subdevdata(sd);
+
+ mutex_lock(&csid_dev->mutex);
+ CDBG("%s:%d id %d\n", __func__, __LINE__, csid_dev->pdev->id);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ rc = msm_csid_get_subdev_id(csid_dev, arg);
+ break;
+ case VIDIOC_MSM_CSID_IO_CFG32:
+ rc = msm_csid_cmd32(csid_dev, arg);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ if (csid_dev->csid_state != CSID_POWER_UP)
+ break;
+ csid_dev->csid_sof_debug = SOF_DEBUG_ENABLE;
+ break;
+ case VIDIOC_MSM_CSID_RELEASE:
+ case MSM_SD_SHUTDOWN:
+ rc = msm_csid_release(csid_dev);
+ break;
+ default:
+ pr_err_ratelimited("%s: command not found\n", __func__);
+ }
+ CDBG("%s:%d\n", __func__, __LINE__);
+ mutex_unlock(&csid_dev->mutex);
+ return rc;
+}
+
+static long msm_csid_subdev_do_ioctl32(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return msm_csid_subdev_ioctl32(sd, cmd, arg);
+}
+
+static long msm_csid_subdev_fops_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_csid_subdev_do_ioctl32);
+}
+#endif
+static const struct v4l2_subdev_internal_ops msm_csid_internal_ops;
+
+static struct v4l2_subdev_core_ops msm_csid_subdev_core_ops = {
+ .ioctl = &msm_csid_subdev_ioctl,
+ .interrupt_service_routine = msm_csid_irq_routine,
+};
+
+static const struct v4l2_subdev_ops msm_csid_subdev_ops = {
+ .core = &msm_csid_subdev_core_ops,
+};
+
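+/*
+ * Parse "clock-names" and "qcom,clock-rates" from the device tree. On
+ * CSID v2.2 the clock list is split at "csi_phy_src_clk": entries up to
+ * and including it fill csid_clk_info, the rest fill csid_clk_src_info.
+ * A DT rate of 0 is stored as -1.
+ */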
+static int msm_csid_get_clk_info(struct csid_device *csid_dev,
+ struct platform_device *pdev)
+{
+ uint32_t count;
+ uint32_t cnt = 0;
+ int i, rc;
+ int ii = 0;
+ uint32_t rates[CSID_NUM_CLK_MAX];
+ const char *clock_name;
+ struct device_node *of_node;
+ of_node = pdev->dev.of_node;
+
+ count = of_property_count_strings(of_node, "clock-names");
+ csid_dev->num_clk = count;
+
+ CDBG("%s: count = %d\n", __func__, count);
+ if (count == 0) {
+ pr_err("%s: no clocks found in device tree, count=%d",
+ __func__, count);
+ return -EINVAL;
+ }
+
+ if (count > CSID_NUM_CLK_MAX) {
+ pr_err("%s: invalid count=%d, max is %d\n", __func__,
+ count, CSID_NUM_CLK_MAX);
+ return -EINVAL;
+ }
+
+ if (csid_dev->hw_dts_version == CSID_VERSION_V22) {
+ cnt = count;
+ count = 0;
+
+ for (i = 0; i < cnt; i++) {
+ count++;
+ rc = of_property_read_string_index(of_node,
+ "clock-names", i, &clock_name);
+ CDBG("%s: clock_names[%d] = %s\n", __func__,
+ i, clock_name);
+ if (rc < 0) {
+ pr_err("%s:%d, failed\n", __func__, __LINE__);
+ return rc;
+ }
+ if (strcmp(clock_name, "csi_phy_src_clk") == 0)
+ break;
+ }
+ csid_dev->num_clk = count;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node, "clock-names",
+ i, &(csid_clk_info[i].clk_name));
+ CDBG("%s: clock-names[%d] = %s\n", __func__,
+ i, csid_clk_info[i].clk_name);
+ if (rc < 0) {
+ pr_err("%s:%d, failed\n", __func__, __LINE__);
+ return rc;
+ }
+ }
+ rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+ rates, count);
+ if (rc < 0) {
+ pr_err("%s:%d, failed", __func__, __LINE__);
+ return rc;
+ }
+ for (i = 0; i < count; i++) {
+ csid_clk_info[i].clk_rate = (rates[i] == 0) ?
+ (long)-1 : rates[i];
+ if (!strcmp(csid_clk_info[i].clk_name, "csi_src_clk")) {
+ CDBG("%s:%d, copy csi_src_clk",
+ __func__, __LINE__);
+ csid_dev->csid_max_clk = rates[i];
+ csid_dev->csid_clk_index = i;
+ }
+ CDBG("%s: clk_rate[%d] = %ld\n", __func__, i,
+ csid_clk_info[i].clk_rate);
+ }
+
+ if (csid_dev->hw_dts_version == CSID_VERSION_V22) {
+ csid_dev->num_clk_src_info = cnt - count;
+ CDBG("%s: count = %d\n", __func__, (cnt - count));
+
+ for (i = count; i < cnt; i++) {
+ ii++;
+ rc = of_property_read_string_index(of_node,
+ "clock-names", i,
+ &(csid_clk_src_info[ii].clk_name));
+ CDBG("%s: clock-names[%d] = %s\n", __func__,
+ ii, csid_clk_src_info[ii].clk_name);
+ if (rc < 0) {
+ pr_err("%s:%d, failed\n", __func__, __LINE__);
+ return rc;
+ }
+ }
+ ii = 0;
+ rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+ rates, cnt);
+ if (rc < 0) {
+ pr_err("%s:%d, failed", __func__, __LINE__);
+ return rc;
+ }
+ for (i = count; i < cnt; i++) {
+ ii++;
+ csid_clk_src_info[ii].clk_rate = rates[i];
+ CDBG("%s: clk_rate[%d] = %ld\n", __func__, ii,
+ csid_clk_src_info[ii].clk_rate);
+ }
+ }
+ return 0;
+}
+
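+/*
+ * Probe: allocate the csid_device, read cell-index and the CSI vdd
+ * voltage from DT, parse clocks and regulators, claim the "csid" MEM and
+ * IRQ resources, register the v4l2 subdev, request (and keep disabled)
+ * the IRQ, then pick the register map from the compatible string.
+ */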
+static int csid_probe(struct platform_device *pdev)
+{
+ struct csid_device *new_csid_dev;
+ uint32_t csi_vdd_voltage = 0;
+ int rc = 0;
+ new_csid_dev = kzalloc(sizeof(struct csid_device), GFP_KERNEL);
+ if (!new_csid_dev) {
+ pr_err("%s: no enough memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ new_csid_dev->csid_3p_enabled = 0;
+ new_csid_dev->ctrl_reg = kzalloc(sizeof(struct csid_ctrl_t),
+ GFP_KERNEL);
+ if (!new_csid_dev->ctrl_reg) {
+ pr_err("%s:%d kzalloc failed\n", __func__, __LINE__);
+ kfree(new_csid_dev);
+ return -ENOMEM;
+ }
+
+ v4l2_subdev_init(&new_csid_dev->msm_sd.sd, &msm_csid_subdev_ops);
+ v4l2_set_subdevdata(&new_csid_dev->msm_sd.sd, new_csid_dev);
+ platform_set_drvdata(pdev, &new_csid_dev->msm_sd.sd);
+ mutex_init(&new_csid_dev->mutex);
+
+ if (pdev->dev.of_node) {
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+ if (rc < 0) {
+ pr_err("%s:%d failed to read cell-index\n", __func__,
+ __LINE__);
+ goto csid_no_resource;
+ }
+ CDBG("%s device id %d\n", __func__, pdev->id);
+
+ rc = of_property_read_u32((&pdev->dev)->of_node,
+ "qcom,csi-vdd-voltage", &csi_vdd_voltage);
+ if (rc < 0) {
+ pr_err("%s:%d failed to read qcom,csi-vdd-voltage\n",
+ __func__, __LINE__);
+ goto csid_no_resource;
+ }
+ CDBG("%s:%d reading mipi_csi_vdd is %d\n", __func__, __LINE__,
+ csi_vdd_voltage);
+
+ csid_vreg_info[0].min_voltage = csi_vdd_voltage;
+ csid_vreg_info[0].max_voltage = csi_vdd_voltage;
+ }
+
+ rc = msm_csid_get_clk_info(new_csid_dev, pdev);
+ if (rc < 0) {
+ pr_err("%s: msm_csid_get_clk_info() failed", __func__);
+ rc = -EFAULT;
+ goto csid_no_resource;
+ }
+
+ rc = msm_camera_get_dt_vreg_data(pdev->dev.of_node,
+ &(new_csid_dev->csid_vreg), &(new_csid_dev->regulator_count));
+ if (rc < 0) {
+ pr_err("%s: get vreg data from dtsi fail\n", __func__);
+ rc = -EFAULT;
+ goto csid_no_resource;
+ }
+
+ if ((new_csid_dev->regulator_count < 0) ||
+ (new_csid_dev->regulator_count > MAX_REGULATOR)) {
+ pr_err("%s: invalid reg count = %d, max is %d\n", __func__,
+ new_csid_dev->regulator_count, MAX_REGULATOR);
+ rc = -EFAULT;
+ goto csid_no_resource;
+ }
+
+ new_csid_dev->mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "csid");
+ if (!new_csid_dev->mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto csid_invalid_vreg_data;
+ }
+ new_csid_dev->irq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "csid");
+ if (!new_csid_dev->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto csid_invalid_vreg_data;
+ }
+ new_csid_dev->io = request_mem_region(new_csid_dev->mem->start,
+ resource_size(new_csid_dev->mem), pdev->name);
+ if (!new_csid_dev->io) {
+ pr_err("%s: no valid mem region\n", __func__);
+ rc = -EBUSY;
+ goto csid_invalid_vreg_data;
+ }
+
+ new_csid_dev->pdev = pdev;
+ new_csid_dev->msm_sd.sd.internal_ops = &msm_csid_internal_ops;
+ new_csid_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(new_csid_dev->msm_sd.sd.name,
+ ARRAY_SIZE(new_csid_dev->msm_sd.sd.name), "msm_csid");
+ media_entity_init(&new_csid_dev->msm_sd.sd.entity, 0, NULL, 0);
+ new_csid_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ new_csid_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_CSID;
+ new_csid_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x5;
+ msm_sd_register(&new_csid_dev->msm_sd);
+
+#ifdef CONFIG_COMPAT
+ msm_cam_copy_v4l2_subdev_fops(&msm_csid_v4l2_subdev_fops);
+ msm_csid_v4l2_subdev_fops.compat_ioctl32 = msm_csid_subdev_fops_ioctl32;
+ new_csid_dev->msm_sd.sd.devnode->fops = &msm_csid_v4l2_subdev_fops;
+#endif
+
+ rc = request_irq(new_csid_dev->irq->start, msm_csid_irq,
+ IRQF_TRIGGER_RISING, "csid", new_csid_dev);
+ if (rc < 0) {
+ release_mem_region(new_csid_dev->mem->start,
+ resource_size(new_csid_dev->mem));
+ pr_err("%s: irq request fail\n", __func__);
+ rc = -EBUSY;
+ goto csid_invalid_vreg_data;
+ }
+ disable_irq(new_csid_dev->irq->start);
+
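+ /*
+ * Select the register map, lane-assignment table and DTS HW version
+ * that match the compatible string.
+ */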
+ if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v2.0")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v2_0;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v2_0;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V20;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v2.2")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v2_2;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v2_2;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V22;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.0")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_0;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_0;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V30;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v4.0")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_0;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_0;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V40;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.1")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_1;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_1;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V31;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.2")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_2;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_2;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V32;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.4.1")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_4_1;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V34_1;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_4_1;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.4.2")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_4_2;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V34_2;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_4_2;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.6.0")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_6_0;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V36;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_6_0;
+ } else if (of_device_is_compatible(new_csid_dev->pdev->dev.of_node,
+ "qcom,csid-v3.5")) {
+ new_csid_dev->ctrl_reg->csid_reg = csid_v3_5;
+ new_csid_dev->ctrl_reg->csid_lane_assign =
+ csid_lane_assign_v3_5;
+ new_csid_dev->hw_dts_version = CSID_VERSION_V35;
+ } else {
+ pr_err("%s:%d, invalid hw version : 0x%x", __func__, __LINE__,
+ new_csid_dev->hw_dts_version);
+ rc = -EINVAL;
+ goto csid_invalid_vreg_data;
+ }
+
+ new_csid_dev->csid_state = CSID_POWER_DOWN;
+ return 0;
+
+csid_invalid_vreg_data:
+ kfree(new_csid_dev->csid_vreg);
+csid_no_resource:
+ mutex_destroy(&new_csid_dev->mutex);
+ kfree(new_csid_dev->ctrl_reg);
+ kfree(new_csid_dev);
+ return rc;
+}
+
+static const struct of_device_id msm_csid_dt_match[] = {
+ {.compatible = "qcom,csid"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_csid_dt_match);
+
+static struct platform_driver csid_driver = {
+ .probe = csid_probe,
+ .driver = {
+ .name = MSM_CSID_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_csid_dt_match,
+ },
+};
+
+static int __init msm_csid_init_module(void)
+{
+ return platform_driver_register(&csid_driver);
+}
+
+static void __exit msm_csid_exit_module(void)
+{
+ platform_driver_unregister(&csid_driver);
+}
+
+module_init(msm_csid_init_module);
+module_exit(msm_csid_exit_module);
+MODULE_DESCRIPTION("MSM CSID driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
new file mode 100644
index 000000000000..f48830b5ba3c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSID_H
+#define MSM_CSID_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_cam_sensor.h>
+#include "msm_sd.h"
+
+#define CSID_NUM_CLK_MAX 16
+
+enum csiphy_lane_assign {
+ PHY_LANE_D0,
+ PHY_LANE_CLK,
+ PHY_LANE_D1,
+ PHY_LANE_D2,
+ PHY_LANE_D3,
+ PHY_LANE_MAX,
+};
+
+struct csid_reg_parms_t {
+/* MIPI CSID registers */
+ uint32_t csid_hw_version_addr;
+ uint32_t csid_core_ctrl_0_addr;
+ uint32_t csid_core_ctrl_1_addr;
+ uint32_t csid_rst_cmd_addr;
+ uint32_t csid_cid_lut_vc_0_addr;
+ uint32_t csid_cid_lut_vc_1_addr;
+ uint32_t csid_cid_lut_vc_2_addr;
+ uint32_t csid_cid_lut_vc_3_addr;
+ uint32_t csid_cid_n_cfg_addr;
+ uint32_t csid_irq_clear_cmd_addr;
+ uint32_t csid_irq_mask_addr;
+ uint32_t csid_irq_status_addr;
+ uint32_t csid_captured_unmapped_long_pkt_hdr_addr;
+ uint32_t csid_captured_mmaped_long_pkt_hdr_addr;
+ uint32_t csid_captured_short_pkt_addr;
+ uint32_t csid_captured_long_pkt_hdr_addr;
+ uint32_t csid_captured_long_pkt_ftr_addr;
+ uint32_t csid_pif_misr_dl0_addr;
+ uint32_t csid_pif_misr_dl1_addr;
+ uint32_t csid_pif_misr_dl2_addr;
+ uint32_t csid_pif_misr_dl3_addr;
+ uint32_t csid_stats_total_pkts_rcvd_addr;
+ uint32_t csid_stats_ecc_addr;
+ uint32_t csid_stats_crc_addr;
+ uint32_t csid_tg_ctrl_addr;
+ uint32_t csid_tg_vc_cfg_addr;
+ uint32_t csid_tg_dt_n_cfg_0_addr;
+ uint32_t csid_tg_dt_n_cfg_1_addr;
+ uint32_t csid_tg_dt_n_cfg_2_addr;
+ uint32_t csid_rst_done_irq_bitshift;
+ uint32_t csid_rst_stb_all;
+ uint32_t csid_dl_input_sel_shift;
+ uint32_t csid_phy_sel_shift;
+ uint32_t csid_version;
+ uint32_t csid_3p_ctrl_0_addr;
+ uint32_t csid_3p_pkt_hdr_addr;
+ uint32_t csid_test_bus_ctrl;
+ uint32_t csid_irq_mask_val;
+ uint32_t csid_err_lane_overflow_offset_2p;
+ uint32_t csid_err_lane_overflow_offset_3p;
+ uint32_t csid_phy_sel_shift_3p;
+};
+
+struct csid_ctrl_t {
+ struct csid_reg_parms_t csid_reg;
+ uint8_t *csid_lane_assign;
+};
+
+enum msm_csid_state_t {
+ CSID_POWER_UP,
+ CSID_POWER_DOWN,
+};
+
+struct csid_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct resource *mem;
+ struct resource *irq;
+ struct resource *io;
+ struct regulator *csi_vdd;
+ void __iomem *base;
+ struct mutex mutex;
+ struct completion reset_complete;
+ uint32_t hw_version;
+ uint32_t hw_dts_version;
+ enum msm_csid_state_t csid_state;
+ struct csid_ctrl_t *ctrl_reg;
+ uint32_t num_clk;
+ uint32_t num_clk_src_info;
+ struct regulator *reg_ptr;
+ struct clk *csid_clk[CSID_NUM_CLK_MAX];
+ uint32_t csid_clk_index;
+ uint32_t csid_max_clk;
+ uint32_t csid_3p_enabled;
+ struct camera_vreg_t *csid_vreg;
+ struct regulator *csid_reg_ptr[MAX_REGULATOR];
+ int32_t regulator_count;
+ uint8_t is_testmode;
+ struct msm_camera_csid_testmode_parms testmode_params;
+ uint32_t csid_sof_debug;
+ uint32_t csid_lane_cnt;
+};
+
+#define VIDIOC_MSM_CSID_RELEASE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 12, struct v4l2_subdev*)
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/Makefile b/drivers/media/platform/msm/camera_v2/sensor/csiphy/Makefile
new file mode 100644
index 000000000000..e0cbdb879224
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/Makefile
@@ -0,0 +1,4 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSM_CSIPHY) += msm_csiphy.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h
new file mode 100644
index 000000000000..3b9213c4ca28
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_2_0_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_2_0_HWREG_H
+#define MSM_CSIPHY_2_0_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v2_0 = {
+ /*MIPI CSI PHY registers*/
+ 0x17C,
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x110,
+ 0x128,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x180,
+ 0x1A0,
+ 0x6F,
+ 0x1A4,
+ 0x1C0,
+ 0x1C4,
+ 0x4,
+ 0x1E0,
+ 0x1E8,
+ 0x0,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h
new file mode 100644
index 000000000000..7ed88c564c5a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_2_2_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_2_2_HWREG_H
+#define MSM_CSIPHY_2_2_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v2_2 = {
+ /*MIPI CSI PHY registers*/
+ 0x17C,
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x110,
+ 0x128,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x180,
+ 0x1A0,
+ 0x6F,
+ 0x1A4,
+ 0x1C0,
+ 0x1C4,
+ 0x4,
+ 0x1E0,
+ 0x1E8,
+ 0x1,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h
new file mode 100644
index 000000000000..238fef0f4d5a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_0_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_0_HWREG_H
+#define MSM_CSIPHY_3_0_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_0 = {
+ /*MIPI CSI PHY registers*/
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x110,
+ 0x128,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x188,
+ 0x18C,
+ 0x1AC,
+ 0x3F,
+ 0x1AC,
+ 0x1CC,
+ 0x1CC,
+ 0x4,
+ 0x1EC,
+ 0x1F4,
+ 0x10,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h
new file mode 100644
index 000000000000..b9c3c28a3a3e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_1_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_1_HWREG_H
+#define MSM_CSIPHY_3_1_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_1 = {
+ /*MIPI CSI PHY registers*/
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x1C,
+ 0x28,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x188,
+ 0x18C,
+ 0x1AC,
+ 0x3F,
+ 0x1AC,
+ 0x1CC,
+ 0x1CC,
+ 0x4,
+ 0x1EC,
+ 0x1F4,
+ 0x31,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h
new file mode 100644
index 000000000000..77129f08c8c2
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_2_hwreg.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_2_HWREG_H
+#define MSM_CSIPHY_3_2_HWREG_H
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_2 = {
+ /*MIPI CSI PHY registers*/
+ 0x0,
+ 0x4,
+ 0x8,
+ 0xC,
+ 0x10,
+ 0x100,
+ 0x104,
+ 0x108,
+ 0x10C,
+ 0x110,
+ 0x128,
+ 0x140,
+ 0x144,
+ 0x164,
+ 0x188,
+ 0x18C,
+ 0x1AC,
+ 0x3F,
+ 0x1AC,
+ 0x1CC,
+ 0x1CC,
+ 0x4,
+ 0x1EC,
+ 0x1F4,
+ 0x32,
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h
new file mode 100644
index 000000000000..8255884a022c
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_4_2_hwreg.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_4_2_HWREG_H
+#define MSM_CSIPHY_3_4_2_HWREG_H
+
+#define ULPM_WAKE_UP_TIMER_MODE 2
+#define GLITCH_ELIMINATION_NUM 0x12 /* bit [6:4] */
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_4_2 = {
+ .mipi_csiphy_interrupt_status0_addr = 0x8B0,
+ .mipi_csiphy_interrupt_clear0_addr = 0x858,
+ .mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+};
+
+struct csiphy_reg_3ph_parms_t csiphy_v3_4_2_3ph = {
+ /*MIPI CSI PHY registers*/
+ {0x814, 0x0},
+ {0x818, 0x1},
+ {0x188, 0x7F},
+ {0x18C, 0x7F},
+ {0x190, 0x0},
+ {0x104, 0x6},
+ {0x108, 0x0},
+ {0x10c, 0x0},
+ {0x114, 0x20},
+ {0x118, 0x3E},
+ {0x11c, 0x41},
+ {0x120, 0x41},
+ {0x124, 0x7F},
+ {0x128, 0x0},
+ {0x12c, 0x0},
+ {0x130, 0x1},
+ {0x134, 0x0},
+ {0x138, 0x0},
+ {0x13C, 0x10},
+ {0x140, 0x1},
+ {0x144, GLITCH_ELIMINATION_NUM},
+ {0x148, 0xFE},
+ {0x14C, 0x1},
+ {0x154, 0x0},
+ {0x15C, 0x33},
+ {0x160, ULPM_WAKE_UP_TIMER_MODE},
+ {0x164, 0x48},
+ {0x168, 0xA0},
+ {0x16C, 0x17},
+ {0x170, 0x41},
+ {0x174, 0x41},
+ {0x178, 0x3E},
+ {0x17C, 0x0},
+ {0x180, 0x0},
+ {0x184, 0x7F},
+ {0x1cc, 0x10},
+ {0x81c, 0x6},
+ {0x82c, 0xFF},
+ {0x830, 0xFF},
+ {0x834, 0xFB},
+ {0x838, 0xFF},
+ {0x83c, 0x7F},
+ {0x840, 0xFF},
+ {0x844, 0xFF},
+ {0x848, 0xEF},
+ {0x84c, 0xFF},
+ {0x850, 0xFF},
+ {0x854, 0xFF},
+ {0x28, 0x0},
+ {0x800, 0x2},
+ {0x0, 0x8E},
+ {0x4, 0x8},
+ {0x8, 0x0},
+ {0xC, 0xFF},
+ {0x10, 0x56},
+ {0x2C, 0x1},
+ {0x30, 0x0},
+ {0x34, 0x3},
+ {0x38, 0xfe},
+ {0x3C, 0xB8},
+ {0x1C, 0xE7},
+ {0x14, 0x0},
+ {0x14, 0x60}
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
new file mode 100644
index 000000000000..cf1b0ad2dd1b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_3_5_HWREG_H
+#define MSM_CSIPHY_3_5_HWREG_H
+
+#define ULPM_WAKE_UP_TIMER_MODE 2
+#define GLITCH_ELIMINATION_NUM 0x12 /* bit [6:4] */
+
+#include <sensor/csiphy/msm_csiphy.h>
+
+struct csiphy_reg_parms_t csiphy_v3_5 = {
+ .mipi_csiphy_interrupt_status0_addr = 0x8B0,
+ .mipi_csiphy_interrupt_clear0_addr = 0x858,
+ .mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+};
+
+struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = {
+ /*MIPI CSI PHY registers*/
+ {0x814, 0x0},
+ {0x818, 0x1},
+ {0x188, 0x7F},
+ {0x18C, 0x7F},
+ {0x190, 0x0},
+ {0x104, 0x6},
+ {0x108, 0x0},
+ {0x10c, 0x0},
+ {0x114, 0x20},
+ {0x118, 0x3E},
+ {0x11c, 0x41},
+ {0x120, 0x41},
+ {0x124, 0x7F},
+ {0x128, 0x0},
+ {0x12c, 0x0},
+ {0x130, 0x1},
+ {0x134, 0x0},
+ {0x138, 0x0},
+ {0x13C, 0x10},
+ {0x140, 0x1},
+ {0x144, GLITCH_ELIMINATION_NUM},
+ {0x148, 0xFE},
+ {0x14C, 0x1},
+ {0x154, 0x0},
+ {0x15C, 0x33},
+ {0x160, ULPM_WAKE_UP_TIMER_MODE},
+ {0x164, 0x48},
+ {0x168, 0xA0},
+ {0x16C, 0x17},
+ {0x170, 0x41},
+ {0x174, 0x41},
+ {0x178, 0x3E},
+ {0x17C, 0x0},
+ {0x180, 0x0},
+ {0x184, 0x7F},
+ {0x1cc, 0x10},
+ {0x81c, 0x6},
+ {0x82c, 0xFF},
+ {0x830, 0xFF},
+ {0x834, 0xFB},
+ {0x838, 0xFF},
+ {0x83c, 0x7F},
+ {0x840, 0xFF},
+ {0x844, 0xFF},
+ {0x848, 0xEF},
+ {0x84c, 0xFF},
+ {0x850, 0xFF},
+ {0x854, 0xFF},
+ {0x28, 0x0},
+ {0x800, 0x0},
+ {0x0, 0xCF},
+ {0x4, 0x8},
+ {0x8, 0x0},
+ {0xC, 0xA5},
+ {0x10, 0x52},
+ {0x2C, 0x1},
+ {0x30, 0x2},
+ {0x34, 0x3},
+ {0x38, 0x1},
+ {0x3C, 0xB8},
+ {0x1C, 0xA},
+ {0x14, 0x0},
+ {0x0, 0x0},
+};
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
new file mode 100644
index 000000000000..fc397e675840
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -0,0 +1,1505 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include "msm_csiphy.h"
+#include "msm_sd.h"
+#include "include/msm_csiphy_2_0_hwreg.h"
+#include "include/msm_csiphy_2_2_hwreg.h"
+#include "include/msm_csiphy_3_0_hwreg.h"
+#include "include/msm_csiphy_3_1_hwreg.h"
+#include "include/msm_csiphy_3_2_hwreg.h"
+#include "include/msm_csiphy_3_4_2_hwreg.h"
+#include "include/msm_csiphy_3_5_hwreg.h"
+#include "cam_hw_ops.h"
+
+#define DBG_CSIPHY 0
+#define SOF_DEBUG_ENABLE 1
+#define SOF_DEBUG_DISABLE 0
+
+#define V4L2_IDENT_CSIPHY 50003
+#define CSIPHY_VERSION_V22 0x01
+#define CSIPHY_VERSION_V20 0x00
+#define CSIPHY_VERSION_V30 0x10
+#define CSIPHY_VERSION_V31 0x31
+#define CSIPHY_VERSION_V32 0x32
+#define CSIPHY_VERSION_V342 0x342
+#define CSIPHY_VERSION_V35 0x35
+#define MSM_CSIPHY_DRV_NAME "msm_csiphy"
+#define CLK_LANE_OFFSET 1
+#define NUM_LANES_OFFSET 4
+
+#define CSI_3PHASE_HW 1
+#define MAX_LANES 4
+#define CLOCK_OFFSET 0x700
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static struct v4l2_file_operations msm_csiphy_v4l2_subdev_fops;
+
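+/*
+ * Program the CPHY interrupt configuration: write the per-version values
+ * for common control registers 11-21 from the 3-phase register table.
+ */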
+static void msm_csiphy_cphy_irq_config(
+ struct csiphy_device *csiphy_dev,
+ struct msm_camera_csiphy_params *csiphy_params)
+{
+ void __iomem *csiphybase;
+
+ csiphybase = csiphy_dev->base;
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl11.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl11.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl12.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl12.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl13.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl13.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl14.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl14.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl15.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl15.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl16.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl16.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl17.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl17.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl18.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl18.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl19.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl19.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl20.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl20.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl21.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl21.addr);
+}
+
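+/*
+ * Configure the 3-phase lanes: each set bit i in the 3-bit lane mask
+ * enables bit (i << 1) + 1 of the lane-enable register, and the per-lane
+ * registers live at offset 0x200 * i. The settle count is split into a
+ * high byte (lnn_ctrl2) and a low byte (lnn_ctrl3).
+ */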
+static int msm_csiphy_3phase_lane_config(
+ struct csiphy_device *csiphy_dev,
+ struct msm_camera_csiphy_params *csiphy_params)
+{
+ uint8_t i = 0;
+ uint16_t lane_mask = 0, lane_enable = 0, temp;
+ void __iomem *csiphybase;
+
+ csiphybase = csiphy_dev->base;
+ lane_mask = csiphy_params->lane_mask & 0x7;
+ while (lane_mask != 0) {
+ temp = (i << 1)+1;
+ lane_enable |= ((lane_mask & 0x1) << temp);
+ lane_mask >>= 1;
+ i++;
+ }
+ msm_camera_io_w(lane_enable,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.addr);
+ lane_mask = csiphy_params->lane_mask & 0x7;
+ i = 0;
+ while (lane_mask & 0x7) {
+ if (!(lane_mask & 0x1)) {
+ i++;
+ lane_mask >>= 1;
+ continue;
+ }
+
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl21.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl21.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl23.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl23.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl26.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl26.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl27.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl27.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl1.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl1.addr + 0x200*i);
+ msm_camera_io_w(((csiphy_params->settle_cnt >> 8) & 0xff),
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl2.addr + 0x200*i);
+ msm_camera_io_w((csiphy_params->settle_cnt & 0xff),
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl3.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl5.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl5.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl6.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl6.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl7.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl7.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl8.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl8.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl9.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl9.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl10.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl10.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl11.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl11.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl12.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl12.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl15.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl15.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl16.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl16.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl17.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl17.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl18.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl18.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl19.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl19.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl23.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl23.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl24.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl24.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl28.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl28.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl29.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl29.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl30.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl30.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl33.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl33.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl34.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl34.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl35.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl35.addr + 0x200*i);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl36.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl36.addr + 0x200*i);
+
+ if (ULPM_WAKE_UP_TIMER_MODE == 0x22) {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl51.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.mipi_csiphy_3ph_lnn_ctrl51.addr +
+ 0x200*i);
+ }
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl25.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i);
+
+ lane_mask >>= 1;
+ i++;
+ }
+ if (csiphy_params->combo_mode == 1) {
+ msm_camera_io_w(0x2,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl7.addr);
+ } else {
+ msm_camera_io_w(0x6,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl7.addr);
+ }
+ /* Delay for stabilizing the regulator */
+ usleep_range(10, 15);
+ msm_csiphy_cphy_irq_config(csiphy_dev, csiphy_params);
+ return 0;
+}
+
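+/*
+ * Configure the 2-phase (D-PHY) lanes: bit 1 of the lane mask is the
+ * clock lane and is programmed at CLOCK_OFFSET, data lane i at offset
+ * 0x200 * i. In combo mode the lane selected by mask 0x8 acts as a
+ * second clock lane.
+ */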
+static int msm_csiphy_2phase_lane_config(
+ struct csiphy_device *csiphy_dev,
+ struct msm_camera_csiphy_params *csiphy_params)
+{
+ uint32_t val = 0, lane_enable = 0, clk_lane, mask = 1;
+ uint16_t lane_mask = 0, i = 0, offset;
+ void __iomem *csiphybase;
+
+ csiphybase = csiphy_dev->base;
+ lane_mask = csiphy_params->lane_mask & 0x1f;
+ for (i = 0; i < MAX_LANES; i++) {
+ if (mask == 0x2) {
+ if (lane_mask & mask)
+ lane_enable |= 0x80;
+ i--;
+ } else if (lane_mask & mask)
+ lane_enable |= 0x1 << (i<<1);
+ mask <<= 1;
+ }
+ CDBG("%s:%d lane_enable: %d\n", __func__, __LINE__, lane_enable);
+
+ msm_camera_io_w(lane_enable,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.addr);
+
+ for (i = 0, mask = 0x1; i < MAX_LANES; i++) {
+ if (!(lane_mask & mask)) {
+ if (mask == 0x2)
+ i--;
+ mask <<= 0x1;
+ continue;
+ }
+ if (mask == 0x2) {
+ val = 4;
+ offset = CLOCK_OFFSET;
+ clk_lane = 1;
+ i--;
+ } else {
+ offset = 0x200*i;
+ val = 0;
+ clk_lane = 0;
+ }
+
+ if (csiphy_params->combo_mode == 1) {
+ val |= 0xA;
+ if (mask == 0x8) {
+ /* lane 6 is second clock lane for combo mode */
+ val |= 0x4;
+ clk_lane = 1;
+ }
+ }
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg7.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg7.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg6.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg6.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg8.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg8.addr + offset);
+ msm_camera_io_w(val, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_misc1.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl15.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl15.addr + offset);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg2.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg2.addr + offset);
+
+ msm_camera_io_w((csiphy_params->settle_cnt & 0xFF),
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg3.addr + offset);
+
+ if (clk_lane == 1) {
+ if (csiphy_dev->hw_version == CSIPHY_VERSION_V342) {
+ msm_camera_io_w(0x80,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg1.addr + offset);
+ }
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg4.data, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg4.addr + offset);
+ } else {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg1.data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg1.addr + offset);
+ }
+ if (csiphy_dev->hw_version == CSIPHY_VERSION_V342 &&
+ csiphy_params->combo_mode == 1) {
+ msm_camera_io_w(0x52,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg5.addr + offset);
+ } else {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg5.data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg5.addr + offset);
+ }
+ if (clk_lane == 1 &&
+ csiphy_dev->hw_version == CSIPHY_VERSION_V342) {
+ msm_camera_io_w(0x1f,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg9.addr + offset);
+ } else {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg9.data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_cfg9.addr + offset);
+ }
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_test_imp.data,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_test_imp.addr + offset);
+ if (csiphy_dev->hw_version == CSIPHY_VERSION_V342) {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl5.data,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl5.addr + offset);
+ }
+ mask <<= 1;
+ }
+ if (csiphy_dev->hw_version == CSIPHY_VERSION_V342) {
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl0.data,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl0.addr);
+ }
+ msm_csiphy_cphy_irq_config(csiphy_dev, csiphy_params);
+ return 0;
+}
+
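+/*
+ * Top-level lane configuration: round the requested timer clock rate,
+ * cap it at csiphy_max_clk and scale settle_cnt down by the max/rounded
+ * ratio when running slower, program the CSID clock mux on HW >= v3.0,
+ * then hand off to the 3-phase or 2-phase path on 3-phase HW (legacy
+ * per-lane programming otherwise).
+ */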
+static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev,
+ struct msm_camera_csiphy_params *csiphy_params)
+{
+ int rc = 0;
+ int j = 0, curr_lane = 0;
+ uint32_t val = 0, clk_rate = 0, round_rate = 0;
+ uint8_t lane_cnt = 0;
+ uint16_t lane_mask = 0;
+ void __iomem *csiphybase;
+ uint8_t csiphy_id = csiphy_dev->pdev->id;
+ int32_t lane_val = 0, lane_right = 0, num_lanes = 0;
+ struct clk **csid_phy_clk_ptr;
+ int ratio = 1;
+
+ csiphybase = csiphy_dev->base;
+ if (!csiphybase) {
+ pr_err("%s: csiphybase NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ csiphy_dev->lane_mask[csiphy_id] |= csiphy_params->lane_mask;
+ lane_mask = csiphy_dev->lane_mask[csiphy_id];
+ lane_cnt = csiphy_params->lane_cnt;
+ if (csiphy_params->lane_cnt < 1 || csiphy_params->lane_cnt > 4) {
+ pr_err("%s: unsupported lane cnt %d\n",
+ __func__, csiphy_params->lane_cnt);
+ return rc;
+ }
+
+ csid_phy_clk_ptr = csiphy_dev->csiphy_clk;
+ if (!csid_phy_clk_ptr) {
+ pr_err("csiphy_timer_src_clk get failed\n");
+ return -EINVAL;
+ }
+
+ clk_rate = (csiphy_params->csiphy_clk > 0)
+ ? csiphy_params->csiphy_clk :
+ csiphy_dev->csiphy_max_clk;
+ round_rate = clk_round_rate(
+ csid_phy_clk_ptr[csiphy_dev->csiphy_clk_index],
+ clk_rate);
+ if (round_rate >= csiphy_dev->csiphy_max_clk)
+ round_rate = csiphy_dev->csiphy_max_clk;
+ else {
+ ratio = csiphy_dev->csiphy_max_clk/round_rate;
+ csiphy_params->settle_cnt = csiphy_params->settle_cnt/ratio;
+ }
+
+ CDBG("set from usr csiphy_clk clk_rate = %u round_rate = %u\n",
+ clk_rate, round_rate);
+ rc = clk_set_rate(
+ csid_phy_clk_ptr[csiphy_dev->csiphy_clk_index],
+ round_rate);
+ if (rc < 0) {
+ pr_err("csiphy_timer_src_clk set failed\n");
+ return rc;
+ }
+
+ CDBG("%s csiphy_params, mask = 0x%x cnt = %d\n",
+ __func__,
+ csiphy_params->lane_mask,
+ csiphy_params->lane_cnt);
+ CDBG("%s csiphy_params, settle cnt = 0x%x csid %d\n",
+ __func__, csiphy_params->settle_cnt,
+ csiphy_params->csid_core);
+
+ if (csiphy_dev->hw_version >= CSIPHY_VERSION_V30) {
+ val = msm_camera_io_r(csiphy_dev->clk_mux_base);
+ if (csiphy_params->combo_mode &&
+ (csiphy_params->lane_mask & 0x18) == 0x18) {
+ val &= ~0xf0;
+ val |= csiphy_params->csid_core << 4;
+ } else {
+ val &= ~0xf;
+ val |= csiphy_params->csid_core;
+ }
+ msm_camera_io_w(val, csiphy_dev->clk_mux_base);
+ CDBG("%s clk mux addr %p val 0x%x\n", __func__,
+ csiphy_dev->clk_mux_base, val);
+ /* ensure write is done */
+ mb();
+ }
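+	/*
+	 * Illustrative example of the mux programming above (csid_core value
+	 * is hypothetical): with csid_core = 2, combo mode and lane mask 0x18,
+	 * the upper nibble of the mux register is set to 2
+	 * (val = (val & ~0xf0) | (2 << 4)); otherwise the lower nibble selects
+	 * the CSID core.
+	 */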
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW) {
+ if (csiphy_params->csi_3phase == 1) {
+ rc = msm_csiphy_3phase_lane_config(csiphy_dev,
+ csiphy_params);
+ csiphy_dev->num_irq_registers = 20;
+ } else {
+ rc = msm_csiphy_2phase_lane_config(csiphy_dev,
+ csiphy_params);
+ csiphy_dev->num_irq_registers = 11;
+ }
+ if (rc < 0) {
+ pr_err("%s:%d: Error in setting lane configuration\n",
+ __func__, __LINE__);
+ }
+ return rc;
+ }
+
+ msm_camera_io_w(0x1, csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_t_init_cfg0_addr);
+ msm_camera_io_w(0x1, csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_t_wakeup_cfg0_addr);
+
+ if (csiphy_dev->hw_version < CSIPHY_VERSION_V30) {
+ val = 0x3;
+ msm_camera_io_w((lane_mask << 2) | val,
+ csiphybase +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_pwr_cfg_addr);
+ msm_camera_io_w(0x10, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(csiphy_params->settle_cnt,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg3_addr);
+ msm_camera_io_w(0x24,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_interrupt_mask0_addr);
+ msm_camera_io_w(0x24,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_interrupt_clear0_addr);
+ } else {
+ val = 0x1;
+ msm_camera_io_w((lane_mask << 1) | val,
+ csiphybase +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_pwr_cfg_addr);
+ msm_camera_io_w(csiphy_params->combo_mode <<
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_mode_config_shift,
+ csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_reset_addr);
+ }
+
+ lane_mask &= 0x1f;
+ while (lane_mask & 0x1f) {
+ if (!(lane_mask & 0x1)) {
+ j++;
+ lane_mask >>= 1;
+ continue;
+ }
+ msm_camera_io_w(0x10,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*j);
+ msm_camera_io_w(csiphy_params->settle_cnt,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg3_addr + 0x40*j);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_mask_val, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_mask_addr + 0x4*j);
+ msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_mask_val, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_clear_addr + 0x4*j);
+ if (csiphy_dev->is_3_1_20nm_hw == 1) {
+ if (j > CLK_LANE_OFFSET) {
+ lane_right = 0x8;
+ num_lanes = (lane_cnt - curr_lane)
+ << NUM_LANES_OFFSET;
+ if (lane_cnt < curr_lane) {
+ pr_err("%s: Lane_cnt is less than curr_lane number\n",
+ __func__);
+ return -EINVAL;
+ }
+ lane_val = lane_right|num_lanes;
+ } else if (j == 1) {
+ lane_val = 0x4;
+ }
+ if (csiphy_params->combo_mode == 1) {
+ /*
+ * In combo mode the clock for the second sensor is
+ * always on the 4th lane. So check whether the
+ * second sensor is a one-lane sensor (lane mask
+ * 0x18) and this is its first data lane
+ * (curr_lane == 0).
+ */
+ if (curr_lane == 0 &&
+ ((csiphy_params->lane_mask &
+ 0x18) == 0x18))
+ lane_val = 0x4;
+ }
+ msm_camera_io_w(lane_val, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_misc1_addr + 0x40*j);
+ msm_camera_io_w(0x17, csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_test_imp + 0x40*j);
+ curr_lane++;
+ }
+ j++;
+ lane_mask >>= 1;
+ }
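+	/*
+	 * Illustrative example of the loop above (mask is hypothetical): a
+	 * lane_mask of 0x15 selects lanes 0, 2 and 4, so their cfg registers
+	 * are written at offsets 0x0, 0x80 and 0x100 (stride 0x40 per lane)
+	 * and their interrupt mask/clear registers at a stride of 0x4.
+	 */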
+ return rc;
+}
+
+static irqreturn_t msm_csiphy_irq(int irq_num, void *data)
+{
+ uint32_t irq;
+ int i;
+ struct csiphy_device *csiphy_dev = data;
+
+ for (i = 0; i < csiphy_dev->num_irq_registers; i++) {
+ irq = msm_camera_io_r(
+ csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_status0_addr + 0x4*i);
+ msm_camera_io_w(irq,
+ csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_clear0_addr + 0x4*i);
+ CDBG("%s MIPI_CSIPHY%d_INTERRUPT_STATUS%d = 0x%x\n",
+ __func__, csiphy_dev->pdev->id, i, irq);
+ msm_camera_io_w(0x0,
+ csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_interrupt_clear0_addr + 0x4*i);
+ }
+ msm_camera_io_w(0x1, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_glbl_irq_cmd_addr);
+ return IRQ_HANDLED;
+}
+
+static void msm_csiphy_reset(struct csiphy_device *csiphy_dev)
+{
+ msm_camera_io_w(0x1, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.mipi_csiphy_glbl_reset_addr);
+ usleep_range(5000, 8000);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.mipi_csiphy_glbl_reset_addr);
+}
+
+static void msm_csiphy_3ph_reset(struct csiphy_device *csiphy_dev)
+{
+ msm_camera_io_w(0x1, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl0.addr);
+ usleep_range(5000, 8000);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl0.addr);
+}
+
+#if DBG_CSIPHY
+static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
+{
+ int rc = 0;
+ if (csiphy_dev == NULL) {
+ pr_err("%s: csiphy_dev NULL\n", __func__);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ if (csiphy_dev->csiphy_state == CSIPHY_POWER_UP) {
+ pr_err("%s: csiphy invalid state %d\n", __func__,
+ csiphy_dev->csiphy_state);
+ rc = -EINVAL;
+ return rc;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ if (csiphy_dev->ref_count++) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return rc;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_CSIPHY, CAMERA_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ csiphy_dev->base = ioremap(csiphy_dev->mem->start,
+ resource_size(csiphy_dev->mem));
+ if (!csiphy_dev->base) {
+ pr_err("%s: csiphy_dev->base NULL\n", __func__);
+ csiphy_dev->ref_count--;
+ rc = -ENOMEM;
+ goto ioremap_fail;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ if (csiphy_dev->hw_dts_version < CSIPHY_VERSION_V30) {
+ rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, 1);
+ } else if (csiphy_dev->hw_dts_version >= CSIPHY_VERSION_V30) {
+ if (!csiphy_dev->clk_mux_mem || !csiphy_dev->clk_mux_io) {
+ pr_err("%s clk mux mem %p io %p\n", __func__,
+ csiphy_dev->clk_mux_mem,
+ csiphy_dev->clk_mux_io);
+ rc = -ENOMEM;
+ goto csiphy_base_fail;
+ }
+ csiphy_dev->clk_mux_base = ioremap(
+ csiphy_dev->clk_mux_mem->start,
+ resource_size(csiphy_dev->clk_mux_mem));
+ if (!csiphy_dev->clk_mux_base) {
+ pr_err("%s: ERROR %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto csiphy_base_fail;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, 1);
+ } else {
+ pr_err("%s: ERROR Invalid CSIPHY Version %d",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ goto csiphy_base_fail;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ if (rc < 0) {
+ pr_err("%s: csiphy clk enable failed\n", __func__);
+ csiphy_dev->ref_count--;
+ goto csiphy_mux_base_fail;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ enable_irq(csiphy_dev->irq->start);
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW)
+ msm_csiphy_3ph_reset(csiphy_dev);
+ else
+ msm_csiphy_reset(csiphy_dev);
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ if (csiphy_dev->hw_dts_version == CSIPHY_VERSION_V30)
+ csiphy_dev->hw_version =
+ msm_camera_io_r(csiphy_dev->base +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_hw_version_addr);
+ else
+ csiphy_dev->hw_version = csiphy_dev->hw_dts_version;
+
+ CDBG("%s:%d called csiphy_dev->hw_version 0x%x\n", __func__, __LINE__,
+ csiphy_dev->hw_version);
+ csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
+ return 0;
+
+csiphy_mux_base_fail:
+ iounmap(csiphy_dev->clk_mux_base);
+ csiphy_dev->clk_mux_base = NULL;
+csiphy_base_fail:
+ iounmap(csiphy_dev->base);
+ csiphy_dev->base = NULL;
+ioremap_fail:
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_CSIPHY,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+}
+#else
+static int msm_csiphy_init(struct csiphy_device *csiphy_dev)
+{
+ int rc = 0;
+ if (csiphy_dev == NULL) {
+ pr_err("%s: csiphy_dev NULL\n", __func__);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ if (csiphy_dev->csiphy_state == CSIPHY_POWER_UP) {
+ pr_err("%s: csiphy invalid state %d\n", __func__,
+ csiphy_dev->csiphy_state);
+ rc = -EINVAL;
+ return rc;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ if (csiphy_dev->ref_count++) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return rc;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_CSIPHY, CAMERA_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+ }
+
+ csiphy_dev->base = ioremap(csiphy_dev->mem->start,
+ resource_size(csiphy_dev->mem));
+ if (!csiphy_dev->base) {
+ pr_err("%s: csiphy_dev->base NULL\n", __func__);
+ csiphy_dev->ref_count--;
+ rc = -ENOMEM;
+ goto ioremap_fail;
+ }
+ if (csiphy_dev->hw_dts_version <= CSIPHY_VERSION_V22) {
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, 1);
+ } else if (csiphy_dev->hw_dts_version >= CSIPHY_VERSION_V30) {
+ if (!csiphy_dev->clk_mux_mem || !csiphy_dev->clk_mux_io) {
+ pr_err("%s clk mux mem %p io %p\n", __func__,
+ csiphy_dev->clk_mux_mem,
+ csiphy_dev->clk_mux_io);
+ rc = -ENOMEM;
+ goto csiphy_base_fail;
+ }
+ csiphy_dev->clk_mux_base = ioremap(
+ csiphy_dev->clk_mux_mem->start,
+ resource_size(csiphy_dev->clk_mux_mem));
+ if (!csiphy_dev->clk_mux_base) {
+ pr_err("%s: ERROR %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto csiphy_base_fail;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ rc = msm_cam_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, 1);
+ } else {
+ pr_err("%s: ERROR Invalid CSIPHY Version %d",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ goto csiphy_base_fail;
+ }
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+ if (rc < 0) {
+ pr_err("%s: csiphy clk enable failed\n", __func__);
+ csiphy_dev->ref_count--;
+ goto csiphy_mux_base_fail;
+ }
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW)
+ msm_csiphy_3ph_reset(csiphy_dev);
+ else
+ msm_csiphy_reset(csiphy_dev);
+
+ CDBG("%s:%d called\n", __func__, __LINE__);
+
+ if (csiphy_dev->hw_dts_version == CSIPHY_VERSION_V30)
+ csiphy_dev->hw_version =
+ msm_camera_io_r(csiphy_dev->base +
+ csiphy_dev->ctrl_reg->
+ csiphy_reg.mipi_csiphy_hw_version_addr);
+ else
+ csiphy_dev->hw_version = csiphy_dev->hw_dts_version;
+
+ csiphy_dev->csiphy_sof_debug = SOF_DEBUG_DISABLE;
+ CDBG("%s:%d called csiphy_dev->hw_version 0x%x\n", __func__, __LINE__,
+ csiphy_dev->hw_version);
+ csiphy_dev->csiphy_state = CSIPHY_POWER_UP;
+ return 0;
+
+csiphy_mux_base_fail:
+ iounmap(csiphy_dev->clk_mux_base);
+ csiphy_dev->clk_mux_base = NULL;
+csiphy_base_fail:
+ iounmap(csiphy_dev->base);
+ csiphy_dev->base = NULL;
+ioremap_fail:
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_CSIPHY,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ return rc;
+}
+#endif
+
+#if DBG_CSIPHY
+static int msm_csiphy_release(struct csiphy_device *csiphy_dev, void *arg)
+{
+ int i = 0;
+ int rc = 0;
+ struct msm_camera_csi_lane_params *csi_lane_params;
+ uint16_t csi_lane_mask;
+ csi_lane_params = (struct msm_camera_csi_lane_params *)arg;
+
+ if (!csiphy_dev || !csiphy_dev->ref_count) {
+ pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+ return 0;
+ }
+
+ if (csiphy_dev->csiphy_state != CSIPHY_POWER_UP) {
+ pr_err("%s: csiphy invalid state %d\n", __func__,
+ csiphy_dev->csiphy_state);
+ return -EINVAL;
+ }
+
+ if (--csiphy_dev->ref_count) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return 0;
+ }
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW) {
+ msm_camera_io_w(0x0,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+ msm_camera_io_w(0x0,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.addr);
+ } else if (csiphy_dev->hw_version < CSIPHY_VERSION_V30) {
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] = 0;
+ for (i = 0; i < 4; i++)
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_pwr_cfg_addr);
+ } else {
+ if (!csi_lane_params) {
+ pr_err("%s:%d failed: csi_lane_params %p\n", __func__,
+ __LINE__, csi_lane_params);
+ return -EINVAL;
+ }
+ csi_lane_mask = (csi_lane_params->csi_lane_mask & 0x1F);
+
+ CDBG("%s csiphy_params, lane assign 0x%x mask = 0x%x\n",
+ __func__,
+ csi_lane_params->csi_lane_assign,
+ csi_lane_params->csi_lane_mask);
+
+ if (!csi_lane_mask)
+ csi_lane_mask = 0x1f;
+
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] &=
+ ~(csi_lane_mask);
+ i = 0;
+ while (csi_lane_mask) {
+ if (csi_lane_mask & 0x1) {
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_misc1_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_test_imp + 0x40*i);
+ }
+ csi_lane_mask >>= 1;
+ i++;
+ }
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_pwr_cfg_addr);
+ }
+
+ disable_irq(csiphy_dev->irq->start);
+
+ if (csiphy_dev->hw_dts_version <= CSIPHY_VERSION_V22) {
+ msm_cam_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, 0);
+ } else if (csiphy_dev->hw_dts_version >= CSIPHY_VERSION_V30) {
+ msm_cam_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, 0);
+ iounmap(csiphy_dev->clk_mux_base);
+ }
+ iounmap(csiphy_dev->base);
+ csiphy_dev->base = NULL;
+ csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
+
+ rc = cam_config_ahb_clk(CAM_AHB_CLIENT_CSIPHY, CAMERA_AHB_SUSPEND_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return rc;
+ }
+ return 0;
+}
+#else
+static int msm_csiphy_release(struct csiphy_device *csiphy_dev, void *arg)
+{
+ int i = 0;
+ struct msm_camera_csi_lane_params *csi_lane_params;
+ uint16_t csi_lane_mask;
+ csi_lane_params = (struct msm_camera_csi_lane_params *)arg;
+
+ if (!csiphy_dev || !csiphy_dev->ref_count) {
+ pr_err("%s csiphy dev NULL / ref_count ZERO\n", __func__);
+ return 0;
+ }
+
+ if (csiphy_dev->csiphy_state != CSIPHY_POWER_UP) {
+ pr_err("%s: csiphy invalid state %d\n", __func__,
+ csiphy_dev->csiphy_state);
+ return -EINVAL;
+ }
+
+ if (--csiphy_dev->ref_count) {
+ CDBG("%s csiphy refcount = %d\n", __func__,
+ csiphy_dev->ref_count);
+ return 0;
+ }
+
+ if (csiphy_dev->csiphy_3phase == CSI_3PHASE_HW) {
+ msm_camera_io_w(0x0,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl5.addr);
+ msm_camera_io_w(0x0,
+ csiphy_dev->base + csiphy_dev->ctrl_reg->csiphy_3ph_reg.
+ mipi_csiphy_3ph_cmn_ctrl6.addr);
+ } else if (csiphy_dev->hw_version < CSIPHY_VERSION_V30) {
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] = 0;
+ for (i = 0; i < 4; i++)
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_pwr_cfg_addr);
+ } else {
+ if (!csi_lane_params) {
+ pr_err("%s:%d failed: csi_lane_params %p\n", __func__,
+ __LINE__, csi_lane_params);
+ return -EINVAL;
+ }
+ csi_lane_mask = (csi_lane_params->csi_lane_mask & 0x1F);
+
+ CDBG("%s csiphy_params, lane assign 0x%x mask = 0x%x\n",
+ __func__,
+ csi_lane_params->csi_lane_assign,
+ csi_lane_params->csi_lane_mask);
+
+ if (!csi_lane_mask)
+ csi_lane_mask = 0x1f;
+
+ csiphy_dev->lane_mask[csiphy_dev->pdev->id] &=
+ ~(csi_lane_mask);
+ i = 0;
+ while (csi_lane_mask) {
+ if (csi_lane_mask & 0x1) {
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_cfg2_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_misc1_addr + 0x40*i);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnn_test_imp + 0x40*i);
+ }
+ csi_lane_mask >>= 1;
+ i++;
+ }
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_lnck_cfg2_addr);
+ msm_camera_io_w(0x0, csiphy_dev->base +
+ csiphy_dev->ctrl_reg->csiphy_reg.
+ mipi_csiphy_glbl_pwr_cfg_addr);
+ }
+
+ if (csiphy_dev->hw_dts_version <= CSIPHY_VERSION_V22) {
+ msm_cam_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, 0);
+ } else if (csiphy_dev->hw_dts_version >= CSIPHY_VERSION_V30) {
+ msm_cam_clk_enable(&csiphy_dev->pdev->dev,
+ csiphy_dev->csiphy_clk_info, csiphy_dev->csiphy_clk,
+ csiphy_dev->num_clk, 0);
+ iounmap(csiphy_dev->clk_mux_base);
+ }
+
+ iounmap(csiphy_dev->base);
+ csiphy_dev->base = NULL;
+ csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
+ if (cam_config_ahb_clk(CAM_AHB_CLIENT_CSIPHY,
+ CAMERA_AHB_SUSPEND_VOTE) < 0)
+ pr_err("%s: failed to remove vote for AHB\n", __func__);
+ return 0;
+}
+
+#endif
+static int32_t msm_csiphy_cmd(struct csiphy_device *csiphy_dev, void *arg)
+{
+ int rc = 0;
+ struct csiphy_cfg_data *cdata = (struct csiphy_cfg_data *)arg;
+ struct msm_camera_csiphy_params csiphy_params;
+ struct msm_camera_csi_lane_params csi_lane_params;
+ if (!csiphy_dev || !cdata) {
+ pr_err("%s: csiphy_dev NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (cdata->cfgtype) {
+ case CSIPHY_INIT:
+ rc = msm_csiphy_init(csiphy_dev);
+ break;
+ case CSIPHY_CFG:
+ if (copy_from_user(&csiphy_params,
+ (void *)cdata->cfg.csiphy_params,
+ sizeof(struct msm_camera_csiphy_params))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ csiphy_dev->csiphy_sof_debug = SOF_DEBUG_DISABLE;
+ rc = msm_csiphy_lane_config(csiphy_dev, &csiphy_params);
+ break;
+ case CSIPHY_RELEASE:
+ if (copy_from_user(&csi_lane_params,
+ (void *)cdata->cfg.csi_lane_params,
+ sizeof(struct msm_camera_csi_lane_params))) {
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ if (csiphy_dev->csiphy_sof_debug == SOF_DEBUG_ENABLE)
+ disable_irq(csiphy_dev->irq->start);
+ rc = msm_csiphy_release(csiphy_dev, &csi_lane_params);
+ break;
+ default:
+ pr_err("%s: %d failed\n", __func__, __LINE__);
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+ return rc;
+}
+
+static int32_t msm_csiphy_get_subdev_id(struct csiphy_device *csiphy_dev,
+ void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+ if (!subdev_id) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *subdev_id = csiphy_dev->pdev->id;
+ pr_debug("%s:%d subdev_id %d\n", __func__, __LINE__, *subdev_id);
+ return 0;
+}
+
+static long msm_csiphy_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = -ENOIOCTLCMD;
+ struct csiphy_device *csiphy_dev = v4l2_get_subdevdata(sd);
+ CDBG("%s:%d id %d\n", __func__, __LINE__, csiphy_dev->pdev->id);
+ mutex_lock(&csiphy_dev->mutex);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ rc = msm_csiphy_get_subdev_id(csiphy_dev, arg);
+ break;
+ case VIDIOC_MSM_CSIPHY_IO_CFG:
+ rc = msm_csiphy_cmd(csiphy_dev, arg);
+ break;
+ case VIDIOC_MSM_CSIPHY_RELEASE:
+ case MSM_SD_SHUTDOWN:
+ rc = msm_csiphy_release(csiphy_dev, arg);
+ break;
+ case MSM_SD_NOTIFY_FREEZE:
+ if (!csiphy_dev || !csiphy_dev->ctrl_reg ||
+ !csiphy_dev->ref_count)
+ break;
+ csiphy_dev->csiphy_sof_debug = SOF_DEBUG_ENABLE;
+ enable_irq(csiphy_dev->irq->start);
+ break;
+ default:
+ pr_err_ratelimited("%s: command not found\n", __func__);
+ }
+ mutex_unlock(&csiphy_dev->mutex);
+ CDBG("%s:%d\n", __func__, __LINE__);
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_csiphy_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct csiphy_cfg_data32 *u32 =
+ (struct csiphy_cfg_data32 *)arg;
+ struct csiphy_cfg_data csiphy_data;
+
+ switch (cmd) {
+ case VIDIOC_MSM_CSIPHY_IO_CFG32:
+ cmd = VIDIOC_MSM_CSIPHY_IO_CFG;
+ csiphy_data.cfgtype = u32->cfgtype;
+ csiphy_data.cfg.csiphy_params =
+ compat_ptr(u32->cfg.csiphy_params);
+ return msm_csiphy_subdev_ioctl(sd, cmd, &csiphy_data);
+ default:
+ return msm_csiphy_subdev_ioctl(sd, cmd, arg);
+ }
+}
+
+static long msm_csiphy_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_csiphy_subdev_do_ioctl);
+}
+#endif
+
+static const struct v4l2_subdev_internal_ops msm_csiphy_internal_ops;
+
+static struct v4l2_subdev_core_ops msm_csiphy_subdev_core_ops = {
+ .ioctl = &msm_csiphy_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_csiphy_subdev_ops = {
+ .core = &msm_csiphy_subdev_core_ops,
+};
+
+static int msm_csiphy_get_clk_info(struct csiphy_device *csiphy_dev,
+ struct platform_device *pdev)
+{
+ uint32_t count;
+ int i, rc;
+ uint32_t rates[CSIPHY_NUM_CLK_MAX];
+
+ struct device_node *of_node;
+ of_node = pdev->dev.of_node;
+
+ count = of_property_count_strings(of_node, "clock-names");
+ csiphy_dev->num_clk = count;
+
+ CDBG("%s: count = %d\n", __func__, count);
+ if (count == 0) {
+ pr_err("%s: no clocks found in device tree, count=%d",
+ __func__, count);
+ return 0;
+ }
+
+ if (count > CSIPHY_NUM_CLK_MAX) {
+ pr_err("%s: invalid count=%d, max is %d\n", __func__,
+ count, CSIPHY_NUM_CLK_MAX);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node, "clock-names",
+ i, &(csiphy_dev->csiphy_clk_info[i].clk_name));
+ CDBG("%s: clock-names[%d] = %s\n", __func__,
+ i, csiphy_dev->csiphy_clk_info[i].clk_name);
+ if (rc < 0) {
+ pr_err("%s:%d, failed\n", __func__, __LINE__);
+ return rc;
+ }
+ }
+ rc = of_property_read_u32_array(of_node, "qcom,clock-rates",
+ rates, count);
+ if (rc < 0) {
+ pr_err("%s:%d, failed", __func__, __LINE__);
+ return rc;
+ }
+ for (i = 0; i < count; i++) {
+ csiphy_dev->csiphy_clk_info[i].clk_rate = (rates[i] == 0) ?
+ (long)-1 : rates[i];
+ if (!strcmp(csiphy_dev->csiphy_clk_info[i].clk_name,
+ "csiphy_timer_src_clk")) {
+ CDBG("%s:%d, copy csiphy_timer_src_clk",
+ __func__, __LINE__);
+ csiphy_dev->csiphy_max_clk = rates[i];
+ csiphy_dev->csiphy_clk_index = i;
+ }
+ CDBG("%s: clk_rate[%d] = %ld\n", __func__, i,
+ csiphy_dev->csiphy_clk_info[i].clk_rate);
+ }
+ return 0;
+}
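+/*
+ * Illustrative device tree fragment for the parsing above (clock names and
+ * rates are hypothetical, except "csiphy_timer_src_clk", which the driver
+ * matches to record the timer clock index and its maximum rate; a rate of 0
+ * is stored internally as -1):
+ *
+ *	clock-names = "csiphy_clk", "csiphy_timer_src_clk";
+ *	qcom,clock-rates = <0 200000000>;
+ */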
+
+static int csiphy_probe(struct platform_device *pdev)
+{
+ struct csiphy_device *new_csiphy_dev;
+ int rc = 0;
+
+ new_csiphy_dev = kzalloc(sizeof(struct csiphy_device), GFP_KERNEL);
+ if (!new_csiphy_dev) {
+ pr_err("%s: not enough memory\n", __func__);
+ return -ENOMEM;
+ }
+ new_csiphy_dev->is_3_1_20nm_hw = 0;
+ new_csiphy_dev->ctrl_reg = NULL;
+ new_csiphy_dev->ctrl_reg = kzalloc(sizeof(struct csiphy_ctrl_t),
+ GFP_KERNEL);
+ if (!new_csiphy_dev->ctrl_reg) {
+ pr_err("%s:%d kzalloc failed\n", __func__, __LINE__);
+ kfree(new_csiphy_dev);
+ return -ENOMEM;
+ }
+ v4l2_subdev_init(&new_csiphy_dev->msm_sd.sd, &msm_csiphy_subdev_ops);
+ v4l2_set_subdevdata(&new_csiphy_dev->msm_sd.sd, new_csiphy_dev);
+ platform_set_drvdata(pdev, &new_csiphy_dev->msm_sd.sd);
+
+ mutex_init(&new_csiphy_dev->mutex);
+
+ if (pdev->dev.of_node) {
+ of_property_read_u32((&pdev->dev)->of_node,
+ "cell-index", &pdev->id);
+ CDBG("%s: device id = %d\n", __func__, pdev->id);
+ }
+
+ /* ToDo: Enable 3phase clock for dynamic clock enable/disable */
+ rc = msm_csiphy_get_clk_info(new_csiphy_dev, pdev);
+ if (rc < 0) {
+ pr_err("%s: msm_csiphy_get_clk_info() failed", __func__);
+ goto csiphy_no_resource;
+ }
+
+ new_csiphy_dev->mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "csiphy");
+ if (!new_csiphy_dev->mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto csiphy_no_resource;
+ }
+ new_csiphy_dev->irq = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "csiphy");
+ if (!new_csiphy_dev->irq) {
+ pr_err("%s: no irq resource?\n", __func__);
+ rc = -ENODEV;
+ goto csiphy_no_resource;
+ }
+ new_csiphy_dev->io = request_mem_region(new_csiphy_dev->mem->start,
+ resource_size(new_csiphy_dev->mem), pdev->name);
+ if (!new_csiphy_dev->io) {
+ pr_err("%s: no valid mem region\n", __func__);
+ rc = -EBUSY;
+ goto csiphy_no_resource;
+ }
+
+ rc = request_irq(new_csiphy_dev->irq->start, msm_csiphy_irq,
+ IRQF_TRIGGER_RISING, "csiphy", new_csiphy_dev);
+ if (rc < 0) {
+ release_mem_region(new_csiphy_dev->mem->start,
+ resource_size(new_csiphy_dev->mem));
+ pr_err("%s: irq request fail\n", __func__);
+ rc = -EBUSY;
+ goto csiphy_no_resource;
+ }
+ disable_irq(new_csiphy_dev->irq->start);
+
+ new_csiphy_dev->clk_mux_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "csiphy_clk_mux");
+ if (new_csiphy_dev->clk_mux_mem) {
+ new_csiphy_dev->clk_mux_io = request_mem_region(
+ new_csiphy_dev->clk_mux_mem->start,
+ resource_size(new_csiphy_dev->clk_mux_mem),
+ new_csiphy_dev->clk_mux_mem->name);
+ if (!new_csiphy_dev->clk_mux_io)
+ pr_err("%s: ERROR %d\n", __func__, __LINE__);
+ }
+
+ new_csiphy_dev->pdev = pdev;
+ new_csiphy_dev->msm_sd.sd.internal_ops = &msm_csiphy_internal_ops;
+ new_csiphy_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(new_csiphy_dev->msm_sd.sd.name,
+ ARRAY_SIZE(new_csiphy_dev->msm_sd.sd.name), "msm_csiphy");
+ media_entity_init(&new_csiphy_dev->msm_sd.sd.entity, 0, NULL, 0);
+ new_csiphy_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ new_csiphy_dev->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_CSIPHY;
+ new_csiphy_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x4;
+ msm_sd_register(&new_csiphy_dev->msm_sd);
+
+ new_csiphy_dev->csiphy_3phase = 0;
+ new_csiphy_dev->num_irq_registers = 0x8;
+
+ if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v2.0")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v2_0;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V20;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v2.2")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v2_2;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V22;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.0")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_0;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V30;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.1")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_1;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V31;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.1.1")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_1;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V31;
+ new_csiphy_dev->is_3_1_20nm_hw = 1;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.2")) {
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_2;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V32;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.4.2")) {
+ new_csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_v3_4_2_3ph;
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_4_2;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V342;
+ new_csiphy_dev->csiphy_3phase = CSI_3PHASE_HW;
+ } else if (of_device_is_compatible(new_csiphy_dev->pdev->dev.of_node,
+ "qcom,csiphy-v3.5")) {
+ new_csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_v3_5_3ph;
+ new_csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v3_5;
+ new_csiphy_dev->hw_dts_version = CSIPHY_VERSION_V35;
+ new_csiphy_dev->csiphy_3phase = CSI_3PHASE_HW;
+ } else {
+ pr_err("%s:%d, invalid hw version : 0x%x\n", __func__, __LINE__,
+ new_csiphy_dev->hw_dts_version);
+ return -EINVAL;
+ }
+
+ msm_cam_copy_v4l2_subdev_fops(&msm_csiphy_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_csiphy_v4l2_subdev_fops.compat_ioctl32 =
+ msm_csiphy_subdev_fops_ioctl;
+#endif
+ new_csiphy_dev->msm_sd.sd.devnode->fops =
+ &msm_csiphy_v4l2_subdev_fops;
+ new_csiphy_dev->csiphy_state = CSIPHY_POWER_DOWN;
+ return 0;
+
+csiphy_no_resource:
+ mutex_destroy(&new_csiphy_dev->mutex);
+ kfree(new_csiphy_dev->ctrl_reg);
+ kfree(new_csiphy_dev);
+ return rc;
+}
+
+static const struct of_device_id msm_csiphy_dt_match[] = {
+ {.compatible = "qcom,csiphy"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_csiphy_dt_match);
+
+static struct platform_driver csiphy_driver = {
+ .probe = csiphy_probe,
+ .driver = {
+ .name = MSM_CSIPHY_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = msm_csiphy_dt_match,
+ },
+};
+
+static int __init msm_csiphy_init_module(void)
+{
+ return platform_driver_register(&csiphy_driver);
+}
+
+static void __exit msm_csiphy_exit_module(void)
+{
+ platform_driver_unregister(&csiphy_driver);
+}
+
+module_init(msm_csiphy_init_module);
+module_exit(msm_csiphy_exit_module);
+MODULE_DESCRIPTION("MSM CSIPHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
new file mode 100644
index 000000000000..b722b26e091f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CSIPHY_H
+#define MSM_CSIPHY_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_cam_sensor.h>
+#include "msm_sd.h"
+#include "msm_camera_io_util.h"
+
+#define MAX_CSIPHY 3
+#define CSIPHY_NUM_CLK_MAX 16
+
+struct csiphy_reg_t {
+ uint32_t addr;
+ uint32_t data;
+};
+
+struct csiphy_reg_parms_t {
+/*MIPI CSI PHY registers*/
+ uint32_t mipi_csiphy_lnn_cfg1_addr;
+ uint32_t mipi_csiphy_lnn_cfg2_addr;
+ uint32_t mipi_csiphy_lnn_cfg3_addr;
+ uint32_t mipi_csiphy_lnn_cfg4_addr;
+ uint32_t mipi_csiphy_lnn_cfg5_addr;
+ uint32_t mipi_csiphy_lnck_cfg1_addr;
+ uint32_t mipi_csiphy_lnck_cfg2_addr;
+ uint32_t mipi_csiphy_lnck_cfg3_addr;
+ uint32_t mipi_csiphy_lnck_cfg4_addr;
+ uint32_t mipi_csiphy_lnn_test_imp;
+ uint32_t mipi_csiphy_lnn_misc1_addr;
+ uint32_t mipi_csiphy_glbl_reset_addr;
+ uint32_t mipi_csiphy_glbl_pwr_cfg_addr;
+ uint32_t mipi_csiphy_glbl_irq_cmd_addr;
+ uint32_t mipi_csiphy_hw_version_addr;
+ uint32_t mipi_csiphy_interrupt_status0_addr;
+ uint32_t mipi_csiphy_interrupt_mask0_addr;
+ uint32_t mipi_csiphy_interrupt_mask_val;
+ uint32_t mipi_csiphy_interrupt_mask_addr;
+ uint32_t mipi_csiphy_interrupt_clear0_addr;
+ uint32_t mipi_csiphy_interrupt_clear_addr;
+ uint32_t mipi_csiphy_mode_config_shift;
+ uint32_t mipi_csiphy_glbl_t_init_cfg0_addr;
+ uint32_t mipi_csiphy_t_wakeup_cfg0_addr;
+ uint32_t csiphy_version;
+};
+
+struct csiphy_reg_3ph_parms_t {
+/*MIPI CSI PHY registers*/
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl5;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl6;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl34;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl35;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl36;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl1;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl2;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl3;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl5;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl6;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl7;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl8;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl9;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl10;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl11;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl12;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl13;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl14;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl15;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl16;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl17;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl18;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl19;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl21;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl23;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl24;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl25;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl26;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl27;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl28;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl29;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl30;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl31;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl32;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl33;
+ struct csiphy_reg_t mipi_csiphy_3ph_lnn_ctrl51;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl7;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl11;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl12;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl13;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl14;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl15;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl16;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl17;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl18;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl19;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl20;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl21;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_misc1;
+ struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl0;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg1;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg2;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg3;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg4;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg5;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg6;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg7;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg8;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_cfg9;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_ctrl15;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_test_imp;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_test_force;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnn_ctrl5;
+};
+
+struct csiphy_ctrl_t {
+ struct csiphy_reg_parms_t csiphy_reg;
+ struct csiphy_reg_3ph_parms_t csiphy_3ph_reg;
+};
+
+enum msm_csiphy_state_t {
+ CSIPHY_POWER_UP,
+ CSIPHY_POWER_DOWN,
+};
+
+struct csiphy_device {
+ struct platform_device *pdev;
+ struct msm_sd_subdev msm_sd;
+ struct v4l2_subdev subdev;
+ struct resource *mem;
+ struct resource *clk_mux_mem;
+ struct resource *irq;
+ struct resource *io;
+ struct resource *clk_mux_io;
+ void __iomem *base;
+ void __iomem *clk_mux_base;
+ struct mutex mutex;
+ uint32_t hw_version;
+ uint32_t hw_dts_version;
+ enum msm_csiphy_state_t csiphy_state;
+ struct csiphy_ctrl_t *ctrl_reg;
+ uint32_t num_clk;
+ struct clk *csiphy_clk[CSIPHY_NUM_CLK_MAX];
+ struct msm_cam_clk_info csiphy_clk_info[CSIPHY_NUM_CLK_MAX];
+ int32_t ref_count;
+ uint16_t lane_mask[MAX_CSIPHY];
+ uint32_t is_3_1_20nm_hw;
+ uint32_t csiphy_clk_index;
+ uint32_t csiphy_max_clk;
+ uint8_t csiphy_3phase;
+ uint8_t num_irq_registers;
+ uint32_t csiphy_sof_debug;
+};
+
+#define VIDIOC_MSM_CSIPHY_RELEASE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 9, void *)
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/eeprom/Makefile b/drivers/media/platform/msm/camera_v2/sensor/eeprom/Makefile
new file mode 100644
index 000000000000..ba78a650b4ba
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/eeprom/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSM_EEPROM) += msm_eeprom.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c b/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c
new file mode 100644
index 000000000000..92a1d5ccf28b
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.c
@@ -0,0 +1,1818 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include "msm_sd.h"
+#include "msm_cci.h"
+#include "msm_eeprom.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+DEFINE_MSM_MUTEX(msm_eeprom_mutex);
+#ifdef CONFIG_COMPAT
+static struct v4l2_file_operations msm_eeprom_v4l2_subdev_fops;
+#endif
+
+/**
+ * msm_get_read_mem_size - Get the total size for allocation
+ * @eeprom_map_array: mem map
+ *
+ * Returns the computed total size on success, or a negative error code.
+ */
+static int msm_get_read_mem_size
+ (struct msm_eeprom_memory_map_array *eeprom_map_array) {
+ int size = 0, i, j;
+ struct msm_eeprom_mem_map_t *eeprom_map;
+
+ if (eeprom_map_array->msm_size_of_max_mappings >
+ MSM_EEPROM_MAX_MEM_MAP_CNT) {
+ pr_err("%s:%d Memory map cnt greater than expected: %d",
+ __func__, __LINE__,
+ eeprom_map_array->msm_size_of_max_mappings);
+ return -EINVAL;
+ }
+ for (j = 0; j < eeprom_map_array->msm_size_of_max_mappings; j++) {
+ eeprom_map = &(eeprom_map_array->memory_map[j]);
+ if (eeprom_map->memory_map_size >
+ MSM_EEPROM_MEMORY_MAP_MAX_SIZE) {
+ pr_err("%s:%d Memory map size greater than expected: %d",
+ __func__, __LINE__,
+ eeprom_map->memory_map_size);
+ return -EINVAL;
+ }
+ for (i = 0; i < eeprom_map->memory_map_size; i++) {
+ if (eeprom_map->mem_settings[i].i2c_operation ==
+ MSM_CAM_READ) {
+ size += eeprom_map->mem_settings[i].reg_data;
+ }
+ }
+ }
+ CDBG("Total Data Size: %d\n", size);
+ return size;
+}
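+/*
+ * Illustrative example for the size computation above (map contents are
+ * hypothetical): a single mapping whose mem_settings contain two
+ * MSM_CAM_READ entries with reg_data 16 and 32 yields a total read buffer
+ * size of 48 bytes; write and poll entries do not contribute.
+ */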
+
+/**
+ * msm_eeprom_verify_sum - verify crc32 checksum
+ * @mem: data buffer
+ * @size: size of data buffer
+ * @sum: expected checksum
+ *
+ * Returns 0 if the checksum matches, -EINVAL otherwise.
+ */
+static int msm_eeprom_verify_sum(const char *mem, uint32_t size, uint32_t sum)
+{
+ uint32_t crc = ~0;
+
+ /* check overflow */
+ if (size > crc - sizeof(uint32_t))
+ return -EINVAL;
+
+ crc = crc32_le(crc, mem, size);
+ if (~crc != sum) {
+ CDBG("%s: expect 0x%x, result 0x%x\n", __func__, sum, ~crc);
+ return -EINVAL;
+ }
+ CDBG("%s: checksum pass 0x%x\n", __func__, sum);
+ return 0;
+}
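+/*
+ * Illustrative usage sketch (buf and len are hypothetical): the checksum a
+ * map region is expected to carry is the inverted little-endian CRC32 of
+ * the data region, i.e.
+ *
+ *	uint32_t sum = ~crc32_le(~0U, buf, len);
+ *	rc = msm_eeprom_verify_sum(buf, len, sum);	// 0 on match
+ */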
+
+/**
+ * msm_eeprom_match_crc - verify multiple regions using crc
+ * @data: data block to be verified
+ *
+ * Iterates through the regions stored in @data in pairs: each data region
+ * is immediately followed by its checksum region, which must have a
+ * valid_size of 4; a pair is skipped when either region has a valid_size
+ * of 0.
+ * Returns a bitmask of verified pairs, starting from the LSB: 1 indicates
+ * a checksum match, 0 indicates a mismatch or an unverified pair.
+ */
+static uint32_t msm_eeprom_match_crc(struct msm_eeprom_memory_block_t *data)
+{
+ int j, rc;
+ uint32_t *sum;
+ uint32_t ret = 0;
+ uint8_t *memptr;
+ struct msm_eeprom_memory_map_t *map;
+
+ if (!data) {
+ pr_err("%s data is NULL", __func__);
+ return -EINVAL;
+ }
+ map = data->map;
+ memptr = data->mapdata;
+
+ for (j = 0; j + 1 < data->num_map; j += 2) {
+ /* empty table or no checksum */
+ if (!map[j].mem.valid_size || !map[j+1].mem.valid_size) {
+ memptr += map[j].mem.valid_size
+ + map[j+1].mem.valid_size;
+ continue;
+ }
+ if (map[j+1].mem.valid_size != sizeof(uint32_t)) {
+ CDBG("%s: malformatted data mapping\n", __func__);
+ return -EINVAL;
+ }
+ sum = (uint32_t *) (memptr + map[j].mem.valid_size);
+ rc = msm_eeprom_verify_sum(memptr, map[j].mem.valid_size,
+ *sum);
+ if (!rc)
+ ret |= 1 << (j/2);
+ memptr += map[j].mem.valid_size + map[j+1].mem.valid_size;
+ }
+ return ret;
+}
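+/*
+ * Illustrative example (map layout is hypothetical): with num_map == 4 the
+ * regions form two (data, checksum) pairs; a return value of 0x3 means both
+ * pairs verified, while 0x1 means only the first pair matched its checksum.
+ */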
+
+/**
+ * read_eeprom_memory() - read map data into buffer
+ * @e_ctrl: eeprom control struct
+ * @block: block to be read
+ *
+ * This function iterates through blocks stored in block->map, reads each
+ * region and concatenates the results into the pre-allocated block->mapdata
+ */
+static int read_eeprom_memory(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_eeprom_memory_block_t *block)
+{
+ int rc = 0;
+ int j;
+ struct msm_eeprom_memory_map_t *emap = block->map;
+ struct msm_eeprom_board_info *eb_info;
+ uint8_t *memptr = block->mapdata;
+
+ if (!e_ctrl) {
+ pr_err("%s e_ctrl is NULL", __func__);
+ return -EINVAL;
+ }
+
+ eb_info = e_ctrl->eboard_info;
+
+ for (j = 0; j < block->num_map; j++) {
+ if (emap[j].saddr.addr) {
+ eb_info->i2c_slaveaddr = emap[j].saddr.addr;
+ e_ctrl->i2c_client.cci_client->sid =
+ eb_info->i2c_slaveaddr >> 1;
+ pr_err("qcom,slave-addr = 0x%X\n",
+ eb_info->i2c_slaveaddr);
+ }
+
+ if (emap[j].page.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].page.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &(e_ctrl->i2c_client), emap[j].page.addr,
+ emap[j].page.data, emap[j].page.data_t);
+ msleep(emap[j].page.delay);
+ if (rc < 0) {
+ pr_err("%s: page write failed\n", __func__);
+ return rc;
+ }
+ }
+ if (emap[j].pageen.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].pageen.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &(e_ctrl->i2c_client), emap[j].pageen.addr,
+ emap[j].pageen.data, emap[j].pageen.data_t);
+ msleep(emap[j].pageen.delay);
+ if (rc < 0) {
+ pr_err("%s: page enable failed\n", __func__);
+ return rc;
+ }
+ }
+ if (emap[j].poll.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].poll.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
+ &(e_ctrl->i2c_client), emap[j].poll.addr,
+ emap[j].poll.data, emap[j].poll.data_t);
+ msleep(emap[j].poll.delay);
+ if (rc < 0) {
+ pr_err("%s: poll failed\n", __func__);
+ return rc;
+ }
+ }
+
+ if (emap[j].mem.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].mem.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_read_seq(
+ &(e_ctrl->i2c_client), emap[j].mem.addr,
+ memptr, emap[j].mem.valid_size);
+ if (rc < 0) {
+ pr_err("%s: read failed\n", __func__);
+ return rc;
+ }
+ memptr += emap[j].mem.valid_size;
+ }
+ if (emap[j].pageen.valid_size) {
+ e_ctrl->i2c_client.addr_type = emap[j].pageen.addr_t;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &(e_ctrl->i2c_client), emap[j].pageen.addr,
+ 0, emap[j].pageen.data_t);
+ if (rc < 0) {
+ pr_err("%s: page disable failed\n", __func__);
+ return rc;
+ }
+ }
+ }
+ return rc;
+}
+/**
+ * msm_eeprom_parse_memory_map() - parse memory map in device node
+ * @of: device node
+ * @data: memory block for output
+ *
+ * This function parses @of to fill @data. It allocates the map itself,
+ * parses the @of node, calculates the total data length, and allocates
+ * the required buffer.
+ * It only fills the map, but does not perform actual reading.
+ */
+static int msm_eeprom_parse_memory_map(struct device_node *of,
+ struct msm_eeprom_memory_block_t *data)
+{
+ int i, rc = 0;
+ char property[PROPERTY_MAXSIZE];
+ uint32_t count = 6;
+ struct msm_eeprom_memory_map_t *map;
+
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,num-blocks");
+ rc = of_property_read_u32(of, property, &data->num_map);
+ CDBG("%s: %s %d\n", __func__, property, data->num_map);
+ if (rc < 0) {
+ pr_err("%s failed rc %d\n", __func__, rc);
+ return rc;
+ }
+
+ map = kzalloc((sizeof(*map) * data->num_map), GFP_KERNEL);
+ if (!map) {
+ rc = -ENOMEM;
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ return rc;
+ }
+ data->map = map;
+
+ for (i = 0; i < data->num_map; i++) {
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,page%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].page, count);
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ snprintf(property, PROPERTY_MAXSIZE,
+ "qcom,pageen%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].pageen, count);
+ if (rc < 0)
+ CDBG("%s: pageen not needed\n", __func__);
+
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,saddr%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].saddr.addr, 1);
+ if (rc < 0)
+ CDBG("%s: saddr not needed - block %d\n", __func__, i);
+
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,poll%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].poll, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+
+ snprintf(property, PROPERTY_MAXSIZE, "qcom,mem%d", i);
+ rc = of_property_read_u32_array(of, property,
+ (uint32_t *) &map[i].mem, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ data->num_data += map[i].mem.valid_size;
+ }
+
+ CDBG("%s num_bytes %d\n", __func__, data->num_data);
+
+ data->mapdata = kzalloc(data->num_data, GFP_KERNEL);
+ if (!data->mapdata) {
+ rc = -ENOMEM;
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ return rc;
+
+ERROR:
+ kfree(data->map);
+ memset(data, 0, sizeof(*data));
+ return rc;
+}
+
+/**
+ * eeprom_parse_memory_map - Parse mem map
+ * @e_ctrl: ctrl structure
+ * @eeprom_map_array: eeprom map
+ *
+ * Returns success or failure
+ */
+static int eeprom_parse_memory_map(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_eeprom_memory_map_array *eeprom_map_array)
+{
+ int rc = 0, i, j;
+ uint8_t *memptr;
+ struct msm_eeprom_mem_map_t *eeprom_map;
+
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.num_data = msm_get_read_mem_size(eeprom_map_array);
+ if (e_ctrl->cal_data.num_data <= 0) {
+ pr_err("%s:%d Error in reading mem size\n",
+ __func__, __LINE__);
+ e_ctrl->cal_data.num_data = 0;
+ return -EINVAL;
+ }
+ e_ctrl->cal_data.mapdata =
+ kzalloc(e_ctrl->cal_data.num_data, GFP_KERNEL);
+ if (!e_ctrl->cal_data.mapdata)
+ return -ENOMEM;
+
+ memptr = e_ctrl->cal_data.mapdata;
+ for (j = 0; j < eeprom_map_array->msm_size_of_max_mappings; j++) {
+ eeprom_map = &(eeprom_map_array->memory_map[j]);
+ if (e_ctrl->i2c_client.cci_client) {
+ e_ctrl->i2c_client.cci_client->sid =
+ eeprom_map->slave_addr >> 1;
+ } else if (e_ctrl->i2c_client.client) {
+ e_ctrl->i2c_client.client->addr =
+ eeprom_map->slave_addr >> 1;
+ }
+ CDBG("Slave Addr: 0x%X\n", eeprom_map->slave_addr);
+ CDBG("Memory map Size: %d",
+ eeprom_map->memory_map_size);
+ for (i = 0; i < eeprom_map->memory_map_size; i++) {
+ switch (eeprom_map->mem_settings[i].i2c_operation) {
+ case MSM_CAM_WRITE: {
+ e_ctrl->i2c_client.addr_type =
+ eeprom_map->mem_settings[i].addr_type;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &(e_ctrl->i2c_client),
+ eeprom_map->mem_settings[i].reg_addr,
+ eeprom_map->mem_settings[i].reg_data,
+ eeprom_map->mem_settings[i].data_type);
+ msleep(eeprom_map->mem_settings[i].delay);
+ if (rc < 0) {
+ pr_err("%s: page write failed\n",
+ __func__);
+ goto clean_up;
+ }
+ }
+ break;
+ case MSM_CAM_POLL: {
+ e_ctrl->i2c_client.addr_type =
+ eeprom_map->mem_settings[i].addr_type;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->i2c_poll(
+ &(e_ctrl->i2c_client),
+ eeprom_map->mem_settings[i].reg_addr,
+ eeprom_map->mem_settings[i].reg_data,
+ eeprom_map->mem_settings[i].data_type);
+ msleep(eeprom_map->mem_settings[i].delay);
+ if (rc < 0) {
+ pr_err("%s: poll failed\n",
+ __func__);
+ goto clean_up;
+ }
+ }
+ break;
+ case MSM_CAM_READ: {
+ e_ctrl->i2c_client.addr_type =
+ eeprom_map->mem_settings[i].addr_type;
+ rc = e_ctrl->i2c_client.i2c_func_tbl->
+ i2c_read_seq(&(e_ctrl->i2c_client),
+ eeprom_map->mem_settings[i].reg_addr,
+ memptr,
+ eeprom_map->mem_settings[i].reg_data);
+ msleep(eeprom_map->mem_settings[i].delay);
+ if (rc < 0) {
+ pr_err("%s: read failed\n",
+ __func__);
+ goto clean_up;
+ }
+ memptr += eeprom_map->mem_settings[i].reg_data;
+ }
+ break;
+ default:
+ pr_err("%s: %d Invalid i2c operation LC:%d\n",
+ __func__, __LINE__, i);
+ return -EINVAL;
+ }
+ }
+ }
+ memptr = e_ctrl->cal_data.mapdata;
+ for (i = 0; i < e_ctrl->cal_data.num_data; i++)
+ CDBG("memory_data[%d] = 0x%X\n", i, memptr[i]);
+ return rc;
+
+clean_up:
+ kfree(e_ctrl->cal_data.mapdata);
+ e_ctrl->cal_data.num_data = 0;
+ e_ctrl->cal_data.mapdata = NULL;
+ return rc;
+}
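+/*
+ * Illustrative example for the parsing above (mapping contents are
+ * hypothetical): a memory map whose mem_settings hold one MSM_CAM_WRITE
+ * (e.g. a page select) followed by one MSM_CAM_READ with reg_data == 32
+ * performs the write and then appends 32 bytes read from reg_addr to
+ * cal_data.mapdata.
+ */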
+
+/**
+ * msm_eeprom_power_up - Do eeprom power up here
+ * @e_ctrl: ctrl structure
+ * @power_info: power up info for eeprom
+ *
+ * Returns success or failure
+ */
+static int msm_eeprom_power_up(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_camera_power_ctrl_t *power_info) {
+ int32_t rc = 0;
+
+ rc = msm_camera_fill_vreg_params(
+ power_info->cam_vreg, power_info->num_vreg,
+ power_info->power_setting, power_info->power_setting_size);
+ if (rc < 0) {
+ pr_err("%s:%d failed in camera_fill_vreg_params rc %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ /* Parse and fill vreg params for powerdown settings*/
+ rc = msm_camera_fill_vreg_params(
+ power_info->cam_vreg, power_info->num_vreg,
+ power_info->power_down_setting,
+ power_info->power_down_setting_size);
+ if (rc < 0) {
+ pr_err("%s:%d failed msm_camera_fill_vreg_params for PDOWN rc %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ rc = msm_camera_power_up(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+ if (rc) {
+ pr_err("%s:%d failed in eeprom Power up rc %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ return rc;
+}
+
+/**
+ * eeprom_init_config - Do power up, parse the memory map and power down
+ * @e_ctrl: ctrl structure
+ * @argp: pointer to msm_eeprom_cfg_data
+ * Returns success or failure
+ */
+static int eeprom_init_config(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *argp)
+{
+ int rc = 0;
+ struct msm_eeprom_cfg_data *cdata = argp;
+ struct msm_sensor_power_setting_array *power_setting_array = NULL;
+ struct msm_camera_power_ctrl_t *power_info;
+ struct msm_eeprom_memory_map_array *memory_map_arr = NULL;
+
+ power_setting_array =
+ kzalloc(sizeof(struct msm_sensor_power_setting_array),
+ GFP_KERNEL);
+ if (!power_setting_array) {
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ memory_map_arr = kzalloc(sizeof(struct msm_eeprom_memory_map_array),
+ GFP_KERNEL);
+ if (!memory_map_arr) {
+ rc = -ENOMEM;
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ goto free_mem;
+ }
+
+ if (copy_from_user(power_setting_array,
+ cdata->cfg.eeprom_info.power_setting_array,
+ sizeof(struct msm_sensor_power_setting_array))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ rc = -EFAULT;
+ goto free_mem;
+ }
+ CDBG("%s:%d Size of power setting array: %d\n",
+ __func__, __LINE__, power_setting_array->size);
+ if (copy_from_user(memory_map_arr,
+ cdata->cfg.eeprom_info.mem_map_array,
+ sizeof(struct msm_eeprom_memory_map_array))) {
+ rc = -EINVAL;
+ pr_err("%s copy_from_user failed for memory map%d\n",
+ __func__, __LINE__);
+ goto free_mem;
+ }
+
+ power_info = &(e_ctrl->eboard_info->power_info);
+
+ power_info->power_setting =
+ power_setting_array->power_setting_a;
+ power_info->power_down_setting =
+ power_setting_array->power_down_setting_a;
+
+ power_info->power_setting_size =
+ power_setting_array->size;
+ power_info->power_down_setting_size =
+ power_setting_array->size_down;
+
+ if ((power_info->power_setting_size >
+ MAX_POWER_CONFIG) ||
+ (power_info->power_down_setting_size >
+ MAX_POWER_CONFIG) ||
+ (!power_info->power_down_setting_size) ||
+ (!power_info->power_setting_size)) {
+ rc = -EINVAL;
+ pr_err("%s:%d Invalid power setting size :%d, %d\n",
+ __func__, __LINE__,
+ power_info->power_setting_size,
+ power_info->power_down_setting_size);
+ goto free_mem;
+ }
+
+ if (e_ctrl->i2c_client.cci_client) {
+ e_ctrl->i2c_client.cci_client->i2c_freq_mode =
+ cdata->cfg.eeprom_info.i2c_freq_mode;
+ if (e_ctrl->i2c_client.cci_client->i2c_freq_mode >
+ I2C_MAX_MODES) {
+ pr_err("%s::%d Improper I2C freq mode\n",
+ __func__, __LINE__);
+ e_ctrl->i2c_client.cci_client->i2c_freq_mode =
+ I2C_STANDARD_MODE;
+ }
+ }
+
+ /* Fill vreg power info and power up here */
+ rc = msm_eeprom_power_up(e_ctrl, power_info);
+ if (rc < 0) {
+ pr_err("Power Up failed for eeprom\n");
+ goto free_mem;
+ }
+
+ rc = eeprom_parse_memory_map(e_ctrl, memory_map_arr);
+ if (rc < 0) {
+ pr_err("%s::%d memory map parse failed\n", __func__, __LINE__);
+ goto free_mem;
+ }
+
+ rc = msm_camera_power_down(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+ if (rc < 0) {
+ pr_err("%s:%d Power down failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto free_mem;
+ }
+
+free_mem:
+ kfree(power_setting_array);
+ kfree(memory_map_arr);
+ power_setting_array = NULL;
+ memory_map_arr = NULL;
+ return rc;
+}
+
+static int msm_eeprom_get_cmm_data(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_eeprom_cfg_data *cdata)
+{
+ int rc = 0;
+ struct msm_eeprom_cmm_t *cmm_data = &e_ctrl->eboard_info->cmm_data;
+ cdata->cfg.get_cmm_data.cmm_support = cmm_data->cmm_support;
+ cdata->cfg.get_cmm_data.cmm_compression = cmm_data->cmm_compression;
+ cdata->cfg.get_cmm_data.cmm_size = cmm_data->cmm_size;
+ return rc;
+}
+
+static int eeprom_config_read_cal_data(struct msm_eeprom_ctrl_t *e_ctrl,
+ struct msm_eeprom_cfg_data *cdata)
+{
+ int rc;
+
+ /* check range */
+ if (cdata->cfg.read_data.num_bytes >
+ e_ctrl->cal_data.num_data) {
+ CDBG("%s: Invalid size. exp %u, req %u\n", __func__,
+ e_ctrl->cal_data.num_data,
+ cdata->cfg.read_data.num_bytes);
+ return -EINVAL;
+ }
+
+ rc = copy_to_user(cdata->cfg.read_data.dbuffer,
+ e_ctrl->cal_data.mapdata,
+ cdata->cfg.read_data.num_bytes);
+
+ return rc;
+}
+
+static int msm_eeprom_config(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *argp)
+{
+ struct msm_eeprom_cfg_data *cdata =
+ (struct msm_eeprom_cfg_data *)argp;
+ int rc = 0;
+
+ CDBG("%s E\n", __func__);
+ switch (cdata->cfgtype) {
+ case CFG_EEPROM_GET_INFO:
+ if (e_ctrl->userspace_probe == 1) {
+ pr_err("%s:%d Eeprom name should be module driver",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ CDBG("%s E CFG_EEPROM_GET_INFO\n", __func__);
+ cdata->is_supported = e_ctrl->is_supported;
+ memcpy(cdata->cfg.eeprom_name,
+ e_ctrl->eboard_info->eeprom_name,
+ sizeof(cdata->cfg.eeprom_name));
+ break;
+ case CFG_EEPROM_GET_CAL_DATA:
+ CDBG("%s E CFG_EEPROM_GET_CAL_DATA\n", __func__);
+ cdata->cfg.get_data.num_bytes =
+ e_ctrl->cal_data.num_data;
+ break;
+ case CFG_EEPROM_READ_CAL_DATA:
+ CDBG("%s E CFG_EEPROM_READ_CAL_DATA\n", __func__);
+ rc = eeprom_config_read_cal_data(e_ctrl, cdata);
+ break;
+ case CFG_EEPROM_GET_MM_INFO:
+ CDBG("%s E CFG_EEPROM_GET_MM_INFO\n", __func__);
+ rc = msm_eeprom_get_cmm_data(e_ctrl, cdata);
+ break;
+ case CFG_EEPROM_INIT:
+ if (e_ctrl->userspace_probe == 0) {
+ pr_err("%s:%d Eeprom already probed at kernel boot",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ if (e_ctrl->cal_data.num_data == 0) {
+ rc = eeprom_init_config(e_ctrl, argp);
+ if (rc < 0) {
+ pr_err("%s:%d Eeprom init failed\n",
+ __func__, __LINE__);
+ return rc;
+ }
+ } else {
+ CDBG("%s:%d Already read eeprom\n",
+ __func__, __LINE__);
+ }
+ break;
+ default:
+ break;
+ }
+
+ CDBG("%s X rc: %d\n", __func__, rc);
+ return rc;
+}
+
+static int msm_eeprom_get_subdev_id(struct msm_eeprom_ctrl_t *e_ctrl,
+ void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+ CDBG("%s E\n", __func__);
+ if (!subdev_id) {
+ pr_err("%s failed\n", __func__);
+ return -EINVAL;
+ }
+ *subdev_id = e_ctrl->subdev_id;
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("%s X\n", __func__);
+ return 0;
+}
+
+static long msm_eeprom_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+ CDBG("%s E\n", __func__);
+ CDBG("%s:%d a_ctrl %p argp %p\n", __func__, __LINE__, e_ctrl, argp);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_eeprom_get_subdev_id(e_ctrl, argp);
+ case VIDIOC_MSM_EEPROM_CFG:
+ return msm_eeprom_config(e_ctrl, argp);
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ CDBG("%s X\n", __func__);
+}
+
+static struct msm_camera_i2c_fn_t msm_eeprom_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_seq = msm_camera_cci_i2c_write_seq,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_poll = msm_camera_cci_i2c_poll,
+};
+
+static struct msm_camera_i2c_fn_t msm_eeprom_qup_func_tbl = {
+ .i2c_read = msm_camera_qup_i2c_read,
+ .i2c_read_seq = msm_camera_qup_i2c_read_seq,
+ .i2c_write = msm_camera_qup_i2c_write,
+ .i2c_write_table = msm_camera_qup_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_qup_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_qup_i2c_write_table_w_microdelay,
+};
+
+static struct msm_camera_i2c_fn_t msm_eeprom_spi_func_tbl = {
+ .i2c_read = msm_camera_spi_read,
+ .i2c_read_seq = msm_camera_spi_read_seq,
+};
+
+static int msm_eeprom_open(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh) {
+ int rc = 0;
+ struct msm_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+ CDBG("%s E\n", __func__);
+ if (!e_ctrl) {
+ pr_err("%s failed e_ctrl is NULL\n", __func__);
+ return -EINVAL;
+ }
+ CDBG("%s X\n", __func__);
+ return rc;
+}
+
+static int msm_eeprom_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh) {
+ int rc = 0;
+ struct msm_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+ CDBG("%s E\n", __func__);
+ if (!e_ctrl) {
+ pr_err("%s failed e_ctrl is NULL\n", __func__);
+ return -EINVAL;
+ }
+ CDBG("%s X\n", __func__);
+ return rc;
+}
+
+static struct msm_cam_clk_info cam_8960_clk_info[] = {
+ [SENSOR_CAM_MCLK] = {"cam_clk", 24000000},
+};
+
+static struct msm_cam_clk_info cam_8974_clk_info[] = {
+ [SENSOR_CAM_MCLK] = {"cam_src_clk", 19200000},
+ [SENSOR_CAM_CLK] = {"cam_clk", 0},
+};
+
+static const struct v4l2_subdev_internal_ops msm_eeprom_internal_ops = {
+ .open = msm_eeprom_open,
+ .close = msm_eeprom_close,
+};
+
+static struct v4l2_subdev_core_ops msm_eeprom_subdev_core_ops = {
+ .ioctl = msm_eeprom_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_eeprom_subdev_ops = {
+ .core = &msm_eeprom_subdev_core_ops,
+};
+
+static int msm_eeprom_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct msm_eeprom_ctrl_t *e_ctrl = NULL;
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ CDBG("%s E\n", __func__);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		pr_err("%s i2c_check_functionality failed\n", __func__);
+		rc = -ENODEV;
+		goto probe_failure;
+	}
+
+ e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+ if (!e_ctrl) {
+ pr_err("%s:%d kzalloc failed\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ e_ctrl->eeprom_v4l2_subdev_ops = &msm_eeprom_subdev_ops;
+ e_ctrl->eeprom_mutex = &msm_eeprom_mutex;
+ CDBG("%s client = 0x%p\n", __func__, client);
+ e_ctrl->eboard_info = (struct msm_eeprom_board_info *)(id->driver_data);
+ if (!e_ctrl->eboard_info) {
+ pr_err("%s:%d board info NULL\n", __func__, __LINE__);
+ rc = -EINVAL;
+ goto ectrl_free;
+ }
+ power_info = &e_ctrl->eboard_info->power_info;
+ e_ctrl->i2c_client.client = client;
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.map = NULL;
+ e_ctrl->userspace_probe = 0;
+ e_ctrl->is_supported = 1;
+
+ /* Set device type as I2C */
+ e_ctrl->eeprom_device_type = MSM_CAMERA_I2C_DEVICE;
+ e_ctrl->i2c_client.i2c_func_tbl = &msm_eeprom_qup_func_tbl;
+
+ if (e_ctrl->eboard_info->i2c_slaveaddr != 0)
+ e_ctrl->i2c_client.client->addr =
+ e_ctrl->eboard_info->i2c_slaveaddr;
+ power_info->clk_info = cam_8960_clk_info;
+ power_info->clk_info_size = ARRAY_SIZE(cam_8960_clk_info);
+ power_info->dev = &client->dev;
+
+	/* TODO: implement EEPROM memory read for the I2C probe path */
+ /* Initialize sub device */
+ v4l2_i2c_subdev_init(&e_ctrl->msm_sd.sd,
+ e_ctrl->i2c_client.client,
+ e_ctrl->eeprom_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&e_ctrl->msm_sd.sd, e_ctrl);
+ e_ctrl->msm_sd.sd.internal_ops = &msm_eeprom_internal_ops;
+ e_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&e_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ e_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ e_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_EEPROM;
+ msm_sd_register(&e_ctrl->msm_sd);
+ CDBG("%s success result=%d X\n", __func__, rc);
+ return rc;
+
+ectrl_free:
+ kfree(e_ctrl);
+probe_failure:
+ pr_err("%s failed! rc = %d\n", __func__, rc);
+ return rc;
+}
+
+static int msm_eeprom_i2c_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct msm_eeprom_ctrl_t *e_ctrl;
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ e_ctrl = (struct msm_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+ if (!e_ctrl) {
+ pr_err("%s: eeprom device is NULL\n", __func__);
+ return 0;
+ }
+
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ if (e_ctrl->eboard_info) {
+ kfree(e_ctrl->eboard_info->power_info.gpio_conf);
+ kfree(e_ctrl->eboard_info);
+ }
+ e_ctrl->cal_data.mapdata = NULL;
+ kfree(e_ctrl);
+ e_ctrl = NULL;
+
+ return 0;
+}
+
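+/*
+ * msm_eeprom_spi_parse_cmd() - read a 3-element SPI instruction tuple
+ * (opcode, address length, dummy length) from the device tree into the
+ * SPI client command table; the macro returns -EFAULT from the calling
+ * function when the property is missing.
+ */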
+#define msm_eeprom_spi_parse_cmd(spic, str, name, out, size) \
+ { \
+ if (of_property_read_u32_array( \
+ spic->spi_master->dev.of_node, \
+ str, out, size)) { \
+ return -EFAULT; \
+ } else { \
+ spic->cmd_tbl.name.opcode = out[0]; \
+ spic->cmd_tbl.name.addr_len = out[1]; \
+ spic->cmd_tbl.name.dummy_len = out[2]; \
+ } \
+ }
+
+static int msm_eeprom_spi_parse_of(struct msm_camera_spi_client *spic)
+{
+ int rc = -EFAULT;
+ uint32_t tmp[3];
+ msm_eeprom_spi_parse_cmd(spic, "qcom,spiop,read", read, tmp, 3);
+ msm_eeprom_spi_parse_cmd(spic, "qcom,spiop,readseq", read_seq, tmp, 3);
+ msm_eeprom_spi_parse_cmd(spic, "qcom,spiop,queryid", query_id, tmp, 3);
+
+ rc = of_property_read_u32_array(spic->spi_master->dev.of_node,
+ "qcom,eeprom-id", tmp, 2);
+ if (rc) {
+ pr_err("%s: Failed to get eeprom id\n", __func__);
+ return rc;
+ }
+ spic->mfr_id0 = tmp[0];
+ spic->device_id0 = tmp[1];
+
+ return 0;
+}
+
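+/*
+ * msm_eeprom_match_id() - read the manufacturer/device ID over SPI and
+ * compare it with the values parsed from the device tree.
+ */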
+static int msm_eeprom_match_id(struct msm_eeprom_ctrl_t *e_ctrl)
+{
+ int rc;
+ struct msm_camera_i2c_client *client = &e_ctrl->i2c_client;
+ uint8_t id[2];
+
+ rc = msm_camera_spi_query_id(client, 0, &id[0], 2);
+ if (rc < 0)
+ return rc;
+ CDBG("%s: read 0x%x 0x%x, check 0x%x 0x%x\n", __func__, id[0],
+ id[1], client->spi_client->mfr_id0,
+ client->spi_client->device_id0);
+ if (id[0] != client->spi_client->mfr_id0
+ || id[1] != client->spi_client->device_id0)
+ return -ENODEV;
+
+ return 0;
+}
+
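+/*
+ * msm_eeprom_get_dt_data() - parse regulator, power-setting and GPIO
+ * tables for the EEPROM from the device tree.
+ */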
+static int msm_eeprom_get_dt_data(struct msm_eeprom_ctrl_t *e_ctrl)
+{
+ int rc = 0, i = 0;
+ struct msm_eeprom_board_info *eb_info;
+ struct msm_camera_power_ctrl_t *power_info =
+ &e_ctrl->eboard_info->power_info;
+ struct device_node *of_node = NULL;
+ struct msm_camera_gpio_conf *gconf = NULL;
+ int8_t gpio_array_size = 0;
+ uint16_t *gpio_array = NULL;
+
+ eb_info = e_ctrl->eboard_info;
+ if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE)
+ of_node = e_ctrl->i2c_client.
+ spi_client->spi_master->dev.of_node;
+ else if (e_ctrl->eeprom_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ of_node = e_ctrl->pdev->dev.of_node;
+
+ if (!of_node) {
+ pr_err("%s: %d of_node is NULL\n", __func__ , __LINE__);
+ return -ENOMEM;
+ }
+ rc = msm_camera_get_dt_vreg_data(of_node, &power_info->cam_vreg,
+ &power_info->num_vreg);
+ if (rc < 0)
+ return rc;
+
+ if (e_ctrl->userspace_probe == 0) {
+ rc = msm_camera_get_dt_power_setting_data(of_node,
+ power_info->cam_vreg, power_info->num_vreg,
+ power_info);
+ if (rc < 0)
+ goto ERROR1;
+ }
+
+ power_info->gpio_conf = kzalloc(sizeof(struct msm_camera_gpio_conf),
+ GFP_KERNEL);
+ if (!power_info->gpio_conf) {
+ rc = -ENOMEM;
+ goto ERROR2;
+ }
+ gconf = power_info->gpio_conf;
+ gpio_array_size = of_gpio_count(of_node);
+ CDBG("%s gpio count %d\n", __func__, gpio_array_size);
+
+ if (gpio_array_size > 0) {
+ gpio_array = kzalloc(sizeof(uint16_t) * gpio_array_size,
+ GFP_KERNEL);
+		if (!gpio_array) {
+			pr_err("%s failed %d\n", __func__, __LINE__);
+			rc = -ENOMEM;
+			goto ERROR3;
+		}
+ for (i = 0; i < gpio_array_size; i++) {
+ gpio_array[i] = of_get_gpio(of_node, i);
+ CDBG("%s gpio_array[%d] = %d\n", __func__, i,
+ gpio_array[i]);
+ }
+
+ rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf,
+ gpio_array, gpio_array_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR4;
+ }
+
+ rc = msm_camera_init_gpio_pin_tbl(of_node, gconf,
+ gpio_array, gpio_array_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR4;
+ }
+ kfree(gpio_array);
+ }
+
+ return rc;
+ERROR4:
+ kfree(gpio_array);
+ERROR3:
+ kfree(power_info->gpio_conf);
+ERROR2:
+ kfree(power_info->cam_vreg);
+ERROR1:
+ kfree(power_info->power_setting);
+ return rc;
+}
+
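+/*
+ * msm_eeprom_cmm_dts() - parse optional camera multimodule (CMM) data
+ * properties (support, compression, offset, size) from the device tree.
+ */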
+static int msm_eeprom_cmm_dts(struct msm_eeprom_board_info *eb_info,
+ struct device_node *of_node)
+{
+ int rc = 0;
+ struct msm_eeprom_cmm_t *cmm_data = &eb_info->cmm_data;
+
+ cmm_data->cmm_support =
+ of_property_read_bool(of_node, "qcom,cmm-data-support");
+ if (!cmm_data->cmm_support)
+ return -EINVAL;
+ cmm_data->cmm_compression =
+ of_property_read_bool(of_node, "qcom,cmm-data-compressed");
+ if (!cmm_data->cmm_compression)
+ CDBG("No MM compression data\n");
+
+ rc = of_property_read_u32(of_node, "qcom,cmm-data-offset",
+ &cmm_data->cmm_offset);
+ if (rc < 0)
+ CDBG("No MM offset data\n");
+
+ rc = of_property_read_u32(of_node, "qcom,cmm-data-size",
+ &cmm_data->cmm_size);
+ if (rc < 0)
+ CDBG("No MM size data\n");
+
+ CDBG("cmm_support: cmm_compr %d, cmm_offset %d, cmm_size %d\n",
+ cmm_data->cmm_compression,
+ cmm_data->cmm_offset,
+ cmm_data->cmm_size);
+ return 0;
+}
+
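+/*
+ * msm_eeprom_spi_setup() - allocate and initialize the SPI EEPROM control
+ * structure: parse the device tree, power up, verify the EEPROM ID, read
+ * the calibration map and register the v4l2 subdev.
+ */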
+static int msm_eeprom_spi_setup(struct spi_device *spi)
+{
+ struct msm_eeprom_ctrl_t *e_ctrl = NULL;
+ struct msm_camera_i2c_client *client = NULL;
+ struct msm_camera_spi_client *spi_client;
+ struct msm_eeprom_board_info *eb_info;
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ int rc = 0;
+
+ e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+ if (!e_ctrl) {
+ pr_err("%s:%d kzalloc failed\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ e_ctrl->eeprom_v4l2_subdev_ops = &msm_eeprom_subdev_ops;
+ e_ctrl->eeprom_mutex = &msm_eeprom_mutex;
+ client = &e_ctrl->i2c_client;
+ e_ctrl->is_supported = 0;
+ e_ctrl->userspace_probe = 0;
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.map = NULL;
+
+ spi_client = kzalloc(sizeof(*spi_client), GFP_KERNEL);
+ if (!spi_client) {
+ pr_err("%s:%d kzalloc failed\n", __func__, __LINE__);
+ kfree(e_ctrl);
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32(spi->dev.of_node, "cell-index",
+ &e_ctrl->subdev_id);
+ CDBG("cell-index %d, rc %d\n", e_ctrl->subdev_id, rc);
+	if (rc < 0) {
+		pr_err("failed rc %d\n", rc);
+		goto spi_free;
+	}
+
+	eb_info = kzalloc(sizeof(*eb_info), GFP_KERNEL);
+	if (!eb_info) {
+		rc = -ENOMEM;
+		goto spi_free;
+	}
+ e_ctrl->eboard_info = eb_info;
+
+ rc = of_property_read_string(spi->dev.of_node, "qcom,eeprom-name",
+ &eb_info->eeprom_name);
+ CDBG("%s qcom,eeprom-name %s, rc %d\n", __func__,
+ eb_info->eeprom_name, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ e_ctrl->userspace_probe = 1;
+ goto board_free;
+ }
+
+ e_ctrl->eeprom_device_type = MSM_CAMERA_SPI_DEVICE;
+ client->spi_client = spi_client;
+ spi_client->spi_master = spi;
+ client->i2c_func_tbl = &msm_eeprom_spi_func_tbl;
+ client->addr_type = MSM_CAMERA_I2C_3B_ADDR;
+
+ rc = msm_eeprom_cmm_dts(e_ctrl->eboard_info, spi->dev.of_node);
+ if (rc < 0)
+ CDBG("%s MM data miss:%d\n", __func__, __LINE__);
+
+ power_info = &eb_info->power_info;
+
+ power_info->clk_info = cam_8974_clk_info;
+ power_info->clk_info_size = ARRAY_SIZE(cam_8974_clk_info);
+ power_info->dev = &spi->dev;
+
+ rc = msm_eeprom_get_dt_data(e_ctrl);
+ if (rc < 0)
+ goto board_free;
+
+ /* set spi instruction info */
+ spi_client->retry_delay = 1;
+ spi_client->retries = 0;
+
+ rc = msm_eeprom_spi_parse_of(spi_client);
+ if (rc < 0) {
+ dev_err(&spi->dev,
+ "%s: Error parsing device properties\n", __func__);
+ goto board_free;
+ }
+
+ if (e_ctrl->userspace_probe == 0) {
+ /* prepare memory buffer */
+ rc = msm_eeprom_parse_memory_map(spi->dev.of_node,
+ &e_ctrl->cal_data);
+ if (rc < 0)
+ CDBG("%s: no cal memory map\n", __func__);
+
+ /* power up eeprom for reading */
+ rc = msm_camera_power_up(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto caldata_free;
+ }
+
+ /* check eeprom id */
+ rc = msm_eeprom_match_id(e_ctrl);
+ if (rc < 0) {
+ CDBG("%s: eeprom not matching %d\n", __func__, rc);
+ goto power_down;
+ }
+ /* read eeprom */
+ if (e_ctrl->cal_data.map) {
+ rc = read_eeprom_memory(e_ctrl, &e_ctrl->cal_data);
+ if (rc < 0) {
+ pr_err("%s: read cal data failed\n", __func__);
+ goto power_down;
+ }
+ e_ctrl->is_supported |= msm_eeprom_match_crc(
+ &e_ctrl->cal_data);
+ }
+
+ rc = msm_camera_power_down(power_info,
+ e_ctrl->eeprom_device_type, &e_ctrl->i2c_client);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto caldata_free;
+ }
+ } else
+ e_ctrl->is_supported = 1;
+
+	/* initialize subdev */
+ v4l2_spi_subdev_init(&e_ctrl->msm_sd.sd,
+ e_ctrl->i2c_client.spi_client->spi_master,
+ e_ctrl->eeprom_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&e_ctrl->msm_sd.sd, e_ctrl);
+ e_ctrl->msm_sd.sd.internal_ops = &msm_eeprom_internal_ops;
+ e_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&e_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ e_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ e_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_EEPROM;
+ msm_sd_register(&e_ctrl->msm_sd);
+ e_ctrl->is_supported = (e_ctrl->is_supported << 1) | 1;
+ CDBG("%s success result=%d supported=%x X\n", __func__, rc,
+ e_ctrl->is_supported);
+
+ return 0;
+
+power_down:
+ msm_camera_power_down(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+caldata_free:
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+board_free:
+ kfree(e_ctrl->eboard_info);
+spi_free:
+ kfree(spi_client);
+ kfree(e_ctrl);
+ return rc;
+}
+
+static int msm_eeprom_spi_probe(struct spi_device *spi)
+{
+ int irq, cs, cpha, cpol, cs_high;
+
+ CDBG("%s\n", __func__);
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+ spi_setup(spi);
+
+ irq = spi->irq;
+ cs = spi->chip_select;
+ cpha = (spi->mode & SPI_CPHA) ? 1 : 0;
+ cpol = (spi->mode & SPI_CPOL) ? 1 : 0;
+ cs_high = (spi->mode & SPI_CS_HIGH) ? 1 : 0;
+ CDBG("%s: irq[%d] cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x]\n",
+ __func__, irq, cs, cpha, cpol, cs_high);
+ CDBG("%s: max_speed[%u]\n", __func__, spi->max_speed_hz);
+
+ return msm_eeprom_spi_setup(spi);
+}
+
+static int msm_eeprom_spi_remove(struct spi_device *sdev)
+{
+ struct v4l2_subdev *sd = spi_get_drvdata(sdev);
+ struct msm_eeprom_ctrl_t *e_ctrl;
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ e_ctrl = (struct msm_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+ if (!e_ctrl) {
+ pr_err("%s: eeprom device is NULL\n", __func__);
+ return 0;
+ }
+
+ kfree(e_ctrl->i2c_client.spi_client);
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ if (e_ctrl->eboard_info) {
+ kfree(e_ctrl->eboard_info->power_info.gpio_conf);
+ kfree(e_ctrl->eboard_info);
+ }
+ e_ctrl->cal_data.mapdata = NULL;
+ kfree(e_ctrl);
+ e_ctrl = NULL;
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static void msm_eeprom_copy_power_settings_compat(
+ struct msm_sensor_power_setting_array *ps,
+ struct msm_sensor_power_setting_array32 *ps32)
+{
+ uint16_t i = 0;
+
+ ps->size = ps32->size;
+ for (i = 0; i < ps32->size; i++) {
+ ps->power_setting_a[i].config_val =
+ ps32->power_setting_a[i].config_val;
+ ps->power_setting_a[i].delay =
+ ps32->power_setting_a[i].delay;
+ ps->power_setting_a[i].seq_type =
+ ps32->power_setting_a[i].seq_type;
+ ps->power_setting_a[i].seq_val =
+ ps32->power_setting_a[i].seq_val;
+ }
+
+ ps->size_down = ps32->size_down;
+ for (i = 0; i < ps32->size_down; i++) {
+ ps->power_down_setting_a[i].config_val =
+ ps32->power_down_setting_a[i].config_val;
+ ps->power_down_setting_a[i].delay =
+ ps32->power_down_setting_a[i].delay;
+ ps->power_down_setting_a[i].seq_type =
+ ps32->power_down_setting_a[i].seq_type;
+ ps->power_down_setting_a[i].seq_val =
+ ps32->power_down_setting_a[i].seq_val;
+ }
+}
+
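+/*
+ * eeprom_config_read_cal_data32() - 32-bit compat variant of the
+ * calibration-data read; translates the user buffer pointer with
+ * compat_ptr() before copying.
+ */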
+static int eeprom_config_read_cal_data32(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *arg)
+{
+ int rc;
+ uint8_t *ptr_dest = NULL;
+ struct msm_eeprom_cfg_data32 *cdata32 =
+ (struct msm_eeprom_cfg_data32 *) arg;
+ struct msm_eeprom_cfg_data cdata;
+
+ cdata.cfgtype = cdata32->cfgtype;
+ cdata.is_supported = cdata32->is_supported;
+ cdata.cfg.read_data.num_bytes = cdata32->cfg.read_data.num_bytes;
+ /* check range */
+ if (cdata.cfg.read_data.num_bytes >
+ e_ctrl->cal_data.num_data) {
+ CDBG("%s: Invalid size. exp %u, req %u\n", __func__,
+ e_ctrl->cal_data.num_data,
+ cdata.cfg.read_data.num_bytes);
+ return -EINVAL;
+ }
+ if (!e_ctrl->cal_data.mapdata)
+ return -EFAULT;
+
+ ptr_dest = (uint8_t *) compat_ptr(cdata32->cfg.read_data.dbuffer);
+
+ rc = copy_to_user(ptr_dest, e_ctrl->cal_data.mapdata,
+ cdata.cfg.read_data.num_bytes);
+
+ return rc;
+}
+
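+/*
+ * eeprom_init_config32() - 32-bit compat path of CFG_EEPROM_INIT: copy the
+ * power settings and memory map from userspace, power up, parse the memory
+ * map from the EEPROM and power back down.
+ */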
+static int eeprom_init_config32(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *argp)
+{
+ int rc = 0;
+ struct msm_eeprom_cfg_data32 *cdata32 = argp;
+ struct msm_sensor_power_setting_array *power_setting_array = NULL;
+ struct msm_sensor_power_setting_array32 *power_setting_array32 = NULL;
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+ struct msm_eeprom_memory_map_array *mem_map_array = NULL;
+
+ power_setting_array32 =
+ kzalloc(sizeof(struct msm_sensor_power_setting_array32),
+ GFP_KERNEL);
+ if (!power_setting_array32) {
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ power_setting_array =
+ kzalloc(sizeof(struct msm_sensor_power_setting_array),
+ GFP_KERNEL);
+ if (power_setting_array == NULL) {
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto free_mem;
+ }
+ mem_map_array =
+ kzalloc(sizeof(struct msm_eeprom_memory_map_array),
+ GFP_KERNEL);
+ if (mem_map_array == NULL) {
+ pr_err("%s:%d Mem Alloc Fail\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto free_mem;
+ }
+
+	if (copy_from_user(power_setting_array32,
+		(void *)compat_ptr(cdata32->cfg.eeprom_info.
+		power_setting_array),
+		sizeof(struct msm_sensor_power_setting_array32))) {
+		pr_err("%s:%d copy_from_user failed\n",
+			__func__, __LINE__);
+		rc = -EFAULT;
+		goto free_mem;
+	}
+	CDBG("%s:%d Size of power setting array: %d\n",
+		__func__, __LINE__, power_setting_array32->size);
+	if (copy_from_user(mem_map_array,
+		(void *)compat_ptr(cdata32->cfg.eeprom_info.mem_map_array),
+		sizeof(struct msm_eeprom_memory_map_array))) {
+		pr_err("%s:%d copy_from_user failed for memory map\n",
+			__func__, __LINE__);
+		rc = -EFAULT;
+		goto free_mem;
+	}
+
+	power_info = &(e_ctrl->eboard_info->power_info);
+
+	/* Validate sizes before copying into the fixed-size arrays */
+	if ((power_setting_array32->size > MAX_POWER_CONFIG) ||
+		(power_setting_array32->size_down > MAX_POWER_CONFIG) ||
+		(!power_setting_array32->size) ||
+		(!power_setting_array32->size_down)) {
+		rc = -EINVAL;
+		pr_err("%s:%d Invalid power setting size :%d, %d\n",
+			__func__, __LINE__,
+			power_setting_array32->size,
+			power_setting_array32->size_down);
+		goto free_mem;
+	}
+
+	msm_eeprom_copy_power_settings_compat(
+		power_setting_array,
+		power_setting_array32);
+
+	power_info->power_setting =
+		power_setting_array->power_setting_a;
+	power_info->power_down_setting =
+		power_setting_array->power_down_setting_a;
+
+	power_info->power_setting_size =
+		power_setting_array->size;
+	power_info->power_down_setting_size =
+		power_setting_array->size_down;
+
+ if (e_ctrl->i2c_client.cci_client) {
+ e_ctrl->i2c_client.cci_client->i2c_freq_mode =
+ cdata32->cfg.eeprom_info.i2c_freq_mode;
+		if (e_ctrl->i2c_client.cci_client->i2c_freq_mode >=
+			I2C_MAX_MODES) {
+ pr_err("%s::%d Improper I2C Freq Mode\n",
+ __func__, __LINE__);
+ e_ctrl->i2c_client.cci_client->i2c_freq_mode =
+ I2C_STANDARD_MODE;
+		}
+	} else {
+		CDBG("%s:%d Not CCI probe\n", __func__, __LINE__);
+	}
+ /* Fill vreg power info and power up here */
+ rc = msm_eeprom_power_up(e_ctrl, power_info);
+ if (rc < 0) {
+ pr_err("%s:%d Power Up failed for eeprom\n",
+ __func__, __LINE__);
+ goto free_mem;
+ }
+
+ rc = eeprom_parse_memory_map(e_ctrl, mem_map_array);
+ if (rc < 0) {
+ pr_err("%s:%d memory map parse failed\n",
+ __func__, __LINE__);
+ goto free_mem;
+ }
+
+ rc = msm_camera_power_down(power_info,
+ e_ctrl->eeprom_device_type, &e_ctrl->i2c_client);
+ if (rc < 0)
+ pr_err("%s:%d Power down failed rc %d\n",
+ __func__, __LINE__, rc);
+
+free_mem:
+ kfree(power_setting_array32);
+ kfree(power_setting_array);
+ kfree(mem_map_array);
+ power_setting_array32 = NULL;
+ power_setting_array = NULL;
+ mem_map_array = NULL;
+ return rc;
+}
+
+static int msm_eeprom_config32(struct msm_eeprom_ctrl_t *e_ctrl,
+ void __user *argp)
+{
+ struct msm_eeprom_cfg_data32 *cdata =
+ (struct msm_eeprom_cfg_data32 *)argp;
+ int rc = 0;
+
+ CDBG("%s E\n", __func__);
+ switch (cdata->cfgtype) {
+ case CFG_EEPROM_GET_INFO:
+ if (e_ctrl->userspace_probe == 1) {
+		pr_err("%s:%d EEPROM name should come from the userspace module driver\n",
+			__func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ CDBG("%s E CFG_EEPROM_GET_INFO\n", __func__);
+ cdata->is_supported = e_ctrl->is_supported;
+ memcpy(cdata->cfg.eeprom_name,
+ e_ctrl->eboard_info->eeprom_name,
+ sizeof(cdata->cfg.eeprom_name));
+ break;
+ case CFG_EEPROM_GET_CAL_DATA:
+ CDBG("%s E CFG_EEPROM_GET_CAL_DATA\n", __func__);
+ cdata->cfg.get_data.num_bytes =
+ e_ctrl->cal_data.num_data;
+ break;
+ case CFG_EEPROM_READ_CAL_DATA:
+ CDBG("%s E CFG_EEPROM_READ_CAL_DATA\n", __func__);
+ rc = eeprom_config_read_cal_data32(e_ctrl, argp);
+ break;
+ case CFG_EEPROM_INIT:
+ if (e_ctrl->userspace_probe == 0) {
+ pr_err("%s:%d Eeprom already probed at kernel boot",
+ __func__, __LINE__);
+ rc = -EINVAL;
+ break;
+ }
+ if (e_ctrl->cal_data.num_data == 0) {
+ rc = eeprom_init_config32(e_ctrl, argp);
+ if (rc < 0)
+ pr_err("%s:%d Eeprom init failed\n",
+ __func__, __LINE__);
+ } else {
+ CDBG("%s:%d Already read eeprom\n",
+ __func__, __LINE__);
+ }
+ break;
+ default:
+ break;
+ }
+
+ CDBG("%s X rc: %d\n", __func__, rc);
+ return rc;
+}
+
+static long msm_eeprom_subdev_ioctl32(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+
+ CDBG("%s E\n", __func__);
+ CDBG("%s:%d a_ctrl %p argp %p\n", __func__, __LINE__, e_ctrl, argp);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_eeprom_get_subdev_id(e_ctrl, argp);
+ case VIDIOC_MSM_EEPROM_CFG32:
+ return msm_eeprom_config32(e_ctrl, argp);
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ CDBG("%s X\n", __func__);
+}
+
+static long msm_eeprom_subdev_do_ioctl32(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+ return msm_eeprom_subdev_ioctl32(sd, cmd, arg);
+}
+
+static long msm_eeprom_subdev_fops_ioctl32(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_eeprom_subdev_do_ioctl32);
+}
+
+#endif
+
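+/*
+ * msm_eeprom_platform_probe() - probe a CCI-attached EEPROM: parse the
+ * device tree, optionally read and CRC-check the calibration map at boot,
+ * and register the v4l2 subdev.
+ */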
+static int msm_eeprom_platform_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ int j = 0;
+ uint32_t temp;
+
+ struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_eeprom_ctrl_t *e_ctrl = NULL;
+ struct msm_eeprom_board_info *eb_info = NULL;
+ struct device_node *of_node = pdev->dev.of_node;
+ struct msm_camera_power_ctrl_t *power_info = NULL;
+
+ CDBG("%s E\n", __func__);
+
+ e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+ if (!e_ctrl) {
+ pr_err("%s:%d kzalloc failed\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ e_ctrl->eeprom_v4l2_subdev_ops = &msm_eeprom_subdev_ops;
+ e_ctrl->eeprom_mutex = &msm_eeprom_mutex;
+
+ e_ctrl->cal_data.mapdata = NULL;
+ e_ctrl->cal_data.map = NULL;
+ e_ctrl->userspace_probe = 0;
+ e_ctrl->is_supported = 0;
+	if (!of_node) {
+		pr_err("%s dev.of_node NULL\n", __func__);
+		kfree(e_ctrl);
+		return -EINVAL;
+	}
+
+ /* Set platform device handle */
+ e_ctrl->pdev = pdev;
+ /* Set device type as platform device */
+ e_ctrl->eeprom_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ e_ctrl->i2c_client.i2c_func_tbl = &msm_eeprom_cci_func_tbl;
+ e_ctrl->i2c_client.cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+	if (!e_ctrl->i2c_client.cci_client) {
+		pr_err("%s failed no memory\n", __func__);
+		kfree(e_ctrl);
+		return -ENOMEM;
+	}
+
+ e_ctrl->eboard_info = kzalloc(sizeof(
+ struct msm_eeprom_board_info), GFP_KERNEL);
+ if (!e_ctrl->eboard_info) {
+ pr_err("%s failed line %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto cciclient_free;
+ }
+
+ eb_info = e_ctrl->eboard_info;
+ power_info = &eb_info->power_info;
+ cci_client = e_ctrl->i2c_client.cci_client;
+ cci_client->cci_subdev = msm_cci_get_subdev();
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+
+ power_info->clk_info = cam_8974_clk_info;
+ power_info->clk_info_size = ARRAY_SIZE(cam_8974_clk_info);
+ power_info->dev = &pdev->dev;
+
+ rc = of_property_read_u32(of_node, "cell-index",
+ &pdev->id);
+ CDBG("cell-index %d, rc %d\n", pdev->id, rc);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto board_free;
+ }
+ e_ctrl->subdev_id = pdev->id;
+
+ rc = of_property_read_u32(of_node, "qcom,cci-master",
+ &e_ctrl->cci_master);
+ CDBG("qcom,cci-master %d, rc %d\n", e_ctrl->cci_master, rc);
+ if (rc < 0) {
+ pr_err("%s failed rc %d\n", __func__, rc);
+ goto board_free;
+ }
+ cci_client->cci_i2c_master = e_ctrl->cci_master;
+
+ rc = of_property_read_string(of_node, "qcom,eeprom-name",
+ &eb_info->eeprom_name);
+ CDBG("%s qcom,eeprom-name %s, rc %d\n", __func__,
+ eb_info->eeprom_name, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ e_ctrl->userspace_probe = 1;
+ }
+
+ rc = msm_eeprom_get_dt_data(e_ctrl);
+ if (rc)
+ goto board_free;
+
+ if (e_ctrl->userspace_probe == 0) {
+ rc = of_property_read_u32(of_node, "qcom,slave-addr",
+ &temp);
+ if (rc < 0) {
+ pr_err("%s failed rc %d\n", __func__, rc);
+ goto board_free;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,i2c-freq-mode",
+ &e_ctrl->i2c_freq_mode);
+ CDBG("qcom,i2c_freq_mode %d, rc %d\n",
+ e_ctrl->i2c_freq_mode, rc);
+ if (rc < 0) {
+ pr_err("%s qcom,i2c-freq-mode read fail. Setting to 0 %d\n",
+ __func__, rc);
+ e_ctrl->i2c_freq_mode = 0;
+ }
+ if (e_ctrl->i2c_freq_mode >= I2C_MAX_MODES) {
+ pr_err("%s:%d invalid i2c_freq_mode = %d\n",
+ __func__, __LINE__, e_ctrl->i2c_freq_mode);
+ e_ctrl->i2c_freq_mode = 0;
+ }
+ eb_info->i2c_slaveaddr = temp;
+ CDBG("qcom,slave-addr = 0x%X\n", eb_info->i2c_slaveaddr);
+ eb_info->i2c_freq_mode = e_ctrl->i2c_freq_mode;
+ cci_client->i2c_freq_mode = e_ctrl->i2c_freq_mode;
+ cci_client->sid = eb_info->i2c_slaveaddr >> 1;
+
+ rc = msm_eeprom_parse_memory_map(of_node, &e_ctrl->cal_data);
+ if (rc < 0)
+ goto board_free;
+
+ rc = msm_camera_power_up(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+ if (rc) {
+ pr_err("failed rc %d\n", rc);
+ goto memdata_free;
+ }
+ rc = read_eeprom_memory(e_ctrl, &e_ctrl->cal_data);
+ if (rc < 0) {
+ pr_err("%s read_eeprom_memory failed\n", __func__);
+ goto power_down;
+ }
+ for (j = 0; j < e_ctrl->cal_data.num_data; j++)
+ CDBG("memory_data[%d] = 0x%X\n", j,
+ e_ctrl->cal_data.mapdata[j]);
+
+ e_ctrl->is_supported |= msm_eeprom_match_crc(&e_ctrl->cal_data);
+
+ rc = msm_camera_power_down(power_info,
+ e_ctrl->eeprom_device_type, &e_ctrl->i2c_client);
+ if (rc) {
+ pr_err("failed rc %d\n", rc);
+ goto memdata_free;
+ }
+ } else
+ e_ctrl->is_supported = 1;
+
+ v4l2_subdev_init(&e_ctrl->msm_sd.sd,
+ e_ctrl->eeprom_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&e_ctrl->msm_sd.sd, e_ctrl);
+ platform_set_drvdata(pdev, &e_ctrl->msm_sd.sd);
+ e_ctrl->msm_sd.sd.internal_ops = &msm_eeprom_internal_ops;
+ e_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(e_ctrl->msm_sd.sd.name,
+ ARRAY_SIZE(e_ctrl->msm_sd.sd.name), "msm_eeprom");
+ media_entity_init(&e_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ e_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ e_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_EEPROM;
+ msm_sd_register(&e_ctrl->msm_sd);
+
+#ifdef CONFIG_COMPAT
+ msm_cam_copy_v4l2_subdev_fops(&msm_eeprom_v4l2_subdev_fops);
+ msm_eeprom_v4l2_subdev_fops.compat_ioctl32 =
+ msm_eeprom_subdev_fops_ioctl32;
+ e_ctrl->msm_sd.sd.devnode->fops = &msm_eeprom_v4l2_subdev_fops;
+#endif
+
+ e_ctrl->is_supported = (e_ctrl->is_supported << 1) | 1;
+ CDBG("%s X\n", __func__);
+ return rc;
+
+power_down:
+ msm_camera_power_down(power_info, e_ctrl->eeprom_device_type,
+ &e_ctrl->i2c_client);
+memdata_free:
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+board_free:
+ kfree(e_ctrl->eboard_info);
+cciclient_free:
+ kfree(e_ctrl->i2c_client.cci_client);
+ kfree(e_ctrl);
+ return rc;
+}
+
+static int msm_eeprom_platform_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct msm_eeprom_ctrl_t *e_ctrl;
+ if (!sd) {
+ pr_err("%s: Subdevice is NULL\n", __func__);
+ return 0;
+ }
+
+ e_ctrl = (struct msm_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+ if (!e_ctrl) {
+ pr_err("%s: eeprom device is NULL\n", __func__);
+ return 0;
+ }
+
+ kfree(e_ctrl->i2c_client.cci_client);
+ kfree(e_ctrl->cal_data.mapdata);
+ kfree(e_ctrl->cal_data.map);
+ if (e_ctrl->eboard_info) {
+ kfree(e_ctrl->eboard_info->power_info.gpio_conf);
+ kfree(e_ctrl->eboard_info);
+ }
+ kfree(e_ctrl);
+ return 0;
+}
+
+static const struct of_device_id msm_eeprom_dt_match[] = {
+ { .compatible = "qcom,eeprom" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, msm_eeprom_dt_match);
+
+static struct platform_driver msm_eeprom_platform_driver = {
+ .driver = {
+ .name = "qcom,eeprom",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_eeprom_dt_match,
+ },
+ .probe = msm_eeprom_platform_probe,
+ .remove = msm_eeprom_platform_remove,
+};
+
+static const struct i2c_device_id msm_eeprom_i2c_id[] = {
+ { "msm_eeprom", (kernel_ulong_t)NULL},
+ { }
+};
+
+static struct i2c_driver msm_eeprom_i2c_driver = {
+ .id_table = msm_eeprom_i2c_id,
+ .probe = msm_eeprom_i2c_probe,
+ .remove = msm_eeprom_i2c_remove,
+ .driver = {
+ .name = "msm_eeprom",
+ },
+};
+
+static struct spi_driver msm_eeprom_spi_driver = {
+ .driver = {
+ .name = "qcom_eeprom",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_eeprom_dt_match,
+ },
+ .probe = msm_eeprom_spi_probe,
+ .remove = msm_eeprom_spi_remove,
+};
+
+static int __init msm_eeprom_init_module(void)
+{
+ int rc = 0;
+ CDBG("%s E\n", __func__);
+ rc = platform_driver_register(&msm_eeprom_platform_driver);
+ CDBG("%s:%d platform rc %d\n", __func__, __LINE__, rc);
+ rc = spi_register_driver(&msm_eeprom_spi_driver);
+ CDBG("%s:%d spi rc %d\n", __func__, __LINE__, rc);
+ return i2c_add_driver(&msm_eeprom_i2c_driver);
+}
+
+static void __exit msm_eeprom_exit_module(void)
+{
+ platform_driver_unregister(&msm_eeprom_platform_driver);
+ spi_unregister_driver(&msm_eeprom_spi_driver);
+ i2c_del_driver(&msm_eeprom_i2c_driver);
+}
+
+module_init(msm_eeprom_init_module);
+module_exit(msm_eeprom_exit_module);
+MODULE_DESCRIPTION("MSM EEPROM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.h b/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.h
new file mode 100644
index 000000000000..5e348a8001a6
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/eeprom/msm_eeprom.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_EEPROM_H
+#define MSM_EEPROM_H
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <soc/qcom/camera2.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_camera.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_spi.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_dt_util.h"
+
+struct msm_eeprom_ctrl_t;
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define PROPERTY_MAXSIZE 32
+
+struct msm_eeprom_ctrl_t {
+ struct platform_device *pdev;
+ struct mutex *eeprom_mutex;
+
+ struct v4l2_subdev sdev;
+ struct v4l2_subdev_ops *eeprom_v4l2_subdev_ops;
+ enum msm_camera_device_type_t eeprom_device_type;
+ struct msm_sd_subdev msm_sd;
+ enum cci_i2c_master_t cci_master;
+ enum i2c_freq_mode_t i2c_freq_mode;
+
+ struct msm_camera_i2c_client i2c_client;
+ struct msm_eeprom_board_info *eboard_info;
+ uint32_t subdev_id;
+ int32_t userspace_probe;
+ struct msm_eeprom_memory_block_t cal_data;
+ uint8_t is_supported;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile b/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile
new file mode 100644
index 000000000000..6a28da5926c7
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+obj-$(CONFIG_MSMB_CAMERA) += msm_flash.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
new file mode 100644
index 000000000000..61720e29ddfd
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
@@ -0,0 +1,1218 @@
+/* Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include "msm_flash.h"
+#include "msm_camera_dt_util.h"
+#include "msm_cci.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+DEFINE_MSM_MUTEX(msm_flash_mutex);
+
+static struct v4l2_file_operations msm_flash_v4l2_subdev_fops;
+static struct led_trigger *torch_trigger;
+
+static const struct of_device_id msm_flash_dt_match[] = {
+ {.compatible = "qcom,camera-flash", .data = NULL},
+ {}
+};
+
+static struct msm_flash_table msm_i2c_flash_table;
+static struct msm_flash_table msm_gpio_flash_table;
+static struct msm_flash_table msm_pmic_flash_table;
+
+static struct msm_flash_table *flash_table[] = {
+ &msm_i2c_flash_table,
+ &msm_gpio_flash_table,
+ &msm_pmic_flash_table
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_poll = msm_camera_cci_i2c_poll,
+};
+
+void msm_torch_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ if (!torch_trigger) {
+ pr_err("No torch trigger found, can't set brightness\n");
+ return;
+ }
+
+ led_trigger_event(torch_trigger, value);
+}
+
+static struct led_classdev msm_torch_led[MAX_LED_TRIGGERS] = {
+ {
+ .name = "torch-light0",
+ .brightness_set = msm_torch_brightness_set,
+ .brightness = LED_OFF,
+ },
+ {
+ .name = "torch-light1",
+ .brightness_set = msm_torch_brightness_set,
+ .brightness = LED_OFF,
+ },
+ {
+ .name = "torch-light2",
+ .brightness_set = msm_torch_brightness_set,
+ .brightness = LED_OFF,
+ },
+};
+
+static int32_t msm_torch_create_classdev(struct platform_device *pdev,
+ void *data)
+{
+ int32_t rc = 0;
+ int32_t i = 0;
+ struct msm_flash_ctrl_t *fctrl =
+ (struct msm_flash_ctrl_t *)data;
+
+ if (!fctrl) {
+ pr_err("Invalid fctrl\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < fctrl->torch_num_sources; i++) {
+ if (fctrl->torch_trigger[i]) {
+ torch_trigger = fctrl->torch_trigger[i];
+ CDBG("%s:%d msm_torch_brightness_set for torch %d",
+ __func__, __LINE__, i);
+ msm_torch_brightness_set(&msm_torch_led[i],
+ LED_OFF);
+
+ rc = led_classdev_register(&pdev->dev,
+ &msm_torch_led[i]);
+ if (rc) {
+ pr_err("Failed to register %d led dev. rc = %d\n",
+ i, rc);
+ return rc;
+ }
+ } else {
+ pr_err("Invalid fctrl->torch_trigger[%d]\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int32_t msm_flash_get_subdev_id(
+ struct msm_flash_ctrl_t *flash_ctrl, void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (flash_ctrl->flash_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ *subdev_id = flash_ctrl->pdev->id;
+ else
+ *subdev_id = flash_ctrl->subdev_id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_flash_i2c_write_table(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_camera_i2c_reg_setting_array *settings)
+{
+ struct msm_camera_i2c_reg_setting conf_array;
+
+ conf_array.addr_type = settings->addr_type;
+ conf_array.data_type = settings->data_type;
+ conf_array.delay = settings->delay;
+ conf_array.reg_setting = settings->reg_setting_a;
+ conf_array.size = settings->size;
+
+ return flash_ctrl->flash_i2c_client.i2c_func_tbl->i2c_write_table(
+ &flash_ctrl->flash_i2c_client, &conf_array);
+}
+
+#ifdef CONFIG_COMPAT
+static void msm_flash_copy_power_settings_compat(
+ struct msm_sensor_power_setting *ps,
+ struct msm_sensor_power_setting32 *ps32, uint32_t size)
+{
+ uint16_t i = 0;
+
+ for (i = 0; i < size; i++) {
+ ps[i].config_val = ps32[i].config_val;
+ ps[i].delay = ps32[i].delay;
+ ps[i].seq_type = ps32[i].seq_type;
+ ps[i].seq_val = ps32[i].seq_val;
+ }
+}
+#endif
+
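+/*
+ * msm_flash_i2c_init() - copy the power settings from userspace (with
+ * 32-bit compat handling), power up the flash driver IC and write the
+ * optional init register settings.
+ */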
+static int32_t msm_flash_i2c_init(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t rc = 0;
+ struct msm_flash_init_info_t *flash_init_info =
+ flash_data->cfg.flash_init_info;
+ struct msm_camera_i2c_reg_setting_array *settings = NULL;
+ struct msm_camera_cci_client *cci_client = NULL;
+#ifdef CONFIG_COMPAT
+ struct msm_sensor_power_setting_array32 *power_setting_array32 = NULL;
+#endif
+ if (!flash_init_info || !flash_init_info->power_setting_array) {
+ pr_err("%s:%d failed: Null pointer\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ power_setting_array32 = kzalloc(
+ sizeof(struct msm_sensor_power_setting_array32),
+ GFP_KERNEL);
+ if (!power_setting_array32) {
+ pr_err("%s mem allocation failed %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(power_setting_array32,
+ (void *)flash_init_info->power_setting_array,
+ sizeof(struct msm_sensor_power_setting_array32))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ kfree(power_setting_array32);
+ return -EFAULT;
+ }
+
+ flash_ctrl->power_setting_array.size =
+ power_setting_array32->size;
+ flash_ctrl->power_setting_array.size_down =
+ power_setting_array32->size_down;
+ flash_ctrl->power_setting_array.power_down_setting =
+ compat_ptr(power_setting_array32->power_down_setting);
+ flash_ctrl->power_setting_array.power_setting =
+ compat_ptr(power_setting_array32->power_setting);
+
+ /* Validate power_up array size and power_down array size */
+ if ((!flash_ctrl->power_setting_array.size) ||
+ (flash_ctrl->power_setting_array.size >
+ MAX_POWER_CONFIG) ||
+ (!flash_ctrl->power_setting_array.size_down) ||
+ (flash_ctrl->power_setting_array.size_down >
+ MAX_POWER_CONFIG)) {
+
+ pr_err("failed: invalid size %d, size_down %d",
+ flash_ctrl->power_setting_array.size,
+ flash_ctrl->power_setting_array.size_down);
+ kfree(power_setting_array32);
+ power_setting_array32 = NULL;
+ return -EINVAL;
+ }
+ /* Copy the settings from compat struct to regular struct */
+ msm_flash_copy_power_settings_compat(
+ flash_ctrl->power_setting_array.power_setting_a,
+ power_setting_array32->power_setting_a,
+ flash_ctrl->power_setting_array.size);
+
+ msm_flash_copy_power_settings_compat(
+ flash_ctrl->power_setting_array.power_down_setting_a,
+ power_setting_array32->power_down_setting_a,
+			flash_ctrl->power_setting_array.size_down);
+
+		kfree(power_setting_array32);
+		power_setting_array32 = NULL;
+	} else
+#endif
+ if (copy_from_user(&flash_ctrl->power_setting_array,
+ (void *)flash_init_info->power_setting_array,
+ sizeof(struct msm_sensor_power_setting_array))) {
+ pr_err("%s copy_from_user failed %d\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ if (flash_ctrl->flash_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ cci_client = flash_ctrl->flash_i2c_client.cci_client;
+ cci_client->sid = flash_init_info->slave_addr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->i2c_freq_mode = flash_init_info->i2c_freq_mode;
+ }
+
+ flash_ctrl->power_info.power_setting =
+ flash_ctrl->power_setting_array.power_setting_a;
+ flash_ctrl->power_info.power_down_setting =
+ flash_ctrl->power_setting_array.power_down_setting_a;
+ flash_ctrl->power_info.power_setting_size =
+ flash_ctrl->power_setting_array.size;
+ flash_ctrl->power_info.power_down_setting_size =
+ flash_ctrl->power_setting_array.size_down;
+
+ rc = msm_camera_power_up(&flash_ctrl->power_info,
+ flash_ctrl->flash_device_type,
+ &flash_ctrl->flash_i2c_client);
+ if (rc < 0) {
+ pr_err("%s msm_camera_power_up failed %d\n",
+ __func__, __LINE__);
+ goto msm_flash_i2c_init_fail;
+ }
+
+ if (flash_data->cfg.flash_init_info->settings) {
+ settings = kzalloc(sizeof(
+ struct msm_camera_i2c_reg_setting_array), GFP_KERNEL);
+ if (!settings) {
+ pr_err("%s mem allocation failed %d\n",
+ __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(settings, (void *)flash_init_info->settings,
+ sizeof(struct msm_camera_i2c_reg_setting_array))) {
+ kfree(settings);
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ rc = msm_flash_i2c_write_table(flash_ctrl, settings);
+ kfree(settings);
+
+ if (rc < 0) {
+ pr_err("%s:%d msm_flash_i2c_write_table rc %d failed\n",
+ __func__, __LINE__, rc);
+ }
+ }
+
+ return 0;
+
+msm_flash_i2c_init_fail:
+ return rc;
+}
+
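+/*
+ * msm_flash_gpio_init() - set default flash/torch currents, fall back to
+ * flash triggers for missing torch triggers and switch the flash off.
+ */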
+static int32_t msm_flash_gpio_init(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t i = 0;
+ int32_t rc = 0;
+
+ CDBG("Enter");
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+ flash_ctrl->flash_op_current[i] = LED_FULL;
+
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+ flash_ctrl->torch_op_current[i] = LED_HALF;
+
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++) {
+ if (!flash_ctrl->torch_trigger[i]) {
+ if (i < flash_ctrl->flash_num_sources)
+ flash_ctrl->torch_trigger[i] =
+ flash_ctrl->flash_trigger[i];
+ else
+ flash_ctrl->torch_trigger[i] =
+ flash_ctrl->flash_trigger[
+ flash_ctrl->flash_num_sources - 1];
+ }
+ }
+
+ rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, flash_data);
+
+ CDBG("Exit");
+ return rc;
+}
+
+static int32_t msm_flash_i2c_release(
+ struct msm_flash_ctrl_t *flash_ctrl)
+{
+ int32_t rc = 0;
+
+	if (!flash_ctrl) {
+		pr_err("%s:%d failed: NULL flash_ctrl\n", __func__, __LINE__);
+		return -EINVAL;
+	}
+
+ rc = msm_camera_power_down(&flash_ctrl->power_info,
+ flash_ctrl->flash_device_type,
+ &flash_ctrl->flash_i2c_client);
+ if (rc < 0) {
+ pr_err("%s msm_camera_power_down failed %d\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int32_t msm_flash_off(struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t i = 0;
+
+ CDBG("Enter\n");
+
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+ if (flash_ctrl->flash_trigger[i])
+ led_trigger_event(flash_ctrl->flash_trigger[i], 0);
+
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+ if (flash_ctrl->torch_trigger[i])
+ led_trigger_event(flash_ctrl->torch_trigger[i], 0);
+ if (flash_ctrl->switch_trigger)
+ led_trigger_event(flash_ctrl->switch_trigger, 0);
+
+ CDBG("Exit\n");
+ return 0;
+}
+
+static int32_t msm_flash_i2c_write_setting_array(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t rc = 0;
+ struct msm_camera_i2c_reg_setting_array *settings = NULL;
+
+ if (!flash_data->cfg.settings) {
+ pr_err("%s:%d failed: Null pointer\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ settings = kzalloc(sizeof(struct msm_camera_i2c_reg_setting_array),
+ GFP_KERNEL);
+ if (!settings) {
+ pr_err("%s mem allocation failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(settings, (void *)flash_data->cfg.settings,
+ sizeof(struct msm_camera_i2c_reg_setting_array))) {
+ kfree(settings);
+ pr_err("%s copy_from_user failed %d\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ rc = msm_flash_i2c_write_table(flash_ctrl, settings);
+ kfree(settings);
+
+ if (rc < 0) {
+ pr_err("%s:%d msm_flash_i2c_write_table rc = %d failed\n",
+ __func__, __LINE__, rc);
+ }
+ return rc;
+}
+
+static int32_t msm_flash_init(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ uint32_t i = 0;
+ int32_t rc = -EFAULT;
+ enum msm_flash_driver_type flash_driver_type = FLASH_DRIVER_DEFAULT;
+
+ CDBG("Enter");
+
+ if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT) {
+ pr_err("%s:%d Invalid flash state = %d",
+ __func__, __LINE__, flash_ctrl->flash_state);
+ return 0;
+ }
+
+ if (flash_data->cfg.flash_init_info->flash_driver_type ==
+ FLASH_DRIVER_DEFAULT) {
+ flash_driver_type = flash_ctrl->flash_driver_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data->flash_current[i] =
+ flash_ctrl->flash_max_current[i];
+ flash_data->flash_duration[i] =
+ flash_ctrl->flash_max_duration[i];
+ }
+ } else if (flash_data->cfg.flash_init_info->flash_driver_type ==
+ flash_ctrl->flash_driver_type) {
+ flash_driver_type = flash_ctrl->flash_driver_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_ctrl->flash_max_current[i] =
+ flash_data->flash_current[i];
+ flash_ctrl->flash_max_duration[i] =
+ flash_data->flash_duration[i];
+ }
+ }
+
+ if (flash_driver_type == FLASH_DRIVER_DEFAULT) {
+ pr_err("%s:%d invalid flash_driver_type", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(flash_table); i++) {
+ if (flash_driver_type == flash_table[i]->flash_driver_type) {
+ flash_ctrl->func_tbl = &flash_table[i]->func_tbl;
+ rc = 0;
+ }
+ }
+
+	if (rc < 0) {
+		pr_err("%s:%d failed invalid flash_driver_type %d\n",
+			__func__, __LINE__,
+			flash_data->cfg.flash_init_info->flash_driver_type);
+		return -EINVAL;
+	}
+
+ if (flash_ctrl->func_tbl->camera_flash_init) {
+ rc = flash_ctrl->func_tbl->camera_flash_init(
+ flash_ctrl, flash_data);
+ if (rc < 0) {
+ pr_err("%s:%d camera_flash_init failed rc = %d",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ }
+
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_INIT;
+
+ CDBG("Exit");
+ return 0;
+}
+
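+/*
+ * msm_flash_low() - torch/pre-flash mode: turn off the flash triggers and
+ * drive the torch triggers at the requested current, falling back to the
+ * operational current when the request is out of range.
+ */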
+static int32_t msm_flash_low(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ uint32_t curr = 0, max_current = 0;
+ int32_t i = 0;
+
+ CDBG("Enter\n");
+ /* Turn off flash triggers */
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+ if (flash_ctrl->flash_trigger[i])
+ led_trigger_event(flash_ctrl->flash_trigger[i], 0);
+
+	/* Turn on torch triggers */
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++) {
+ if (flash_ctrl->torch_trigger[i]) {
+ max_current = flash_ctrl->torch_max_current[i];
+ if (flash_data->flash_current[i] >= 0 &&
+ flash_data->flash_current[i] <
+ max_current) {
+ curr = flash_data->flash_current[i];
+ } else {
+ curr = flash_ctrl->torch_op_current[i];
+ pr_debug("LED current clamped to %d\n",
+ curr);
+ }
+ CDBG("low_flash_current[%d] = %d", i, curr);
+ led_trigger_event(flash_ctrl->torch_trigger[i],
+ curr);
+ }
+ }
+ if (flash_ctrl->switch_trigger)
+ led_trigger_event(flash_ctrl->switch_trigger, 1);
+ CDBG("Exit\n");
+ return 0;
+}
+
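+/*
+ * msm_flash_high() - main flash mode: turn off the torch triggers and
+ * drive the flash triggers at the requested current, falling back to the
+ * operational current when the request is out of range.
+ */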
+static int32_t msm_flash_high(
+ struct msm_flash_ctrl_t *flash_ctrl,
+ struct msm_flash_cfg_data_t *flash_data)
+{
+ int32_t curr = 0;
+ int32_t max_current = 0;
+ int32_t i = 0;
+
+ /* Turn off torch triggers */
+ for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+ if (flash_ctrl->torch_trigger[i])
+ led_trigger_event(flash_ctrl->torch_trigger[i], 0);
+
+ /* Turn on flash triggers */
+ for (i = 0; i < flash_ctrl->flash_num_sources; i++) {
+ if (flash_ctrl->flash_trigger[i]) {
+ max_current = flash_ctrl->flash_max_current[i];
+ if (flash_data->flash_current[i] >= 0 &&
+ flash_data->flash_current[i] <
+ max_current) {
+ curr = flash_data->flash_current[i];
+ } else {
+ curr = flash_ctrl->flash_op_current[i];
+ pr_debug("LED flash_current[%d] clamped %d\n",
+ i, curr);
+ }
+ CDBG("high_flash_current[%d] = %d", i, curr);
+ led_trigger_event(flash_ctrl->flash_trigger[i],
+ curr);
+ }
+ }
+ if (flash_ctrl->switch_trigger)
+ led_trigger_event(flash_ctrl->switch_trigger, 1);
+ return 0;
+}
+
+static int32_t msm_flash_release(
+ struct msm_flash_ctrl_t *flash_ctrl)
+{
+ int32_t rc = 0;
+ if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_RELEASE) {
+ pr_err("%s:%d Invalid flash state = %d",
+ __func__, __LINE__, flash_ctrl->flash_state);
+ return 0;
+ }
+
+ rc = flash_ctrl->func_tbl->camera_flash_off(flash_ctrl, NULL);
+ if (rc < 0) {
+		pr_err("%s:%d camera_flash_off failed rc = %d\n",
+			__func__, __LINE__, rc);
+ return rc;
+ }
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_RELEASE;
+ return 0;
+}
+
+static int32_t msm_flash_config(struct msm_flash_ctrl_t *flash_ctrl,
+ void __user *argp)
+{
+ int32_t rc = -EINVAL;
+ struct msm_flash_cfg_data_t *flash_data =
+ (struct msm_flash_cfg_data_t *) argp;
+
+ mutex_lock(flash_ctrl->flash_mutex);
+
+ CDBG("Enter %s type %d\n", __func__, flash_data->cfg_type);
+
+ switch (flash_data->cfg_type) {
+ case CFG_FLASH_INIT:
+ rc = msm_flash_init(flash_ctrl, flash_data);
+ break;
+ case CFG_FLASH_RELEASE:
+ if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)
+ rc = flash_ctrl->func_tbl->camera_flash_release(
+ flash_ctrl);
+ break;
+ case CFG_FLASH_OFF:
+ if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)
+ rc = flash_ctrl->func_tbl->camera_flash_off(
+ flash_ctrl, flash_data);
+ break;
+ case CFG_FLASH_LOW:
+ if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)
+ rc = flash_ctrl->func_tbl->camera_flash_low(
+ flash_ctrl, flash_data);
+ break;
+ case CFG_FLASH_HIGH:
+ if (flash_ctrl->flash_state == MSM_CAMERA_FLASH_INIT)
+ rc = flash_ctrl->func_tbl->camera_flash_high(
+ flash_ctrl, flash_data);
+ break;
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+ mutex_unlock(flash_ctrl->flash_mutex);
+
+ CDBG("Exit %s type %d\n", __func__, flash_data->cfg_type);
+
+ return rc;
+}
+
+static long msm_flash_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct msm_flash_ctrl_t *fctrl = NULL;
+ void __user *argp = (void __user *)arg;
+
+ CDBG("Enter\n");
+
+ if (!sd) {
+ pr_err("sd NULL\n");
+ return -EINVAL;
+ }
+ fctrl = v4l2_get_subdevdata(sd);
+ if (!fctrl) {
+ pr_err("fctrl NULL\n");
+ return -EINVAL;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_flash_get_subdev_id(fctrl, argp);
+ case VIDIOC_MSM_FLASH_CFG:
+ return msm_flash_config(fctrl, argp);
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ case MSM_SD_SHUTDOWN:
+ if (!fctrl->func_tbl) {
+ pr_err("fctrl->func_tbl NULL\n");
+ return -EINVAL;
+ } else {
+ return fctrl->func_tbl->camera_flash_release(fctrl);
+ }
+ default:
+ pr_err_ratelimited("invalid cmd %d\n", cmd);
+ return -ENOIOCTLCMD;
+ }
+ CDBG("Exit\n");
+}
+
+static struct v4l2_subdev_core_ops msm_flash_subdev_core_ops = {
+ .ioctl = msm_flash_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_flash_subdev_ops = {
+ .core = &msm_flash_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_flash_internal_ops;
+
+static int32_t msm_flash_get_gpio_dt_data(struct device_node *of_node,
+ struct msm_flash_ctrl_t *fctrl)
+{
+ int32_t rc = 0, i = 0;
+ uint16_t *gpio_array = NULL;
+ int16_t gpio_array_size = 0;
+ struct msm_camera_gpio_conf *gconf = NULL;
+
+ gpio_array_size = of_gpio_count(of_node);
+ CDBG("%s gpio count %d\n", __func__, gpio_array_size);
+
+ if (gpio_array_size > 0) {
+ fctrl->power_info.gpio_conf =
+ kzalloc(sizeof(struct msm_camera_gpio_conf),
+ GFP_KERNEL);
+ if (!fctrl->power_info.gpio_conf) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+ gconf = fctrl->power_info.gpio_conf;
+
+ gpio_array = kzalloc(sizeof(uint16_t) * gpio_array_size,
+ GFP_KERNEL);
+ if (!gpio_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto free_gpio_conf;
+ }
+ for (i = 0; i < gpio_array_size; i++) {
+ gpio_array[i] = of_get_gpio(of_node, i);
+ if (((int16_t)gpio_array[i]) < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -EINVAL;
+ goto free_gpio_array;
+ }
+ CDBG("%s gpio_array[%d] = %d\n", __func__, i,
+ gpio_array[i]);
+ }
+
+ rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf,
+ gpio_array, gpio_array_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_gpio_array;
+ }
+
+ rc = msm_camera_init_gpio_pin_tbl(of_node, gconf,
+ gpio_array, gpio_array_size);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_cam_gpio_req_tbl;
+ }
+
+ if (fctrl->flash_driver_type == FLASH_DRIVER_DEFAULT)
+ fctrl->flash_driver_type = FLASH_DRIVER_GPIO;
+ CDBG("%s:%d fctrl->flash_driver_type = %d", __func__, __LINE__,
+ fctrl->flash_driver_type);
+ }
+
+ return 0;
+
+free_cam_gpio_req_tbl:
+ kfree(gconf->cam_gpio_req_tbl);
+free_gpio_array:
+ kfree(gpio_array);
+free_gpio_conf:
+ kfree(fctrl->power_info.gpio_conf);
+ return rc;
+}
+
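+/*
+ * msm_flash_get_pmic_source_info() - parse the switch/flash/torch source
+ * phandles from the device tree, read their current limits and register
+ * the corresponding LED triggers.
+ */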
+static int32_t msm_flash_get_pmic_source_info(
+ struct device_node *of_node,
+ struct msm_flash_ctrl_t *fctrl)
+{
+ int32_t rc = 0;
+ uint32_t count = 0, i = 0;
+ struct device_node *flash_src_node = NULL;
+ struct device_node *torch_src_node = NULL;
+ struct device_node *switch_src_node = NULL;
+
+ switch_src_node = of_parse_phandle(of_node, "qcom,switch-source", 0);
+ if (!switch_src_node) {
+ CDBG("%s:%d switch_src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_string(switch_src_node,
+ "qcom,default-led-trigger",
+ &fctrl->switch_trigger_name);
+ if (rc < 0) {
+ rc = of_property_read_string(switch_src_node,
+ "linux,default-trigger",
+ &fctrl->switch_trigger_name);
+ if (rc < 0)
+ pr_err("default-trigger read failed\n");
+ }
+ of_node_put(switch_src_node);
+ switch_src_node = NULL;
+ if (!rc) {
+ CDBG("switch trigger %s\n",
+ fctrl->switch_trigger_name);
+ led_trigger_register_simple(
+ fctrl->switch_trigger_name,
+ &fctrl->switch_trigger);
+ }
+ }
+
+ if (of_get_property(of_node, "qcom,flash-source", &count)) {
+ count /= sizeof(uint32_t);
+ CDBG("count %d\n", count);
+ if (count > MAX_LED_TRIGGERS) {
+ pr_err("invalid count\n");
+ return -EINVAL;
+ }
+ fctrl->flash_num_sources = count;
+ CDBG("%s:%d flash_num_sources = %d",
+ __func__, __LINE__, fctrl->flash_num_sources);
+ for (i = 0; i < count; i++) {
+ flash_src_node = of_parse_phandle(of_node,
+ "qcom,flash-source", i);
+ if (!flash_src_node) {
+ pr_err("flash_src_node NULL\n");
+ continue;
+ }
+
+ rc = of_property_read_string(flash_src_node,
+ "qcom,default-led-trigger",
+ &fctrl->flash_trigger_name[i]);
+ if (rc < 0) {
+ rc = of_property_read_string(flash_src_node,
+ "linux,default-trigger",
+ &fctrl->flash_trigger_name[i]);
+ if (rc < 0) {
+ pr_err("default-trigger read failed\n");
+ of_node_put(flash_src_node);
+ continue;
+ }
+ }
+
+ CDBG("default trigger %s\n",
+ fctrl->flash_trigger_name[i]);
+
+ /* Read operational-current */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,current",
+ &fctrl->flash_op_current[i]);
+ if (rc < 0) {
+ pr_err("current: read failed\n");
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ /* Read max-current */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,max-current",
+ &fctrl->flash_max_current[i]);
+ if (rc < 0) {
+				pr_err("max-current: read failed\n");
+ of_node_put(flash_src_node);
+ continue;
+ }
+
+ /* Read max-duration */
+ rc = of_property_read_u32(flash_src_node,
+ "qcom,duration",
+ &fctrl->flash_max_duration[i]);
+			if (rc < 0) {
+				pr_err("duration: read failed\n");
+				/* Non-fatal; this property is optional */
+			}
+
+ of_node_put(flash_src_node);
+
+			CDBG("op_current[%d] %d\n",
+				i, fctrl->flash_op_current[i]);
+
+ led_trigger_register_simple(
+ fctrl->flash_trigger_name[i],
+ &fctrl->flash_trigger[i]);
+ }
+ if (fctrl->flash_driver_type == FLASH_DRIVER_DEFAULT)
+ fctrl->flash_driver_type = FLASH_DRIVER_PMIC;
+ CDBG("%s:%d fctrl->flash_driver_type = %d", __func__, __LINE__,
+ fctrl->flash_driver_type);
+ }
+
+ if (of_get_property(of_node, "qcom,torch-source", &count)) {
+ count /= sizeof(uint32_t);
+ CDBG("count %d\n", count);
+ if (count > MAX_LED_TRIGGERS) {
+ pr_err("invalid count\n");
+ return -EINVAL;
+ }
+ fctrl->torch_num_sources = count;
+ CDBG("%s:%d torch_num_sources = %d",
+ __func__, __LINE__, fctrl->torch_num_sources);
+ for (i = 0; i < count; i++) {
+ torch_src_node = of_parse_phandle(of_node,
+ "qcom,torch-source", i);
+ if (!torch_src_node) {
+ pr_err("torch_src_node NULL\n");
+ continue;
+ }
+
+ rc = of_property_read_string(torch_src_node,
+ "qcom,default-led-trigger",
+ &fctrl->torch_trigger_name[i]);
+ if (rc < 0) {
+ rc = of_property_read_string(torch_src_node,
+ "linux,default-trigger",
+ &fctrl->torch_trigger_name[i]);
+ if (rc < 0) {
+ pr_err("default-trigger read failed\n");
+ of_node_put(torch_src_node);
+ continue;
+ }
+ }
+
+ CDBG("default trigger %s\n",
+ fctrl->torch_trigger_name[i]);
+
+ /* Read operational-current */
+ rc = of_property_read_u32(torch_src_node,
+ "qcom,current",
+ &fctrl->torch_op_current[i]);
+ if (rc < 0) {
+ pr_err("current: read failed\n");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ /* Read max-current */
+ rc = of_property_read_u32(torch_src_node,
+ "qcom,max-current",
+ &fctrl->torch_max_current[i]);
+ if (rc < 0) {
+				pr_err("max-current: read failed\n");
+ of_node_put(torch_src_node);
+ continue;
+ }
+
+ of_node_put(torch_src_node);
+
+			CDBG("op_current[%d] %d\n",
+				i, fctrl->torch_op_current[i]);
+
+ led_trigger_register_simple(
+ fctrl->torch_trigger_name[i],
+ &fctrl->torch_trigger[i]);
+ }
+ if (fctrl->flash_driver_type == FLASH_DRIVER_DEFAULT)
+ fctrl->flash_driver_type = FLASH_DRIVER_PMIC;
+ CDBG("%s:%d fctrl->flash_driver_type = %d", __func__, __LINE__,
+ fctrl->flash_driver_type);
+ }
+
+ return 0;
+}
+
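+/*
+ * Top-level device tree parser for the flash subdev: reads the cell index,
+ * the CCI master, the GPIO configuration and the PMIC source information.
+ */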
+static int32_t msm_flash_get_dt_data(struct device_node *of_node,
+ struct msm_flash_ctrl_t *fctrl)
+{
+ int32_t rc = 0;
+
+ CDBG("called\n");
+
+ if (!of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+	/* Read the cell index and use it as the platform device id */
+	rc = of_property_read_u32(of_node, "cell-index", &fctrl->pdev->id);
+	if (rc < 0) {
+		pr_err("failed rc %d\n", rc);
+		return rc;
+	}
+
+	CDBG("subdev id %d\n", fctrl->pdev->id);
+
+ fctrl->flash_driver_type = FLASH_DRIVER_DEFAULT;
+
+ /* Read the CCI master. Use M0 if not available in the node */
+ rc = of_property_read_u32(of_node, "qcom,cci-master",
+ &fctrl->cci_i2c_master);
+ CDBG("%s qcom,cci-master %d, rc %d\n", __func__, fctrl->cci_i2c_master,
+ rc);
+ if (rc < 0) {
+ /* Set default master 0 */
+ fctrl->cci_i2c_master = MASTER_0;
+ rc = 0;
+ } else {
+ fctrl->flash_driver_type = FLASH_DRIVER_I2C;
+ }
+
+ /* Read the gpio information from device tree */
+ rc = msm_flash_get_gpio_dt_data(of_node, fctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_flash_get_gpio_dt_data failed rc %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+
+ /* Read the flash and torch source info from device tree node */
+ rc = msm_flash_get_pmic_source_info(of_node, fctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_flash_get_pmic_source_info failed rc %d\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
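+/*
+ * Compat ioctl helper: convert the 32-bit msm_flash_cfg_data_t32 payload
+ * into the native msm_flash_cfg_data_t, call the regular subdev ioctl and
+ * copy the per-LED current and duration values back into the 32-bit
+ * structure.
+ */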
+static long msm_flash_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ int32_t i = 0;
+ int32_t rc = 0;
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ struct msm_flash_cfg_data_t32 *u32;
+ struct msm_flash_cfg_data_t flash_data;
+ struct msm_flash_init_info_t32 flash_init_info32;
+ struct msm_flash_init_info_t flash_init_info;
+
+ CDBG("Enter");
+
+ if (!file || !arg) {
+ pr_err("%s:failed NULL parameter\n", __func__);
+ return -EINVAL;
+ }
+ vdev = video_devdata(file);
+ sd = vdev_to_v4l2_subdev(vdev);
+ u32 = (struct msm_flash_cfg_data_t32 *)arg;
+
+ flash_data.cfg_type = u32->cfg_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data.flash_current[i] = u32->flash_current[i];
+ flash_data.flash_duration[i] = u32->flash_duration[i];
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_FLASH_CFG32:
+ cmd = VIDIOC_MSM_FLASH_CFG;
+ switch (flash_data.cfg_type) {
+ case CFG_FLASH_OFF:
+ case CFG_FLASH_LOW:
+ case CFG_FLASH_HIGH:
+ flash_data.cfg.settings = compat_ptr(u32->cfg.settings);
+ break;
+ case CFG_FLASH_INIT:
+ flash_data.cfg.flash_init_info = &flash_init_info;
+ if (copy_from_user(&flash_init_info32,
+ (void *)compat_ptr(u32->cfg.flash_init_info),
+ sizeof(struct msm_flash_init_info_t32))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ flash_init_info.flash_driver_type =
+ flash_init_info32.flash_driver_type;
+ flash_init_info.slave_addr =
+ flash_init_info32.slave_addr;
+ flash_init_info.i2c_freq_mode =
+ flash_init_info32.i2c_freq_mode;
+ flash_init_info.settings =
+ compat_ptr(flash_init_info32.settings);
+ flash_init_info.power_setting_array =
+ compat_ptr(
+ flash_init_info32.power_setting_array);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ return msm_flash_subdev_ioctl(sd, cmd, arg);
+ }
+
+ rc = msm_flash_subdev_ioctl(sd, cmd, &flash_data);
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ u32->flash_current[i] = flash_data.flash_current[i];
+ u32->flash_duration[i] = flash_data.flash_duration[i];
+ }
+ CDBG("Exit");
+ return rc;
+}
+
+static long msm_flash_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_flash_subdev_do_ioctl);
+}
+#endif
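+
+/*
+ * Platform probe: parse the flash device tree node, set up the CCI client,
+ * register the v4l2 subdev and, for PMIC flash, create the torch classdev.
+ */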
+static int32_t msm_flash_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_flash_ctrl_t *flash_ctrl = NULL;
+ struct msm_camera_cci_client *cci_client = NULL;
+
+ CDBG("Enter");
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ flash_ctrl = kzalloc(sizeof(struct msm_flash_ctrl_t), GFP_KERNEL);
+ if (!flash_ctrl) {
+ pr_err("%s:%d failed no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ memset(flash_ctrl, 0, sizeof(struct msm_flash_ctrl_t));
+
+ flash_ctrl->pdev = pdev;
+
+ rc = msm_flash_get_dt_data(pdev->dev.of_node, flash_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d msm_flash_get_dt_data failed\n",
+ __func__, __LINE__);
+ kfree(flash_ctrl);
+ return -EINVAL;
+ }
+
+ flash_ctrl->flash_state = MSM_CAMERA_FLASH_RELEASE;
+ flash_ctrl->power_info.dev = &flash_ctrl->pdev->dev;
+ flash_ctrl->flash_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ flash_ctrl->flash_mutex = &msm_flash_mutex;
+ flash_ctrl->flash_i2c_client.i2c_func_tbl = &msm_sensor_cci_func_tbl;
+ flash_ctrl->flash_i2c_client.cci_client = kzalloc(
+ sizeof(struct msm_camera_cci_client), GFP_KERNEL);
+ if (!flash_ctrl->flash_i2c_client.cci_client) {
+ kfree(flash_ctrl);
+ pr_err("failed no memory\n");
+ return -ENOMEM;
+ }
+
+ cci_client = flash_ctrl->flash_i2c_client.cci_client;
+ cci_client->cci_subdev = msm_cci_get_subdev();
+ cci_client->cci_i2c_master = flash_ctrl->cci_i2c_master;
+
+ /* Initialize sub device */
+ v4l2_subdev_init(&flash_ctrl->msm_sd.sd, &msm_flash_subdev_ops);
+ v4l2_set_subdevdata(&flash_ctrl->msm_sd.sd, flash_ctrl);
+
+ flash_ctrl->msm_sd.sd.internal_ops = &msm_flash_internal_ops;
+ flash_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(flash_ctrl->msm_sd.sd.name,
+ ARRAY_SIZE(flash_ctrl->msm_sd.sd.name),
+ "msm_camera_flash");
+ media_entity_init(&flash_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ flash_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ flash_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_FLASH;
+ flash_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x1;
+ msm_sd_register(&flash_ctrl->msm_sd);
+
+ CDBG("%s:%d flash sd name = %s", __func__, __LINE__,
+ flash_ctrl->msm_sd.sd.entity.name);
+ msm_cam_copy_v4l2_subdev_fops(&msm_flash_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_flash_v4l2_subdev_fops.compat_ioctl32 =
+ msm_flash_subdev_fops_ioctl;
+#endif
+ flash_ctrl->msm_sd.sd.devnode->fops = &msm_flash_v4l2_subdev_fops;
+
+ if (flash_ctrl->flash_driver_type == FLASH_DRIVER_PMIC)
+ rc = msm_torch_create_classdev(pdev, flash_ctrl);
+
+ CDBG("probe success\n");
+ return rc;
+}
+
+MODULE_DEVICE_TABLE(of, msm_flash_dt_match);
+
+static struct platform_driver msm_flash_platform_driver = {
+ .probe = msm_flash_platform_probe,
+ .driver = {
+ .name = "qcom,camera-flash",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_flash_dt_match,
+ },
+};
+
+static int __init msm_flash_init_module(void)
+{
+ int32_t rc = 0;
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_flash_platform_driver);
+ if (rc)
+		pr_err("platform probe for flash failed\n");
+
+ return rc;
+}
+
+static void __exit msm_flash_exit_module(void)
+{
+ platform_driver_unregister(&msm_flash_platform_driver);
+ return;
+}
+
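+/* Per-driver-type operation tables for the PMIC, GPIO and I2C back ends */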
+static struct msm_flash_table msm_pmic_flash_table = {
+ .flash_driver_type = FLASH_DRIVER_PMIC,
+ .func_tbl = {
+ .camera_flash_init = NULL,
+ .camera_flash_release = msm_flash_release,
+ .camera_flash_off = msm_flash_off,
+ .camera_flash_low = msm_flash_low,
+ .camera_flash_high = msm_flash_high,
+ },
+};
+
+static struct msm_flash_table msm_gpio_flash_table = {
+ .flash_driver_type = FLASH_DRIVER_GPIO,
+ .func_tbl = {
+ .camera_flash_init = msm_flash_gpio_init,
+ .camera_flash_release = msm_flash_release,
+ .camera_flash_off = msm_flash_off,
+ .camera_flash_low = msm_flash_low,
+ .camera_flash_high = msm_flash_high,
+ },
+};
+
+static struct msm_flash_table msm_i2c_flash_table = {
+ .flash_driver_type = FLASH_DRIVER_I2C,
+ .func_tbl = {
+ .camera_flash_init = msm_flash_i2c_init,
+ .camera_flash_release = msm_flash_i2c_release,
+ .camera_flash_off = msm_flash_i2c_write_setting_array,
+ .camera_flash_low = msm_flash_i2c_write_setting_array,
+ .camera_flash_high = msm_flash_i2c_write_setting_array,
+ },
+};
+
+module_init(msm_flash_init_module);
+module_exit(msm_flash_exit_module);
+MODULE_DESCRIPTION("MSM FLASH");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
new file mode 100644
index 000000000000..c82e48cddcaf
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MSM_FLASH_H
+#define MSM_FLASH_H
+
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/msm_cam_sensor.h>
+#include <soc/qcom/camera2.h>
+#include "msm_camera_i2c.h"
+#include "msm_sd.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+enum msm_camera_flash_state_t {
+ MSM_CAMERA_FLASH_INIT,
+ MSM_CAMERA_FLASH_RELEASE,
+};
+
+struct msm_flash_ctrl_t;
+
+struct msm_flash_func_t {
+ int32_t (*camera_flash_init)(struct msm_flash_ctrl_t *,
+ struct msm_flash_cfg_data_t *);
+ int32_t (*camera_flash_release)(struct msm_flash_ctrl_t *);
+ int32_t (*camera_flash_off)(struct msm_flash_ctrl_t *,
+ struct msm_flash_cfg_data_t *);
+ int32_t (*camera_flash_low)(struct msm_flash_ctrl_t *,
+ struct msm_flash_cfg_data_t *);
+ int32_t (*camera_flash_high)(struct msm_flash_ctrl_t *,
+ struct msm_flash_cfg_data_t *);
+};
+
+struct msm_flash_table {
+ enum msm_flash_driver_type flash_driver_type;
+ struct msm_flash_func_t func_tbl;
+};
+
+struct msm_flash_reg_t {
+ struct msm_camera_i2c_reg_setting *init_setting;
+ struct msm_camera_i2c_reg_setting *off_setting;
+ struct msm_camera_i2c_reg_setting *release_setting;
+ struct msm_camera_i2c_reg_setting *low_setting;
+ struct msm_camera_i2c_reg_setting *high_setting;
+};
+
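+/* Per-device flash controller state shared by the PMIC, GPIO and I2C paths */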
+struct msm_flash_ctrl_t {
+ struct msm_camera_i2c_client flash_i2c_client;
+ struct msm_sd_subdev msm_sd;
+ struct platform_device *pdev;
+ struct msm_flash_func_t *func_tbl;
+ struct msm_camera_power_ctrl_t power_info;
+
+ /* Switch node to trigger led */
+ const char *switch_trigger_name;
+ struct led_trigger *switch_trigger;
+
+ /* Flash */
+ uint32_t flash_num_sources;
+ const char *flash_trigger_name[MAX_LED_TRIGGERS];
+ struct led_trigger *flash_trigger[MAX_LED_TRIGGERS];
+ uint32_t flash_op_current[MAX_LED_TRIGGERS];
+ uint32_t flash_max_current[MAX_LED_TRIGGERS];
+ uint32_t flash_max_duration[MAX_LED_TRIGGERS];
+
+ /* Torch */
+ uint32_t torch_num_sources;
+ const char *torch_trigger_name[MAX_LED_TRIGGERS];
+ struct led_trigger *torch_trigger[MAX_LED_TRIGGERS];
+ uint32_t torch_op_current[MAX_LED_TRIGGERS];
+ uint32_t torch_max_current[MAX_LED_TRIGGERS];
+
+ void *data;
+ enum msm_camera_device_type_t flash_device_type;
+ enum cci_i2c_master_t cci_i2c_master;
+ uint32_t subdev_id;
+ struct mutex *flash_mutex;
+ struct msm_sensor_power_setting_array power_setting_array;
+
+ /* flash driver type */
+ enum msm_flash_driver_type flash_driver_type;
+
+ /* flash state */
+ enum msm_camera_flash_state_t flash_state;
+};
+
+int msm_flash_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id);
+
+int msm_flash_probe(struct platform_device *pdev, const void *data);
+
+int32_t msm_flash_create_v4lsubdev(struct platform_device *pdev,
+ void *data);
+int32_t msm_led_i2c_flash_create_v4lsubdev(void *data);
+
+int32_t msm_led_i2c_trigger_get_subdev_id(struct msm_flash_ctrl_t *fctrl,
+ void *arg);
+
+int32_t msm_led_i2c_trigger_config(struct msm_flash_ctrl_t *fctrl,
+ void *data);
+
+int msm_flash_led_init(struct msm_flash_ctrl_t *fctrl);
+int msm_flash_led_release(struct msm_flash_ctrl_t *fctrl);
+int msm_flash_led_off(struct msm_flash_ctrl_t *fctrl);
+int msm_flash_led_low(struct msm_flash_ctrl_t *fctrl);
+int msm_flash_led_high(struct msm_flash_ctrl_t *fctrl);
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/Makefile b/drivers/media/platform/msm/camera_v2/sensor/io/Makefile
new file mode 100644
index 000000000000..ec958697ae13
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSMB_CAMERA) += msm_camera_cci_i2c.o msm_camera_qup_i2c.o msm_camera_spi.o msm_camera_dt_util.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
new file mode 100644
index 000000000000..15705179301e
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
@@ -0,0 +1,543 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/camera2.h>
+#include "msm_camera_i2c.h"
+#include "msm_cci.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#define S_I2C_DBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define I2C_COMPARE_MATCH 0
+#define I2C_COMPARE_MISMATCH 1
+#define I2C_POLL_MAX_ITERATION 20
+
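+/*
+ * Read a single byte or word register over CCI; the address width comes
+ * from the client and the read length from the requested data type.
+ */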
+int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ unsigned char buf[client->addr_type+data_type];
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = cci_ctrl.status;
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA)
+ *data = buf[0];
+ else
+ *data = buf[0] << 8 | buf[1];
+
+ S_I2C_DBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char *buf = NULL;
+ int i;
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || num_byte == 0)
+ return rc;
+
+ buf = kzalloc(num_byte, GFP_KERNEL);
+ if (!buf) {
+ pr_err("%s:%d no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ cci_ctrl.cmd = MSM_CCI_I2C_READ;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+ cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = cci_ctrl.status;
+
+ S_I2C_DBG("%s addr = 0x%x", __func__, addr);
+ for (i = 0; i < num_byte; i++) {
+ data[i] = buf[i];
+ S_I2C_DBG("Byte %d: 0x%x\n", i, buf[i]);
+ S_I2C_DBG("Data: 0x%x\n", data[i]);
+ }
+ kfree(buf);
+ return rc;
+}
+
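+/* Write a single byte or word register over CCI */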
+int32_t msm_camera_cci_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ struct msm_camera_cci_ctrl cci_ctrl;
+ struct msm_camera_i2c_reg_array reg_conf_tbl;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ CDBG("%s:%d reg addr = 0x%x data type: %d\n",
+ __func__, __LINE__, addr, data_type);
+ reg_conf_tbl.reg_addr = addr;
+ reg_conf_tbl.reg_data = data;
+ reg_conf_tbl.delay = 0;
+ cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting = &reg_conf_tbl;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = data_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = 1;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = cci_ctrl.status;
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ uint32_t i = 0;
+ struct msm_camera_cci_ctrl cci_ctrl;
+ struct msm_camera_i2c_reg_array *reg_conf_tbl = NULL;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || num_byte == 0)
+ return rc;
+
+ if (num_byte > I2C_SEQ_REG_DATA_MAX) {
+		pr_err("%s: num_byte=%d exceeds max supported %d\n",
+			__func__, num_byte, I2C_SEQ_REG_DATA_MAX);
+ return rc;
+ }
+
+ S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n",
+ __func__, addr, num_byte);
+
+ reg_conf_tbl = kzalloc(num_byte *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!reg_conf_tbl) {
+ pr_err("%s:%d no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ reg_conf_tbl[0].reg_addr = addr;
+ for (i = 0; i < num_byte; i++) {
+ reg_conf_tbl[i].reg_data = data[i];
+ reg_conf_tbl[i].delay = 0;
+ }
+ cci_ctrl.cmd = MSM_CCI_I2C_WRITE_SEQ;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting = reg_conf_tbl;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = MSM_CAMERA_I2C_BYTE_DATA;
+ cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = num_byte;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ CDBG("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = cci_ctrl.status;
+ kfree(reg_conf_tbl);
+ reg_conf_tbl = NULL;
+ return rc;
+}
+
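+/*
+ * Common helper for the table write variants: hand the whole register
+ * table to the CCI subdev in one request and apply the optional
+ * post-write delay.
+ */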
+static int32_t msm_camera_cci_i2c_write_table_cmd(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting,
+ enum msm_cci_cmd_type cmd)
+{
+ int32_t rc = -EFAULT;
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ cci_ctrl.cmd = cmd;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting =
+ write_setting->reg_setting;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = write_setting->data_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = write_setting->size;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = cci_ctrl.status;
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000, (write_setting->delay
+ * 1000) + 1000);
+
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ return msm_camera_cci_i2c_write_table_cmd(client, write_setting,
+ MSM_CCI_I2C_WRITE_ASYNC);
+}
+
+int32_t msm_camera_cci_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ return msm_camera_cci_i2c_write_table_cmd(client, write_setting,
+ MSM_CCI_I2C_WRITE_SYNC);
+}
+
+int32_t msm_camera_cci_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ return msm_camera_cci_i2c_write_table_cmd(client, write_setting,
+ MSM_CCI_I2C_WRITE_SYNC_BLOCK);
+}
+
+int32_t msm_camera_cci_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ return msm_camera_cci_i2c_write_table_cmd(client, write_setting,
+ MSM_CCI_I2C_WRITE);
+}
+
+int32_t msm_camera_cci_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ struct msm_camera_i2c_seq_reg_array *reg_setting;
+ uint16_t client_addr_type;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)) {
+		pr_err("%s Invalid addr type %d\n", __func__,
+ write_setting->addr_type);
+ return rc;
+ }
+
+ reg_setting = write_setting->reg_setting;
+ client_addr_type = client->addr_type;
+ client->addr_type = write_setting->addr_type;
+
+ for (i = 0; i < write_setting->size; i++) {
+ rc = msm_camera_cci_i2c_write_seq(client, reg_setting->reg_addr,
+ reg_setting->reg_data, reg_setting->reg_data_size);
+		if (rc < 0) {
+			client->addr_type = client_addr_type;
+			return rc;
+		}
+ reg_setting++;
+ }
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000, (write_setting->delay
+ * 1000) + 1000);
+
+ client->addr_type = client_addr_type;
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int32_t rc = -EFAULT;
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ cci_ctrl.cmd = MSM_CCI_I2C_WRITE;
+ cci_ctrl.cci_info = client->cci_client;
+ cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting =
+ write_setting->reg_setting;
+ cci_ctrl.cfg.cci_i2c_write_cfg.data_type = write_setting->data_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = client->addr_type;
+ cci_ctrl.cfg.cci_i2c_write_cfg.size = write_setting->size;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ rc = cci_ctrl.status;
+ return rc;
+}
+
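+/*
+ * Read back a register and compare it against the expected value or bit
+ * mask; returns I2C_COMPARE_MATCH or I2C_COMPARE_MISMATCH.
+ */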
+static int32_t msm_camera_cci_i2c_compare(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ uint16_t reg_data = 0;
+ int data_len = 0;
+ switch (data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ data_len = data_type;
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ data_len = MSM_CAMERA_I2C_BYTE_DATA;
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ data_len = MSM_CAMERA_I2C_WORD_DATA;
+ break;
+ default:
+		pr_err("%s: Unsupported data type: %d\n", __func__, data_type);
+ break;
+ }
+
+ rc = msm_camera_cci_i2c_read(client, addr, &reg_data, data_len);
+ if (rc < 0)
+ return rc;
+
+ rc = I2C_COMPARE_MISMATCH;
+ switch (data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ if (data == reg_data)
+ rc = I2C_COMPARE_MATCH;
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ if ((reg_data & data) == data)
+ rc = I2C_COMPARE_MATCH;
+ break;
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ if (!(reg_data & data))
+ rc = I2C_COMPARE_MATCH;
+ break;
+ default:
+		pr_err("%s: Unsupported data type: %d\n", __func__, data_type);
+ break;
+ }
+
+ S_I2C_DBG("%s: Register and data match result %d\n", __func__,
+ rc);
+ return rc;
+}
+
+int32_t msm_camera_cci_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ S_I2C_DBG("%s: addr: 0x%x data: 0x%x dt: %d\n",
+ __func__, addr, data, data_type);
+
+ rc = msm_camera_cci_i2c_compare(client,
+ addr, data, data_type);
+ return rc;
+}
+
+static int32_t msm_camera_cci_i2c_set_mask(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t mask,
+ enum msm_camera_i2c_data_type data_type, uint16_t set_mask)
+{
+ int32_t rc;
+ uint16_t reg_data;
+
+ rc = msm_camera_cci_i2c_read(client, addr, &reg_data, data_type);
+ if (rc < 0) {
+ S_I2C_DBG("%s read fail\n", __func__);
+ return rc;
+ }
+ S_I2C_DBG("%s addr: 0x%x data: 0x%x setmask: 0x%x\n",
+ __func__, addr, reg_data, mask);
+
+ if (set_mask)
+ reg_data |= mask;
+ else
+ reg_data &= ~mask;
+ S_I2C_DBG("%s write: 0x%x\n", __func__, reg_data);
+
+ rc = msm_camera_cci_i2c_write(client, addr, reg_data, data_type);
+ if (rc < 0)
+ S_I2C_DBG("%s write fail\n", __func__);
+
+ return rc;
+}
+
+static int32_t msm_camera_cci_i2c_set_write_mask_data(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data, int16_t mask,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ uint16_t reg_data;
+ CDBG("%s\n", __func__);
+ if (mask == -1)
+ return 0;
+ if (mask == 0) {
+ rc = msm_camera_cci_i2c_write(client, addr, data, data_type);
+ } else {
+ rc = msm_camera_cci_i2c_read(client, addr, &reg_data,
+ data_type);
+ if (rc < 0) {
+ CDBG("%s read fail\n", __func__);
+ return rc;
+ }
+ reg_data &= ~mask;
+ reg_data |= (data & mask);
+ rc = msm_camera_cci_i2c_write(client, addr, reg_data,
+ data_type);
+ if (rc < 0)
+ CDBG("%s write fail\n", __func__);
+ }
+ return rc;
+}
+
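+/*
+ * Walk a legacy msm_camera_i2c_reg_conf table and dispatch each entry to
+ * the matching write, poll or bit-mask helper.
+ */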
+int32_t msm_camera_cci_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ for (i = 0; i < size; i++) {
+ enum msm_camera_i2c_data_type dt;
+ if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
+ rc = msm_camera_cci_i2c_poll(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ reg_conf_tbl->dt);
+ } else {
+ if (reg_conf_tbl->dt == 0)
+ dt = data_type;
+ else
+ dt = reg_conf_tbl->dt;
+ switch (dt) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ rc = msm_camera_cci_i2c_write(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data, dt);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ rc = msm_camera_cci_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ rc = msm_camera_cci_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ rc = msm_camera_cci_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ rc = msm_camera_cci_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
+ rc = msm_camera_cci_i2c_set_write_mask_data(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ reg_conf_tbl->mask,
+ MSM_CAMERA_I2C_BYTE_DATA);
+ break;
+ default:
+				pr_err("%s: Unsupported data type: %d\n",
+ __func__, dt);
+ break;
+ }
+ }
+ if (rc < 0)
+ break;
+ reg_conf_tbl++;
+ }
+ return rc;
+}
+
+int32_t msm_sensor_cci_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd)
+{
+ int32_t rc = 0;
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ CDBG("%s line %d\n", __func__, __LINE__);
+ cci_ctrl.cmd = cci_cmd;
+ cci_ctrl.cci_info = client->cci_client;
+ rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s line %d rc = %d\n", __func__, __LINE__, rc);
+ return rc;
+ }
+ return cci_ctrl.status;
+}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
new file mode 100644
index 000000000000..02e21b512313
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.c
@@ -0,0 +1,1562 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_i2c_mux.h"
+#include "msm_cci.h"
+
+#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
+/*#define CONFIG_MSM_CAMERA_DT_DEBUG*/
+
+#define VALIDATE_VOLTAGE(min, max, config_val) ((config_val) && \
+	((config_val) >= (min)) && ((config_val) <= (max)))
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
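+/*
+ * Map every SENSOR_VREG entry of the power sequence to its regulator by
+ * name and, when a valid config_val is given, pin that regulator's min and
+ * max voltage to it.
+ */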
+int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
+ int num_vreg, struct msm_sensor_power_setting *power_setting,
+ uint16_t power_setting_size)
+{
+ uint16_t i = 0;
+ int j = 0;
+
+ /* Validate input parameters */
+ if (!cam_vreg || !power_setting) {
+ pr_err("%s:%d failed: cam_vreg %p power_setting %p", __func__,
+ __LINE__, cam_vreg, power_setting);
+ return -EINVAL;
+ }
+
+ /* Validate size of num_vreg */
+ if (num_vreg <= 0) {
+ pr_err("failed: num_vreg %d", num_vreg);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < power_setting_size; i++) {
+ if (power_setting[i].seq_type != SENSOR_VREG)
+ continue;
+
+ switch (power_setting[i].seq_val) {
+ case CAM_VDIG:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name, "cam_vdig")) {
+ CDBG("%s:%d i %d j %d cam_vdig\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ case CAM_VIO:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name, "cam_vio")) {
+ CDBG("%s:%d i %d j %d cam_vio\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ case CAM_VANA:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name, "cam_vana")) {
+ CDBG("%s:%d i %d j %d cam_vana\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ case CAM_VAF:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name, "cam_vaf")) {
+ CDBG("%s:%d i %d j %d cam_vaf\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ case CAM_V_CUSTOM1:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name,
+ "cam_v_custom1")) {
+ CDBG("%s:%d i %d j %d cam_vcustom1\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ case CAM_V_CUSTOM2:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(cam_vreg[j].reg_name,
+ "cam_v_custom2")) {
+ CDBG("%s:%d i %d j %d cam_vcustom2\n",
+ __func__, __LINE__, i, j);
+ power_setting[i].seq_val = j;
+ if (VALIDATE_VOLTAGE(
+ cam_vreg[j].min_voltage,
+ cam_vreg[j].max_voltage,
+ power_setting[i].config_val)) {
+ cam_vreg[j].min_voltage =
+ cam_vreg[j].max_voltage =
+ power_setting[i].config_val;
+ }
+ break;
+ }
+ }
+ if (j == num_vreg)
+ power_setting[i].seq_val = INVALID_VREG;
+ break;
+
+ default:
+ pr_err("%s:%d invalid seq_val %d\n", __func__,
+ __LINE__, power_setting[i].seq_val);
+ break;
+ }
+ }
+
+ return 0;
+}
+
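+/*
+ * Collect the subdev indices of the sub modules (actuator, OIS, EEPROM,
+ * LED flash, CSIPHY, CSID, ...) referenced by the sensor node.
+ */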
+int msm_sensor_get_sub_module_index(struct device_node *of_node,
+ struct msm_sensor_info_t **s_info)
+{
+ int rc = 0, i = 0;
+ uint32_t val = 0, count = 0;
+ uint32_t *val_array = NULL;
+ struct device_node *src_node = NULL;
+ struct msm_sensor_info_t *sensor_info;
+
+ sensor_info = kzalloc(sizeof(*sensor_info), GFP_KERNEL);
+ if (!sensor_info) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ sensor_info->subdev_id[i] = -1;
+		/* A subdev may expose an extra interface for the same sub module */
+ sensor_info->subdev_intf[i] = -1;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,actuator-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,actuator cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_ACTUATOR] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,ois-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,ois cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_OIS] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,eeprom-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d eeprom src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,eeprom cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_EEPROM] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,eeprom-sd-index", &val);
+ if (rc != -EINVAL) {
+ CDBG("%s qcom,eeprom-sd-index %d, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_EEPROM] = val;
+ } else {
+ rc = 0;
+ }
+
+ src_node = of_parse_phandle(of_node, "qcom,led-flash-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,led flash cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_LED_FLASH] = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,strobe-flash-sd-index", &val);
+ if (rc != -EINVAL) {
+ CDBG("%s qcom,strobe-flash-sd-index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__, __LINE__, rc);
+ goto ERROR;
+ }
+ sensor_info->subdev_id[SUB_MODULE_STROBE_FLASH] = val;
+ } else {
+ rc = 0;
+ }
+
+ if (of_get_property(of_node, "qcom,csiphy-sd-index", &count)) {
+ count /= sizeof(uint32_t);
+ if (count > 2) {
+ pr_err("%s qcom,csiphy-sd-index count %d > 2\n",
+ __func__, count);
+			rc = -EINVAL;
+			goto ERROR;
+ }
+ val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,csiphy-sd-index",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ kfree(val_array);
+ goto ERROR;
+ }
+ for (i = 0; i < count; i++) {
+ sensor_info->subdev_id[SUB_MODULE_CSIPHY + i] =
+ val_array[i];
+ CDBG("%s csiphy_core[%d] = %d\n",
+ __func__, i, val_array[i]);
+ }
+ kfree(val_array);
+ } else {
+ pr_err("%s:%d qcom,csiphy-sd-index not present\n", __func__,
+ __LINE__);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ if (of_get_property(of_node, "qcom,csid-sd-index", &count)) {
+ count /= sizeof(uint32_t);
+ if (count > 2) {
+ pr_err("%s qcom,csid-sd-index count %d > 2\n",
+ __func__, count);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,csid-sd-index",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ kfree(val_array);
+ goto ERROR;
+ }
+ for (i = 0; i < count; i++) {
+ sensor_info->subdev_id
+ [SUB_MODULE_CSID + i] = val_array[i];
+ CDBG("%s csid_core[%d] = %d\n",
+ __func__, i, val_array[i]);
+ }
+ kfree(val_array);
+ } else {
+ pr_err("%s:%d qcom,csid-sd-index not present\n", __func__,
+ __LINE__);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+
+ *s_info = sensor_info;
+ return rc;
+ERROR:
+ kfree(sensor_info);
+ return rc;
+}
+
+int msm_sensor_get_dt_actuator_data(struct device_node *of_node,
+ struct msm_actuator_info **act_info)
+{
+ int rc = 0;
+ uint32_t val = 0;
+ struct msm_actuator_info *actuator_info;
+
+ rc = of_property_read_u32(of_node, "qcom,actuator-cam-name", &val);
+ CDBG("%s qcom,actuator-cam-name %d, rc %d\n", __func__, val, rc);
+ if (rc < 0)
+ return 0;
+
+ actuator_info = kzalloc(sizeof(*actuator_info), GFP_KERNEL);
+ if (!actuator_info) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR;
+ }
+
+ actuator_info->cam_name = val;
+
+ rc = of_property_read_u32(of_node, "qcom,actuator-vcm-pwd", &val);
+ CDBG("%s qcom,actuator-vcm-pwd %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ actuator_info->vcm_pwd = val;
+
+ rc = of_property_read_u32(of_node, "qcom,actuator-vcm-enable", &val);
+ CDBG("%s qcom,actuator-vcm-enable %d, rc %d\n", __func__, val, rc);
+ if (!rc)
+ actuator_info->vcm_enable = val;
+
+ *act_info = actuator_info;
+ return 0;
+ERROR:
+ kfree(actuator_info);
+ return rc;
+}
+
+int msm_sensor_get_dt_csi_data(struct device_node *of_node,
+ struct msm_camera_csi_lane_params **csi_lane_params)
+{
+ int rc = 0;
+ uint32_t val = 0;
+ struct msm_camera_csi_lane_params *clp;
+
+ clp = kzalloc(sizeof(*clp), GFP_KERNEL);
+ if (!clp) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ *csi_lane_params = clp;
+
+ rc = of_property_read_u32(of_node, "qcom,csi-lane-assign", &val);
+ CDBG("%s qcom,csi-lane-assign 0x%x, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ clp->csi_lane_assign = val;
+
+ rc = of_property_read_u32(of_node, "qcom,csi-lane-mask", &val);
+ CDBG("%s qcom,csi-lane-mask 0x%x, rc %d\n", __func__, val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR;
+ }
+ clp->csi_lane_mask = val;
+
+ return rc;
+ERROR:
+ kfree(clp);
+ return rc;
+}
+
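+/*
+ * Build the power-up sequence from the qcom,cam-power-seq-* properties and
+ * derive the power-down sequence as its reverse.
+ */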
+int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
+ struct camera_vreg_t *cam_vreg, int num_vreg,
+ struct msm_camera_power_ctrl_t *power_info)
+{
+ int rc = 0, i, j;
+ int count = 0;
+ const char *seq_name = NULL;
+ uint32_t *array = NULL;
+ struct msm_sensor_power_setting *ps;
+
+ struct msm_sensor_power_setting *power_setting;
+ uint16_t *power_setting_size, size = 0;
+ bool need_reverse = 0;
+
+ if (!power_info)
+ return -EINVAL;
+
+ power_setting = power_info->power_setting;
+ power_setting_size = &power_info->power_setting_size;
+
+	count = of_property_count_strings(of_node, "qcom,cam-power-seq-type");
+
+	CDBG("%s qcom,cam-power-seq-type count %d\n", __func__, count);
+
+	if (count <= 0) {
+		*power_setting_size = 0;
+		return 0;
+	}
+
+	*power_setting_size = count;
+
+ ps = kzalloc(sizeof(*ps) * count, GFP_KERNEL);
+ if (!ps) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ power_setting = ps;
+ power_info->power_setting = ps;
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,cam-power-seq-type", i,
+ &seq_name);
+ CDBG("%s seq_name[%d] = %s\n", __func__, i,
+ seq_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ if (!strcmp(seq_name, "sensor_vreg")) {
+ ps[i].seq_type = SENSOR_VREG;
+ CDBG("%s:%d seq_type[%d] %d\n", __func__, __LINE__,
+ i, ps[i].seq_type);
+ } else if (!strcmp(seq_name, "sensor_gpio")) {
+ ps[i].seq_type = SENSOR_GPIO;
+ CDBG("%s:%d seq_type[%d] %d\n", __func__, __LINE__,
+ i, ps[i].seq_type);
+ } else if (!strcmp(seq_name, "sensor_clk")) {
+ ps[i].seq_type = SENSOR_CLK;
+ CDBG("%s:%d seq_type[%d] %d\n", __func__, __LINE__,
+ i, ps[i].seq_type);
+ } else if (!strcmp(seq_name, "sensor_i2c_mux")) {
+ ps[i].seq_type = SENSOR_I2C_MUX;
+ CDBG("%s:%d seq_type[%d] %d\n", __func__, __LINE__,
+ i, ps[i].seq_type);
+ } else {
+ CDBG("%s: unrecognized seq-type\n", __func__);
+ rc = -EILSEQ;
+ goto ERROR1;
+ }
+ }
+
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,cam-power-seq-val", i,
+ &seq_name);
+ CDBG("%s seq_name[%d] = %s\n", __func__, i,
+ seq_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ switch (ps[i].seq_type) {
+ case SENSOR_VREG:
+ for (j = 0; j < num_vreg; j++) {
+ if (!strcmp(seq_name, cam_vreg[j].reg_name))
+ break;
+ }
+ if (j < num_vreg)
+ ps[i].seq_val = j;
+ else
+ rc = -EILSEQ;
+ break;
+ case SENSOR_GPIO:
+ if (!strcmp(seq_name, "sensor_gpio_reset"))
+ ps[i].seq_val = SENSOR_GPIO_RESET;
+ else if (!strcmp(seq_name, "sensor_gpio_standby"))
+ ps[i].seq_val = SENSOR_GPIO_STANDBY;
+ else if (!strcmp(seq_name, "sensor_gpio_vdig"))
+ ps[i].seq_val = SENSOR_GPIO_VDIG;
+ else if (!strcmp(seq_name, "sensor_gpio_vana"))
+ ps[i].seq_val = SENSOR_GPIO_VANA;
+ else if (!strcmp(seq_name, "sensor_gpio_vaf"))
+ ps[i].seq_val = SENSOR_GPIO_VAF;
+ else if (!strcmp(seq_name, "sensor_gpio_vio"))
+ ps[i].seq_val = SENSOR_GPIO_VIO;
+ else if (!strcmp(seq_name, "sensor_gpio_custom1"))
+ ps[i].seq_val = SENSOR_GPIO_CUSTOM1;
+ else if (!strcmp(seq_name, "sensor_gpio_custom2"))
+ ps[i].seq_val = SENSOR_GPIO_CUSTOM2;
+ else
+ rc = -EILSEQ;
+ break;
+ case SENSOR_CLK:
+ if (!strcmp(seq_name, "sensor_cam_mclk"))
+ ps[i].seq_val = SENSOR_CAM_MCLK;
+ else if (!strcmp(seq_name, "sensor_cam_clk"))
+ ps[i].seq_val = SENSOR_CAM_CLK;
+ else
+ rc = -EILSEQ;
+ break;
+ case SENSOR_I2C_MUX:
+ if (!strcmp(seq_name, "none"))
+ ps[i].seq_val = 0;
+ else
+ rc = -EILSEQ;
+ break;
+ default:
+ rc = -EILSEQ;
+ break;
+ }
+ if (rc < 0) {
+ CDBG("%s: unrecognized seq-val\n", __func__);
+ goto ERROR1;
+ }
+ }
+
+ array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-cfg-val",
+ array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ if (ps[i].seq_type == SENSOR_GPIO) {
+ if (array[i] == 0)
+ ps[i].config_val = GPIO_OUT_LOW;
+ else if (array[i] == 1)
+ ps[i].config_val = GPIO_OUT_HIGH;
+ } else {
+ ps[i].config_val = array[i];
+ }
+ CDBG("%s power_setting[%d].config_val = %ld\n", __func__, i,
+ ps[i].config_val);
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-delay",
+ array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ ps[i].delay = array[i];
+ CDBG("%s power_setting[%d].delay = %d\n", __func__,
+ i, ps[i].delay);
+ }
+ kfree(array);
+
+ size = *power_setting_size;
+
+ if (NULL != ps && 0 != size)
+ need_reverse = 1;
+
+ power_info->power_down_setting =
+ kzalloc(sizeof(*ps) * size, GFP_KERNEL);
+
+ if (!power_info->power_down_setting) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ memcpy(power_info->power_down_setting,
+ ps, sizeof(*ps) * size);
+
+ power_info->power_down_setting_size = size;
+
+ if (need_reverse) {
+ int c, end = size - 1;
+ struct msm_sensor_power_setting power_down_setting_t;
+ for (c = 0; c < size/2; c++) {
+ power_down_setting_t =
+ power_info->power_down_setting[c];
+ power_info->power_down_setting[c] =
+ power_info->power_down_setting[end];
+ power_info->power_down_setting[end] =
+ power_down_setting_t;
+ end--;
+ }
+ }
+ return rc;
+ERROR2:
+ kfree(array);
+ERROR1:
+	kfree(ps);
+	power_info->power_setting = NULL;
+	*power_setting_size = 0;
+ return rc;
+}
+
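+/*
+ * Build the GPIO request table from the qcom,gpio-req-tbl-* properties,
+ * translating table indices into real GPIO numbers via gpio_array.
+ */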
+int msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size)
+{
+ int rc = 0, i = 0;
+ uint32_t count = 0;
+ uint32_t *val_array = NULL;
+
+ if (!of_get_property(of_node, "qcom,gpio-req-tbl-num", &count))
+ return 0;
+
+ count /= sizeof(uint32_t);
+ if (!count) {
+ pr_err("%s qcom,gpio-req-tbl-num 0\n", __func__);
+ return 0;
+ }
+
+ val_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!val_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ gconf->cam_gpio_req_tbl = kzalloc(sizeof(struct gpio) * count,
+ GFP_KERNEL);
+ if (!gconf->cam_gpio_req_tbl) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+ gconf->cam_gpio_req_tbl_size = count;
+
+ rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-num",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ if (val_array[i] >= gpio_array_size) {
+ pr_err("%s gpio req tbl index %d invalid\n",
+ __func__, val_array[i]);
+			rc = -EINVAL;
+			goto ERROR2;
+ }
+ gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
+ CDBG("%s cam_gpio_req_tbl[%d].gpio = %d\n", __func__, i,
+ gconf->cam_gpio_req_tbl[i].gpio);
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,gpio-req-tbl-flags",
+ val_array, count);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ for (i = 0; i < count; i++) {
+ gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+ CDBG("%s cam_gpio_req_tbl[%d].flags = %ld\n", __func__, i,
+ gconf->cam_gpio_req_tbl[i].flags);
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,gpio-req-tbl-label", i,
+ &gconf->cam_gpio_req_tbl[i].label);
+ CDBG("%s cam_gpio_req_tbl[%d].label = %s\n", __func__, i,
+ gconf->cam_gpio_req_tbl[i].label);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ }
+ }
+
+ kfree(val_array);
+ return rc;
+
+ERROR2:
+ kfree(gconf->cam_gpio_req_tbl);
+ERROR1:
+ kfree(val_array);
+ gconf->cam_gpio_req_tbl_size = 0;
+ return rc;
+}
+
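+/*
+ * Resolve the named qcom,gpio-* properties (vana, vio, reset, standby, ...)
+ * into GPIO numbers and mark each resolved entry as valid.
+ */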
+int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size)
+{
+ int rc = 0, val = 0;
+
+ gconf->gpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
+ GFP_KERNEL);
+ if (!gconf->gpio_num_info) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-vana", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-vana failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-vana invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VANA] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_VANA] = 1;
+ CDBG("%s qcom,gpio-vana %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VANA]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-vio", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-vio failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-vio invalid %d\n",
+ __func__, __LINE__, val);
+			rc = -EINVAL;
+			goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VIO] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_VIO] = 1;
+ CDBG("%s qcom,gpio-vio %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VIO]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-vaf", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-vaf failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-vaf invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VAF] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_VAF] = 1;
+ CDBG("%s qcom,gpio-vaf %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VAF]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-vdig", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-vdig failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-vdig invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VDIG] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_VDIG] = 1;
+ CDBG("%s qcom,gpio-vdig %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_VDIG]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-reset", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-reset failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-reset invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_RESET] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_RESET] = 1;
+ CDBG("%s qcom,gpio-reset %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_RESET]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-standby", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-standby failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-standby invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_STANDBY] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_STANDBY] = 1;
+ CDBG("%s qcom,gpio-standby %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_STANDBY]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-af-pwdm", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-af-pwdm failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-af-pwdm invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_AF_PWDM] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_AF_PWDM] = 1;
+ CDBG("%s qcom,gpio-af-pwdm %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_AF_PWDM]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-flash-en", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-flash-en failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-flash-en invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_FL_EN] = 1;
+ CDBG("%s qcom,gpio-flash-en %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_EN]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-flash-now", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-flash-now failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-flash-now invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_FL_NOW] = 1;
+ CDBG("%s qcom,gpio-flash-now %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_NOW]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-flash-reset", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+			pr_err("%s:%d read qcom,gpio-flash-reset failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-flash-reset invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_RESET] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_FL_RESET] = 1;
+ CDBG("%s qcom,gpio-flash-reset %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_FL_RESET]);
+	} else {
+		rc = 0;
+	}
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-custom1", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-custom1 failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-custom1 invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM1] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_CUSTOM1] = 1;
+ CDBG("%s qcom,gpio-custom1 %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM1]);
+ } else {
+ rc = 0;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,gpio-custom2", &val);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s:%d read qcom,gpio-custom2 failed rc %d\n",
+ __func__, __LINE__, rc);
+ goto ERROR;
+ } else if (val >= gpio_array_size) {
+ pr_err("%s:%d qcom,gpio-custom2 invalid %d\n",
+ __func__, __LINE__, val);
+ rc = -EINVAL;
+ goto ERROR;
+ }
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM2] =
+ gpio_array[val];
+ gconf->gpio_num_info->valid[SENSOR_GPIO_CUSTOM2] = 1;
+ CDBG("%s qcom,gpio-custom2 %d\n", __func__,
+ gconf->gpio_num_info->gpio_num[SENSOR_GPIO_CUSTOM2]);
+ } else {
+ rc = 0;
+ }
+
+ return rc;
+
+ERROR:
+ kfree(gconf->gpio_num_info);
+ gconf->gpio_num_info = NULL;
+ return rc;
+}
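+
+/*
+ * Illustrative device tree sketch for the named GPIO properties parsed
+ * above (pin numbers and indices are hypothetical). Each qcom,gpio-*
+ * property holds an index into the node's gpios list (gpio_array), not a
+ * raw GPIO number:
+ *
+ *	gpios = <&msmgpio 26 0>, <&msmgpio 37 0>, <&msmgpio 36 0>;
+ *	qcom,gpio-reset = <1>;
+ *	qcom,gpio-standby = <2>;
+ */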
+
+int msm_camera_get_dt_vreg_data(struct device_node *of_node,
+ struct camera_vreg_t **cam_vreg, int *num_vreg)
+{
+ int rc = 0, i = 0;
+ int32_t count = 0;
+ uint32_t *vreg_array = NULL;
+ struct camera_vreg_t *vreg = NULL;
+ bool custom_vreg_name = false;
+
+ count = of_property_count_strings(of_node, "qcom,cam-vreg-name");
+ CDBG("%s qcom,cam-vreg-name count %d\n", __func__, count);
+
+ if (!count || (count == -EINVAL)) {
+ pr_err("%s:%d number of entries is 0 or not present in dts\n",
+ __func__, __LINE__);
+ return 0;
+ }
+
+ vreg = kzalloc(sizeof(*vreg) * count, GFP_KERNEL);
+ if (!vreg) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ *cam_vreg = vreg;
+ *num_vreg = count;
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,cam-vreg-name", i,
+ &vreg[i].reg_name);
+ CDBG("%s reg_name[%d] = %s\n", __func__, i,
+ vreg[i].reg_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ }
+
+ custom_vreg_name = of_property_read_bool(of_node,
+ "qcom,cam-custom-vreg-name");
+ if (custom_vreg_name) {
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_string_index(of_node,
+ "qcom,cam-custom-vreg-name", i,
+ &vreg[i].custom_vreg_name);
+ CDBG("%s sub reg_name[%d] = %s\n", __func__, i,
+ vreg[i].custom_vreg_name);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR1;
+ }
+ }
+ }
+
+ vreg_array = kzalloc(sizeof(uint32_t) * count, GFP_KERNEL);
+ if (!vreg_array) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ goto ERROR1;
+ }
+
+ for (i = 0; i < count; i++)
+ vreg[i].type = VREG_TYPE_DEFAULT;
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-type",
+ vreg_array, count);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ } else {
+ for (i = 0; i < count; i++) {
+ vreg[i].type = vreg_array[i];
+ CDBG("%s cam_vreg[%d].type = %d\n",
+ __func__, i, vreg[i].type);
+ }
+ }
+ } else {
+ CDBG("%s:%d no qcom,cam-vreg-type entries in dts\n",
+ __func__, __LINE__);
+ rc = 0;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-min-voltage",
+ vreg_array, count);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ } else {
+ for (i = 0; i < count; i++) {
+ vreg[i].min_voltage = vreg_array[i];
+ CDBG("%s cam_vreg[%d].min_voltage = %d\n",
+ __func__, i, vreg[i].min_voltage);
+ }
+ }
+ } else {
+ CDBG("%s:%d no qcom,cam-vreg-min-voltage entries in dts\n",
+ __func__, __LINE__);
+ rc = 0;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-max-voltage",
+ vreg_array, count);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ } else {
+ for (i = 0; i < count; i++) {
+ vreg[i].max_voltage = vreg_array[i];
+ CDBG("%s cam_vreg[%d].max_voltage = %d\n",
+ __func__, i, vreg[i].max_voltage);
+ }
+ }
+ } else {
+ CDBG("%s:%d no qcom,cam-vreg-max-voltage entries in dts\n",
+ __func__, __LINE__);
+ rc = 0;
+ }
+
+ rc = of_property_read_u32_array(of_node, "qcom,cam-vreg-op-mode",
+ vreg_array, count);
+ if (rc != -EINVAL) {
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto ERROR2;
+ } else {
+ for (i = 0; i < count; i++) {
+ vreg[i].op_mode = vreg_array[i];
+ CDBG("%s cam_vreg[%d].op_mode = %d\n",
+ __func__, i, vreg[i].op_mode);
+ }
+ }
+ } else {
+ CDBG("%s:%d no qcom,cam-vreg-op-mode entries in dts\n",
+ __func__, __LINE__);
+ rc = 0;
+ }
+
+ kfree(vreg_array);
+ return rc;
+ERROR2:
+ kfree(vreg_array);
+ERROR1:
+ kfree(vreg);
+ *num_vreg = 0;
+ return rc;
+}
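+
+/*
+ * Hypothetical device tree fragment matched by the parser above; the
+ * per-index arrays must have exactly as many entries as
+ * qcom,cam-vreg-name (all values below are illustrative only):
+ *
+ *	qcom,cam-vreg-name = "cam_vdig", "cam_vio";
+ *	qcom,cam-vreg-type = <0 0>;
+ *	qcom,cam-vreg-min-voltage = <1200000 1800000>;
+ *	qcom,cam-vreg-max-voltage = <1200000 1800000>;
+ *	qcom,cam-vreg-op-mode = <105000 0>;
+ */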
+
+static int msm_camera_enable_i2c_mux(struct msm_camera_i2c_conf *i2c_conf)
+{
+ struct v4l2_subdev *i2c_mux_sd =
+ dev_get_drvdata(&i2c_conf->mux_dev->dev);
+ v4l2_subdev_call(i2c_mux_sd, core, ioctl,
+ VIDIOC_MSM_I2C_MUX_INIT, NULL);
+ v4l2_subdev_call(i2c_mux_sd, core, ioctl,
+ VIDIOC_MSM_I2C_MUX_CFG, (void *)&i2c_conf->i2c_mux_mode);
+ return 0;
+}
+
+static int msm_camera_disable_i2c_mux(struct msm_camera_i2c_conf *i2c_conf)
+{
+ struct v4l2_subdev *i2c_mux_sd =
+ dev_get_drvdata(&i2c_conf->mux_dev->dev);
+ v4l2_subdev_call(i2c_mux_sd, core, ioctl,
+ VIDIOC_MSM_I2C_MUX_RELEASE, NULL);
+ return 0;
+}
+
+static int msm_camera_pinctrl_init(struct msm_camera_power_ctrl_t *ctrl)
+{
+ struct msm_pinctrl_info *sensor_pctrl = NULL;
+
+ sensor_pctrl = &ctrl->pinctrl_info;
+ sensor_pctrl->pinctrl = devm_pinctrl_get(ctrl->dev);
+ if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
+ pr_err("%s:%d Getting pinctrl handle failed\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ sensor_pctrl->gpio_state_active =
+ pinctrl_lookup_state(sensor_pctrl->pinctrl,
+ CAM_SENSOR_PINCTRL_STATE_DEFAULT);
+ if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_active)) {
+ pr_err("%s:%d Failed to get the active state pinctrl handle\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ sensor_pctrl->gpio_state_suspend
+ = pinctrl_lookup_state(sensor_pctrl->pinctrl,
+ CAM_SENSOR_PINCTRL_STATE_SLEEP);
+ if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_suspend)) {
+ pr_err("%s:%d Failed to get the suspend state pinctrl handle\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ return 0;
+}
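+
+/*
+ * Pinctrl sketch, assuming CAM_SENSOR_PINCTRL_STATE_DEFAULT and
+ * CAM_SENSOR_PINCTRL_STATE_SLEEP expand to "cam_default" and
+ * "cam_suspend"; the phandles below are hypothetical:
+ *
+ *	pinctrl-names = "cam_default", "cam_suspend";
+ *	pinctrl-0 = <&cam_sensor_mclk0_active>;
+ *	pinctrl-1 = <&cam_sensor_mclk0_suspend>;
+ */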
+
+int msm_camera_power_up(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_camera_device_type_t device_type,
+ struct msm_camera_i2c_client *sensor_i2c_client)
+{
+ int rc = 0, index = 0, no_gpio = 0, ret = 0;
+ struct msm_sensor_power_setting *power_setting = NULL;
+
+ CDBG("%s:%d\n", __func__, __LINE__);
+ if (!ctrl || !sensor_i2c_client) {
+ pr_err("failed ctrl %p sensor_i2c_client %p\n", ctrl,
+ sensor_i2c_client);
+ return -EINVAL;
+ }
+ if (ctrl->gpio_conf->cam_gpiomux_conf_tbl != NULL)
+ pr_err("%s:%d mux install\n", __func__, __LINE__);
+
+ ret = msm_camera_pinctrl_init(ctrl);
+ if (ret < 0) {
+ pr_err("%s:%d Initialization of pinctrl failed\n",
+ __func__, __LINE__);
+ ctrl->cam_pinctrl_status = 0;
+ } else {
+ ctrl->cam_pinctrl_status = 1;
+ }
+ rc = msm_camera_request_gpio_table(
+ ctrl->gpio_conf->cam_gpio_req_tbl,
+ ctrl->gpio_conf->cam_gpio_req_tbl_size, 1);
+ if (rc < 0)
+ no_gpio = rc;
+ if (ctrl->cam_pinctrl_status) {
+ ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+ ctrl->pinctrl_info.gpio_state_active);
+ if (ret)
+ pr_err("%s:%d cannot set pin to active state",
+ __func__, __LINE__);
+ }
+ for (index = 0; index < ctrl->power_setting_size; index++) {
+ CDBG("%s index %d\n", __func__, index);
+ power_setting = &ctrl->power_setting[index];
+ CDBG("%s type %d\n", __func__, power_setting->seq_type);
+ switch (power_setting->seq_type) {
+ case SENSOR_CLK:
+ if (power_setting->seq_val >= ctrl->clk_info_size) {
+ pr_err("%s clk index %d >= max %d\n", __func__,
+ power_setting->seq_val,
+ ctrl->clk_info_size);
+ goto power_up_failed;
+ }
+ if (power_setting->config_val)
+ ctrl->clk_info[power_setting->seq_val].
+ clk_rate = power_setting->config_val;
+
+ rc = msm_cam_clk_enable(ctrl->dev,
+ &ctrl->clk_info[0],
+ (struct clk **)&power_setting->data[0],
+ ctrl->clk_info_size,
+ 1);
+ if (rc < 0) {
+ pr_err("%s: clk enable failed\n",
+ __func__);
+ goto power_up_failed;
+ }
+ break;
+ case SENSOR_GPIO:
+ if (no_gpio) {
+ pr_err("%s: request gpio failed\n", __func__);
+ return no_gpio;
+ }
+ if (power_setting->seq_val >= SENSOR_GPIO_MAX ||
+ !ctrl->gpio_conf->gpio_num_info) {
+ pr_err("%s gpio index %d >= max %d\n", __func__,
+ power_setting->seq_val,
+ SENSOR_GPIO_MAX);
+ goto power_up_failed;
+ }
+ if (!ctrl->gpio_conf->gpio_num_info->valid
+ [power_setting->seq_val])
+ continue;
+ CDBG("%s:%d gpio set val %d\n", __func__, __LINE__,
+ ctrl->gpio_conf->gpio_num_info->gpio_num
+ [power_setting->seq_val]);
+ gpio_set_value_cansleep(
+ ctrl->gpio_conf->gpio_num_info->gpio_num
+ [power_setting->seq_val],
+ (int) power_setting->config_val);
+ break;
+ case SENSOR_VREG:
+ if (power_setting->seq_val == INVALID_VREG)
+ break;
+ if (power_setting->seq_val >= CAM_VREG_MAX) {
+ pr_err("%s vreg index %d >= max %d\n", __func__,
+ power_setting->seq_val,
+				CAM_VREG_MAX);
+ goto power_up_failed;
+ }
+ if (power_setting->seq_val < ctrl->num_vreg)
+ msm_camera_config_single_vreg(ctrl->dev,
+ &ctrl->cam_vreg
+ [power_setting->seq_val],
+ (struct regulator **)
+ &power_setting->data[0],
+ 1);
+ else
+ pr_err("ERR:%s: %d usr_idx:%d dts_idx:%d\n",
+ __func__, __LINE__,
+ power_setting->seq_val, ctrl->num_vreg);
+ break;
+ case SENSOR_I2C_MUX:
+ if (ctrl->i2c_conf && ctrl->i2c_conf->use_i2c_mux)
+ msm_camera_enable_i2c_mux(ctrl->i2c_conf);
+ break;
+ default:
+ pr_err("%s error power seq type %d\n", __func__,
+ power_setting->seq_type);
+ break;
+ }
+ if (power_setting->delay > 20) {
+ msleep(power_setting->delay);
+ } else if (power_setting->delay) {
+ usleep_range(power_setting->delay * 1000,
+ (power_setting->delay * 1000) + 1000);
+ }
+ }
+
+ if (device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ rc = sensor_i2c_client->i2c_func_tbl->i2c_util(
+ sensor_i2c_client, MSM_CCI_INIT);
+ if (rc < 0) {
+ pr_err("%s cci_init failed\n", __func__);
+ goto power_up_failed;
+ }
+ }
+
+ CDBG("%s exit\n", __func__);
+ return 0;
+power_up_failed:
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ for (index--; index >= 0; index--) {
+ CDBG("%s index %d\n", __func__, index);
+ power_setting = &ctrl->power_setting[index];
+ CDBG("%s type %d\n", __func__, power_setting->seq_type);
+ switch (power_setting->seq_type) {
+
+ case SENSOR_CLK:
+ msm_cam_clk_enable(ctrl->dev,
+ &ctrl->clk_info[0],
+ (struct clk **)&power_setting->data[0],
+ ctrl->clk_info_size,
+ 0);
+ break;
+ case SENSOR_GPIO:
+ if (!ctrl->gpio_conf->gpio_num_info)
+ continue;
+ if (!ctrl->gpio_conf->gpio_num_info->valid
+ [power_setting->seq_val])
+ continue;
+ gpio_set_value_cansleep(
+ ctrl->gpio_conf->gpio_num_info->gpio_num
+ [power_setting->seq_val], GPIOF_OUT_INIT_LOW);
+ break;
+ case SENSOR_VREG:
+ if (power_setting->seq_val < ctrl->num_vreg)
+ msm_camera_config_single_vreg(ctrl->dev,
+ &ctrl->cam_vreg
+ [power_setting->seq_val],
+ (struct regulator **)
+ &power_setting->data[0],
+ 0);
+ else
+ pr_err("%s:%d:seq_val: %d > num_vreg: %d\n",
+ __func__, __LINE__,
+ power_setting->seq_val, ctrl->num_vreg);
+ break;
+ case SENSOR_I2C_MUX:
+ if (ctrl->i2c_conf && ctrl->i2c_conf->use_i2c_mux)
+ msm_camera_disable_i2c_mux(ctrl->i2c_conf);
+ break;
+ default:
+ pr_err("%s error power seq type %d\n", __func__,
+ power_setting->seq_type);
+ break;
+ }
+ if (power_setting->delay > 20) {
+ msleep(power_setting->delay);
+ } else if (power_setting->delay) {
+ usleep_range(power_setting->delay * 1000,
+ (power_setting->delay * 1000) + 1000);
+ }
+ }
+ if (ctrl->cam_pinctrl_status) {
+ ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+ ctrl->pinctrl_info.gpio_state_suspend);
+ if (ret)
+ pr_err("%s:%d cannot set pin to suspend state\n",
+ __func__, __LINE__);
+ devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+ }
+ ctrl->cam_pinctrl_status = 0;
+ msm_camera_request_gpio_table(
+ ctrl->gpio_conf->cam_gpio_req_tbl,
+ ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+ return rc;
+}
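+
+/*
+ * Illustrative power-up table consumed by msm_camera_power_up(); the enum
+ * values, clock rate and delays below are hypothetical and sensor
+ * specific:
+ *
+ *	static struct msm_sensor_power_setting power_setting[] = {
+ *		{ .seq_type = SENSOR_VREG, .seq_val = CAM_VIO,
+ *		  .config_val = 0, .delay = 1 },
+ *		{ .seq_type = SENSOR_GPIO, .seq_val = SENSOR_GPIO_RESET,
+ *		  .config_val = GPIO_OUT_HIGH, .delay = 5 },
+ *		{ .seq_type = SENSOR_CLK, .seq_val = SENSOR_CAM_MCLK,
+ *		  .config_val = 24000000, .delay = 1 },
+ *	};
+ */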
+
+static struct msm_sensor_power_setting*
+msm_camera_get_power_settings(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_sensor_power_seq_type_t seq_type,
+ uint16_t seq_val)
+{
+ struct msm_sensor_power_setting *power_setting, *ps = NULL;
+ int idx;
+
+ for (idx = 0; idx < ctrl->power_setting_size; idx++) {
+ power_setting = &ctrl->power_setting[idx];
+ if (power_setting->seq_type == seq_type &&
+ power_setting->seq_val == seq_val) {
+ ps = power_setting;
+ return ps;
+ }
+
+ }
+ return ps;
+}
+
+int msm_camera_power_down(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_camera_device_type_t device_type,
+ struct msm_camera_i2c_client *sensor_i2c_client)
+{
+ int index = 0, ret = 0;
+ struct msm_sensor_power_setting *pd = NULL;
+ struct msm_sensor_power_setting *ps;
+
+ CDBG("%s:%d\n", __func__, __LINE__);
+ if (!ctrl || !sensor_i2c_client) {
+ pr_err("failed ctrl %p sensor_i2c_client %p\n", ctrl,
+ sensor_i2c_client);
+ return -EINVAL;
+ }
+ if (device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ sensor_i2c_client->i2c_func_tbl->i2c_util(
+ sensor_i2c_client, MSM_CCI_RELEASE);
+
+ for (index = 0; index < ctrl->power_down_setting_size; index++) {
+ CDBG("%s index %d\n", __func__, index);
+ pd = &ctrl->power_down_setting[index];
+ ps = NULL;
+ CDBG("%s type %d\n", __func__, pd->seq_type);
+ switch (pd->seq_type) {
+ case SENSOR_CLK:
+
+ ps = msm_camera_get_power_settings(ctrl,
+ pd->seq_type,
+ pd->seq_val);
+ if (ps)
+ msm_cam_clk_enable(ctrl->dev,
+ &ctrl->clk_info[0],
+ (struct clk **)&ps->data[0],
+ ctrl->clk_info_size,
+ 0);
+ else
+ pr_err("%s error in power up/down seq data\n",
+ __func__);
+ break;
+ case SENSOR_GPIO:
+ if (pd->seq_val >= SENSOR_GPIO_MAX ||
+ !ctrl->gpio_conf->gpio_num_info) {
+ pr_err("%s gpio index %d >= max %d\n", __func__,
+ pd->seq_val,
+ SENSOR_GPIO_MAX);
+ continue;
+ }
+ if (!ctrl->gpio_conf->gpio_num_info->valid
+ [pd->seq_val])
+ continue;
+ gpio_set_value_cansleep(
+ ctrl->gpio_conf->gpio_num_info->gpio_num
+ [pd->seq_val],
+ (int) pd->config_val);
+ break;
+ case SENSOR_VREG:
+ if (pd->seq_val == INVALID_VREG)
+ break;
+ if (pd->seq_val >= CAM_VREG_MAX) {
+ pr_err("%s vreg index %d >= max %d\n", __func__,
+ pd->seq_val,
+					CAM_VREG_MAX);
+ continue;
+ }
+
+ ps = msm_camera_get_power_settings(ctrl,
+ pd->seq_type,
+ pd->seq_val);
+ if (ps) {
+ if (pd->seq_val < ctrl->num_vreg)
+ msm_camera_config_single_vreg(ctrl->dev,
+ &ctrl->cam_vreg
+ [pd->seq_val],
+ (struct regulator **)
+ &ps->data[0],
+ 0);
+ else
+ pr_err("%s:%d:seq_val:%d > num_vreg: %d\n",
+ __func__, __LINE__, pd->seq_val,
+ ctrl->num_vreg);
+ } else
+ pr_err("%s error in power up/down seq data\n",
+ __func__);
+ break;
+ case SENSOR_I2C_MUX:
+ if (ctrl->i2c_conf && ctrl->i2c_conf->use_i2c_mux)
+ msm_camera_disable_i2c_mux(ctrl->i2c_conf);
+ break;
+ default:
+ pr_err("%s error power seq type %d\n", __func__,
+ pd->seq_type);
+ break;
+ }
+ if (pd->delay > 20) {
+ msleep(pd->delay);
+ } else if (pd->delay) {
+ usleep_range(pd->delay * 1000,
+ (pd->delay * 1000) + 1000);
+ }
+ }
+ if (ctrl->cam_pinctrl_status) {
+ ret = pinctrl_select_state(ctrl->pinctrl_info.pinctrl,
+ ctrl->pinctrl_info.gpio_state_suspend);
+ if (ret)
+ pr_err("%s:%d cannot set pin to suspend state",
+ __func__, __LINE__);
+ devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+ }
+ ctrl->cam_pinctrl_status = 0;
+ msm_camera_request_gpio_table(
+ ctrl->gpio_conf->cam_gpio_req_tbl,
+ ctrl->gpio_conf->cam_gpio_req_tbl_size, 0);
+ CDBG("%s exit\n", __func__);
+ return 0;
+}
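+
+/*
+ * The power-down table normally mirrors the power-up table in reverse;
+ * seq_type/seq_val must match a power-up entry so the clock and regulator
+ * handles cached in power_setting->data[] can be found again through
+ * msm_camera_get_power_settings(). Hypothetical sketch:
+ *
+ *	static struct msm_sensor_power_setting power_down_setting[] = {
+ *		{ .seq_type = SENSOR_CLK, .seq_val = SENSOR_CAM_MCLK },
+ *		{ .seq_type = SENSOR_GPIO, .seq_val = SENSOR_GPIO_RESET,
+ *		  .config_val = GPIO_OUT_LOW },
+ *		{ .seq_type = SENSOR_VREG, .seq_val = CAM_VIO },
+ *	};
+ */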
+
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.h b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.h
new file mode 100644
index 000000000000..3177775f7f4a
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_dt_util.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CAMERA_DT_UTIL_H__
+#define MSM_CAMERA_DT_UTIL_H__
+
+#include <soc/qcom/camera2.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include "msm_camera_i2c.h"
+
+#define INVALID_VREG 100
+
+int msm_sensor_get_sub_module_index(struct device_node *of_node,
+ struct msm_sensor_info_t **s_info);
+
+int msm_sensor_get_dt_actuator_data(struct device_node *of_node,
+ struct msm_actuator_info **act_info);
+
+int msm_sensor_get_dt_csi_data(struct device_node *of_node,
+ struct msm_camera_csi_lane_params **csi_lane_params);
+
+int msm_camera_get_dt_power_setting_data(struct device_node *of_node,
+ struct camera_vreg_t *cam_vreg, int num_vreg,
+ struct msm_camera_power_ctrl_t *power_info);
+
+int msm_camera_get_dt_gpio_req_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+
+int msm_camera_init_gpio_pin_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+
+int msm_camera_get_dt_vreg_data(struct device_node *of_node,
+ struct camera_vreg_t **cam_vreg, int *num_vreg);
+
+int msm_camera_power_up(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_camera_device_type_t device_type,
+ struct msm_camera_i2c_client *sensor_i2c_client);
+
+int msm_camera_power_down(struct msm_camera_power_ctrl_t *ctrl,
+ enum msm_camera_device_type_t device_type,
+ struct msm_camera_i2c_client *sensor_i2c_client);
+
+int msm_camera_fill_vreg_params(struct camera_vreg_t *cam_vreg,
+ int num_vreg, struct msm_sensor_power_setting *power_setting,
+ uint16_t power_setting_size);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h
new file mode 100644
index 000000000000..eac70f5379d9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c.h
@@ -0,0 +1,149 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_CAMERA_CCI_I2C_H
+#define MSM_CAMERA_CCI_I2C_H
+
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <media/msm_cam_sensor.h>
+
+struct msm_camera_i2c_client {
+ struct msm_camera_i2c_fn_t *i2c_func_tbl;
+ struct i2c_client *client;
+ struct msm_camera_cci_client *cci_client;
+ struct msm_camera_spi_client *spi_client;
+ enum msm_camera_i2c_reg_addr_type addr_type;
+};
+
+struct msm_camera_i2c_fn_t {
+ int (*i2c_read)(struct msm_camera_i2c_client *, uint32_t, uint16_t *,
+ enum msm_camera_i2c_data_type);
+ int32_t (*i2c_read_seq)(struct msm_camera_i2c_client *, uint32_t,
+ uint8_t *, uint32_t);
+ int (*i2c_write)(struct msm_camera_i2c_client *, uint32_t, uint16_t,
+ enum msm_camera_i2c_data_type);
+	int (*i2c_write_seq)(struct msm_camera_i2c_client *, uint32_t,
+ uint8_t *, uint32_t);
+ int32_t (*i2c_write_table)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+ int32_t (*i2c_write_seq_table)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_seq_reg_setting *);
+ int32_t (*i2c_write_table_w_microdelay)
+ (struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+ int32_t (*i2c_util)(struct msm_camera_i2c_client *, uint16_t);
+ int32_t (*i2c_write_conf_tbl)(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+ int32_t (*i2c_poll)(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+ int32_t (*i2c_read_burst)(struct msm_camera_i2c_client *client,
+ uint32_t read_byte, uint8_t *buffer, uint32_t addr,
+ enum msm_camera_i2c_data_type data_type);
+ int32_t (*i2c_write_burst)(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ uint32_t buf_len, uint32_t addr,
+ enum msm_camera_i2c_data_type data_type);
+ int32_t (*i2c_write_table_async)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+ int32_t (*i2c_write_table_sync)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+ int32_t (*i2c_write_table_sync_block)(struct msm_camera_i2c_client *,
+ struct msm_camera_i2c_reg_setting *);
+};
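+
+/*
+ * Callers are expected to dispatch through the function table rather than
+ * call the CCI/QUP/SPI helpers directly, e.g. (register address and value
+ * are hypothetical):
+ *
+ *	rc = client->i2c_func_tbl->i2c_write(client, 0x0100, 0x01,
+ *		MSM_CAMERA_I2C_BYTE_DATA);
+ */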
+
+int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_cci_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_cci_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_cci_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_cci_i2c_write_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_table_async(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_table_sync(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_table_sync_block(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_seq_table(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_cci_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_sensor_cci_i2c_util(struct msm_camera_i2c_client *client,
+ uint16_t cci_cmd);
+
+int32_t msm_camera_cci_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_qup_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_qup_i2c_write_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_qup_i2c_write_seq_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting);
+
+int32_t msm_camera_qup_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_qup_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_qup_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c_mux.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c_mux.c
new file mode 100644
index 000000000000..888af7e1eb37
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c_mux.c
@@ -0,0 +1,186 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include "msm_camera_i2c_mux.h"
+
+/* TODO move this somewhere else */
+#define MSM_I2C_MUX_DRV_NAME "msm_cam_i2c_mux"
+static int msm_i2c_mux_config(struct i2c_mux_device *mux_device, uint8_t *mode)
+{
+ uint32_t val;
+ val = msm_camera_io_r(mux_device->ctl_base);
+ if (*mode == MODE_DUAL) {
+ msm_camera_io_w(val | 0x3, mux_device->ctl_base);
+ } else if (*mode == MODE_L) {
+ msm_camera_io_w(((val | 0x2) & ~(0x1)), mux_device->ctl_base);
+ val = msm_camera_io_r(mux_device->ctl_base);
+ CDBG("the camio mode config left value is %d\n", val);
+ } else {
+ msm_camera_io_w(((val | 0x1) & ~(0x2)), mux_device->ctl_base);
+ val = msm_camera_io_r(mux_device->ctl_base);
+ CDBG("the camio mode config right value is %d\n", val);
+ }
+ return 0;
+}
+
+static int msm_i2c_mux_init(struct i2c_mux_device *mux_device)
+{
+ int rc = 0, val = 0;
+ if (mux_device->use_count == 0) {
+ mux_device->ctl_base = ioremap(mux_device->ctl_mem->start,
+ resource_size(mux_device->ctl_mem));
+ if (!mux_device->ctl_base) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ mux_device->rw_base = ioremap(mux_device->rw_mem->start,
+ resource_size(mux_device->rw_mem));
+ if (!mux_device->rw_base) {
+ rc = -ENOMEM;
+ iounmap(mux_device->ctl_base);
+ return rc;
+ }
+ val = msm_camera_io_r(mux_device->rw_base);
+ msm_camera_io_w((val | 0x200), mux_device->rw_base);
+ }
+ mux_device->use_count++;
+ return 0;
+}
+
+static int msm_i2c_mux_release(struct i2c_mux_device *mux_device)
+{
+ int val = 0;
+ mux_device->use_count--;
+ if (mux_device->use_count == 0) {
+ val = msm_camera_io_r(mux_device->rw_base);
+ msm_camera_io_w((val & ~0x200), mux_device->rw_base);
+ iounmap(mux_device->rw_base);
+ iounmap(mux_device->ctl_base);
+ }
+ return 0;
+}
+
+static long msm_i2c_mux_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ struct i2c_mux_device *mux_device;
+ int rc = 0;
+ mux_device = v4l2_get_subdevdata(sd);
+ if (mux_device == NULL) {
+ rc = -ENOMEM;
+ return rc;
+ }
+ mutex_lock(&mux_device->mutex);
+ switch (cmd) {
+ case VIDIOC_MSM_I2C_MUX_CFG:
+ rc = msm_i2c_mux_config(mux_device, (uint8_t *) arg);
+ break;
+ case VIDIOC_MSM_I2C_MUX_INIT:
+ rc = msm_i2c_mux_init(mux_device);
+ break;
+ case VIDIOC_MSM_I2C_MUX_RELEASE:
+ rc = msm_i2c_mux_release(mux_device);
+ break;
+ default:
+ rc = -ENOIOCTLCMD;
+ }
+ mutex_unlock(&mux_device->mutex);
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_i2c_mux_subdev_core_ops = {
+ .ioctl = &msm_i2c_mux_subdev_ioctl,
+};
+
+static const struct v4l2_subdev_ops msm_i2c_mux_subdev_ops = {
+ .core = &msm_i2c_mux_subdev_core_ops,
+};
+
+static int i2c_mux_probe(struct platform_device *pdev)
+{
+ struct i2c_mux_device *mux_device;
+ int rc = 0;
+ CDBG("%s: device id = %d\n", __func__, pdev->id);
+ mux_device = kzalloc(sizeof(struct i2c_mux_device), GFP_KERNEL);
+ if (!mux_device) {
+		pr_err("%s: not enough memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ v4l2_subdev_init(&mux_device->subdev, &msm_i2c_mux_subdev_ops);
+ v4l2_set_subdevdata(&mux_device->subdev, mux_device);
+ platform_set_drvdata(pdev, &mux_device->subdev);
+ mutex_init(&mux_device->mutex);
+
+ mux_device->ctl_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "i2c_mux_ctl");
+ if (!mux_device->ctl_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto i2c_mux_no_resource;
+ }
+ mux_device->ctl_io = request_mem_region(mux_device->ctl_mem->start,
+ resource_size(mux_device->ctl_mem), pdev->name);
+ if (!mux_device->ctl_io) {
+ pr_err("%s: no valid mem region\n", __func__);
+ rc = -EBUSY;
+ goto i2c_mux_no_resource;
+ }
+ mux_device->rw_mem = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "i2c_mux_rw");
+ if (!mux_device->rw_mem) {
+ pr_err("%s: no mem resource?\n", __func__);
+ rc = -ENODEV;
+ goto i2c_mux_no_resource;
+ }
+ mux_device->rw_io = request_mem_region(mux_device->rw_mem->start,
+ resource_size(mux_device->rw_mem), pdev->name);
+ if (!mux_device->rw_io) {
+ pr_err("%s: no valid mem region\n", __func__);
+ rc = -EBUSY;
+ goto i2c_mux_no_resource;
+ }
+ mux_device->pdev = pdev;
+ return 0;
+
+i2c_mux_no_resource:
+ mutex_destroy(&mux_device->mutex);
+ kfree(mux_device);
+	return rc;
+}
+
+static struct platform_driver i2c_mux_driver = {
+ .probe = i2c_mux_probe,
+ .driver = {
+ .name = MSM_I2C_MUX_DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_camera_i2c_mux_init_module(void)
+{
+ return platform_driver_register(&i2c_mux_driver);
+}
+
+static void __exit msm_camera_i2c_mux_exit_module(void)
+{
+ platform_driver_unregister(&i2c_mux_driver);
+}
+
+module_init(msm_camera_i2c_mux_init_module);
+module_exit(msm_camera_i2c_mux_exit_module);
+MODULE_DESCRIPTION("MSM Camera I2C mux driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c_mux.h b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c_mux.h
new file mode 100644
index 000000000000..649d499c76db
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_i2c_mux.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_I2C_MUX_H
+#define MSM_I2C_MUX_H
+
+#include <linux/io.h>
+#include <media/v4l2-subdev.h>
+
+struct i2c_mux_device {
+ struct platform_device *pdev;
+ struct v4l2_subdev subdev;
+ struct resource *ctl_mem;
+ struct resource *ctl_io;
+ void __iomem *ctl_base;
+ struct resource *rw_mem;
+ struct resource *rw_io;
+ void __iomem *rw_base;
+ struct mutex mutex;
+ unsigned use_count;
+};
+
+struct i2c_mux_cfg_params {
+ struct v4l2_subdev *subdev;
+ void *parms;
+};
+
+#define VIDIOC_MSM_I2C_MUX_CFG \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 13, struct i2c_mux_cfg_params)
+
+#define VIDIOC_MSM_I2C_MUX_INIT \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 14, struct v4l2_subdev*)
+
+#define VIDIOC_MSM_I2C_MUX_RELEASE \
+ _IOWR('V', BASE_VIDIOC_PRIVATE + 15, struct v4l2_subdev*)
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
new file mode 100644
index 000000000000..41b4952f4f40
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_qup_i2c.c
@@ -0,0 +1,550 @@
+/* Copyright (c) 2011, 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/camera2.h>
+#include "msm_camera_i2c.h"
+
+#undef CDBG
+#ifdef CONFIG_MSMB_CAMERA_DEBUG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#define S_I2C_DBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define CDBG(fmt, args...) do { } while (0)
+#define S_I2C_DBG(fmt, args...) do { } while (0)
+#endif
+
+#define I2C_COMPARE_MATCH 0
+#define I2C_COMPARE_MISMATCH 1
+#define I2C_POLL_MAX_ITERATION 20
+
+static int32_t msm_camera_qup_i2c_rxdata(
+ struct msm_camera_i2c_client *dev_client, unsigned char *rxdata,
+ int data_length)
+{
+ int32_t rc = 0;
+ uint16_t saddr = dev_client->client->addr >> 1;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = saddr,
+ .flags = 0,
+ .len = dev_client->addr_type,
+ .buf = rxdata,
+ },
+ {
+ .addr = saddr,
+ .flags = I2C_M_RD,
+ .len = data_length,
+ .buf = rxdata,
+ },
+ };
+ rc = i2c_transfer(dev_client->client->adapter, msgs, 2);
+ if (rc < 0)
+ S_I2C_DBG("msm_camera_qup_i2c_rxdata failed 0x%x\n", saddr);
+ return rc;
+}
+
+static int32_t msm_camera_qup_i2c_txdata(
+ struct msm_camera_i2c_client *dev_client, unsigned char *txdata,
+ int length)
+{
+ int32_t rc = 0;
+ uint16_t saddr = dev_client->client->addr >> 1;
+ struct i2c_msg msg[] = {
+ {
+ .addr = saddr,
+ .flags = 0,
+ .len = length,
+ .buf = txdata,
+ },
+ };
+ rc = i2c_transfer(dev_client->client->adapter, msg, 1);
+ if (rc < 0)
+		S_I2C_DBG("msm_camera_qup_i2c_txdata failed 0x%x\n", saddr);
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ unsigned char buf[client->addr_type+data_type];
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ }
+ rc = msm_camera_qup_i2c_rxdata(client, buf, data_type);
+ if (rc < 0) {
+ S_I2C_DBG("%s fail\n", __func__);
+ return rc;
+ }
+
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA)
+ *data = buf[0];
+ else
+ *data = buf[0] << 8 | buf[1];
+
+ S_I2C_DBG("%s addr = 0x%x data: 0x%x\n", __func__, addr, *data);
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char buf[client->addr_type+num_byte];
+ int i;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || num_byte == 0)
+ return rc;
+
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ }
+ rc = msm_camera_qup_i2c_rxdata(client, buf, num_byte);
+ if (rc < 0) {
+ S_I2C_DBG("%s fail\n", __func__);
+ return rc;
+ }
+
+ S_I2C_DBG("%s addr = 0x%x", __func__, addr);
+ for (i = 0; i < num_byte; i++) {
+ data[i] = buf[i];
+ S_I2C_DBG("Byte %d: 0x%x\n", i, buf[i]);
+ S_I2C_DBG("Data: 0x%x\n", data[i]);
+ }
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ unsigned char buf[client->addr_type+data_type];
+ uint8_t len = 0;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ S_I2C_DBG("%s reg addr = 0x%x data type: %d\n",
+ __func__, addr, data_type);
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ len = 1;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len+1, buf[len+1]);
+ len = 2;
+ }
+ S_I2C_DBG("Data: 0x%x\n", data);
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+ buf[len] = data;
+ S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+ len += 1;
+ } else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
+ buf[len] = data >> BITS_PER_BYTE;
+ buf[len+1] = data;
+ S_I2C_DBG("Byte %d: 0x%x\n", len, buf[len]);
+ S_I2C_DBG("Byte %d: 0x%x\n", len+1, buf[len+1]);
+ len += 2;
+ }
+ rc = msm_camera_qup_i2c_txdata(client, buf, len);
+ if (rc < 0)
+ S_I2C_DBG("%s fail\n", __func__);
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ int32_t rc = -EFAULT;
+ unsigned char buf[client->addr_type+num_byte];
+ uint8_t len = 0, i = 0;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || num_byte == 0)
+ return rc;
+
+ S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n",
+ __func__, addr, num_byte);
+ if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
+ buf[0] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ len = 1;
+ } else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {
+ buf[0] = addr >> BITS_PER_BYTE;
+ buf[1] = addr;
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len, buf[len]);
+ S_I2C_DBG("%s byte %d: 0x%x\n", __func__,
+ len+1, buf[len+1]);
+ len = 2;
+ }
+ if (num_byte > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s: num_byte=%d clamped to max supported %d\n",
+ __func__, num_byte, I2C_SEQ_REG_DATA_MAX);
+ num_byte = I2C_SEQ_REG_DATA_MAX;
+ }
+ for (i = 0; i < num_byte; i++) {
+ buf[i+len] = data[i];
+ S_I2C_DBG("Byte %d: 0x%x\n", i+len, buf[i+len]);
+ S_I2C_DBG("Data: 0x%x\n", data[i]);
+ }
+ rc = msm_camera_qup_i2c_txdata(client, buf, len+num_byte);
+ if (rc < 0)
+ S_I2C_DBG("%s fail\n", __func__);
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_write_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ struct msm_camera_i2c_reg_array *reg_setting;
+ uint16_t client_addr_type;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ reg_setting = write_setting->reg_setting;
+ client_addr_type = client->addr_type;
+ client->addr_type = write_setting->addr_type;
+
+ for (i = 0; i < write_setting->size; i++) {
+ CDBG("%s addr 0x%x data 0x%x\n", __func__,
+ reg_setting->reg_addr, reg_setting->reg_data);
+
+ rc = msm_camera_qup_i2c_write(client, reg_setting->reg_addr,
+ reg_setting->reg_data, write_setting->data_type);
+ if (rc < 0)
+ break;
+ reg_setting++;
+ }
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000, (write_setting->delay
+ * 1000) + 1000);
+
+ client->addr_type = client_addr_type;
+ return rc;
+}
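+
+/*
+ * Illustrative caller of msm_camera_qup_i2c_write_table(); register
+ * addresses and values are hypothetical:
+ *
+ *	struct msm_camera_i2c_reg_array regs[] = {
+ *		{ .reg_addr = 0x0103, .reg_data = 0x01 },
+ *		{ .reg_addr = 0x0100, .reg_data = 0x00 },
+ *	};
+ *	struct msm_camera_i2c_reg_setting setting = {
+ *		.reg_setting = regs,
+ *		.size = ARRAY_SIZE(regs),
+ *		.addr_type = MSM_CAMERA_I2C_WORD_ADDR,
+ *		.data_type = MSM_CAMERA_I2C_BYTE_DATA,
+ *		.delay = 1,
+ *	};
+ *
+ *	rc = msm_camera_qup_i2c_write_table(client, &setting);
+ */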
+
+int32_t msm_camera_qup_i2c_write_seq_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_seq_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ struct msm_camera_i2c_seq_reg_array *reg_setting;
+ uint16_t client_addr_type;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)) {
+		pr_err("%s Invalid addr type %d\n", __func__,
+ write_setting->addr_type);
+ return rc;
+ }
+
+ reg_setting = write_setting->reg_setting;
+ client_addr_type = client->addr_type;
+ client->addr_type = write_setting->addr_type;
+
+ for (i = 0; i < write_setting->size; i++) {
+ rc = msm_camera_qup_i2c_write_seq(client, reg_setting->reg_addr,
+ reg_setting->reg_data, reg_setting->reg_data_size);
+ if (rc < 0)
+ break;
+ reg_setting++;
+ }
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000, (write_setting->delay
+ * 1000) + 1000);
+
+ client->addr_type = client_addr_type;
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_write_table_w_microdelay(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (!client || !write_setting)
+ return rc;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ reg_setting = write_setting->reg_setting;
+ for (i = 0; i < write_setting->size; i++) {
+ rc = msm_camera_qup_i2c_write(client, reg_setting->reg_addr,
+ reg_setting->reg_data, write_setting->data_type);
+ if (rc < 0)
+ break;
+ if (reg_setting->delay)
+ usleep_range(reg_setting->delay,
+ reg_setting->delay + 1000);
+ reg_setting++;
+ }
+ return rc;
+}
+
+static int32_t msm_camera_qup_i2c_compare(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ uint16_t reg_data = 0;
+ int data_len = 0;
+ switch (data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ data_len = data_type;
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ data_len = MSM_CAMERA_I2C_BYTE_DATA;
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ data_len = MSM_CAMERA_I2C_WORD_DATA;
+ break;
+ default:
+		pr_err("%s: Unsupported data type: %d\n", __func__, data_type);
+ break;
+ }
+
+ rc = msm_camera_qup_i2c_read(client, addr, &reg_data, data_len);
+ if (rc < 0)
+ return rc;
+
+ rc = I2C_COMPARE_MISMATCH;
+ switch (data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ if (data == reg_data)
+ rc = I2C_COMPARE_MATCH;
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ if ((reg_data & data) == data)
+ rc = I2C_COMPARE_MATCH;
+ break;
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ if (!(reg_data & data))
+ rc = I2C_COMPARE_MATCH;
+ break;
+ default:
+		pr_err("%s: Unsupported data type: %d\n", __func__, data_type);
+ break;
+ }
+
+ S_I2C_DBG("%s: Register and data match result %d\n", __func__,
+ rc);
+ return rc;
+}
+
+int32_t msm_camera_qup_i2c_poll(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ int i;
+ S_I2C_DBG("%s: addr: 0x%x data: 0x%x dt: %d\n",
+ __func__, addr, data, data_type);
+
+ for (i = 0; i < I2C_POLL_MAX_ITERATION; i++) {
+ rc = msm_camera_qup_i2c_compare(client,
+ addr, data, data_type);
+ if (rc == 0 || rc < 0)
+ break;
+ usleep_range(10000, 11000);
+ }
+ return rc;
+}
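+
+/*
+ * Usage sketch (register address is hypothetical): wait until a status
+ * register reads zero, giving up after I2C_POLL_MAX_ITERATION polls of
+ * roughly 10 ms each:
+ *
+ *	rc = msm_camera_qup_i2c_poll(client, 0x0005, 0x00,
+ *		MSM_CAMERA_I2C_BYTE_DATA);
+ *	if (rc != I2C_COMPARE_MATCH)
+ *		pr_err("standby poll failed or timed out\n");
+ */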
+
+static int32_t msm_camera_qup_i2c_set_mask(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t mask,
+ enum msm_camera_i2c_data_type data_type, uint16_t set_mask)
+{
+ int32_t rc;
+ uint16_t reg_data;
+
+ rc = msm_camera_qup_i2c_read(client, addr, &reg_data, data_type);
+ if (rc < 0) {
+ S_I2C_DBG("%s read fail\n", __func__);
+ return rc;
+ }
+ S_I2C_DBG("%s addr: 0x%x data: 0x%x setmask: 0x%x\n",
+ __func__, addr, reg_data, mask);
+
+ if (set_mask)
+ reg_data |= mask;
+ else
+ reg_data &= ~mask;
+ S_I2C_DBG("%s write: 0x%x\n", __func__, reg_data);
+
+ rc = msm_camera_qup_i2c_write(client, addr, reg_data, data_type);
+ if (rc < 0)
+ S_I2C_DBG("%s write fail\n", __func__);
+
+ return rc;
+}
+
+static int32_t msm_camera_qup_i2c_set_write_mask_data(
+ struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data, int16_t mask,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc;
+ uint16_t reg_data;
+ CDBG("%s\n", __func__);
+ if (mask == -1)
+ return 0;
+ if (mask == 0) {
+ rc = msm_camera_qup_i2c_write(client, addr, data, data_type);
+ } else {
+ rc = msm_camera_qup_i2c_read(client, addr, &reg_data,
+ data_type);
+ if (rc < 0) {
+ CDBG("%s read fail\n", __func__);
+ return rc;
+ }
+ reg_data &= ~mask;
+ reg_data |= (data & mask);
+ rc = msm_camera_qup_i2c_write(client, addr, reg_data,
+ data_type);
+ if (rc < 0)
+ CDBG("%s write fail\n", __func__);
+ }
+ return rc;
+}
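+
+/*
+ * Worked example: if the register currently holds 0xA5, a call with
+ * data = 0x0F and mask = 0x3C keeps the unmasked bits (0xA5 & ~0x3C = 0x81),
+ * merges in the masked data bits (0x0F & 0x3C = 0x0C) and writes 0x8D.
+ */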
+
+
+int32_t msm_camera_qup_i2c_write_conf_tbl(
+ struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_conf *reg_conf_tbl, uint16_t size,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int i;
+ int32_t rc = -EFAULT;
+	CDBG("%s: E\n", __func__);
+ for (i = 0; i < size; i++) {
+ enum msm_camera_i2c_data_type dt;
+ if (reg_conf_tbl->cmd_type == MSM_CAMERA_I2C_CMD_POLL) {
+ rc = msm_camera_qup_i2c_poll(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ reg_conf_tbl->dt);
+ } else {
+ if (reg_conf_tbl->dt == 0)
+ dt = data_type;
+ else
+ dt = reg_conf_tbl->dt;
+ switch (dt) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ rc = msm_camera_qup_i2c_write(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data, dt);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_MASK:
+ rc = msm_camera_qup_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_BYTE_MASK:
+ rc = msm_camera_qup_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_BYTE_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_WORD_MASK:
+ rc = msm_camera_qup_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 1);
+ break;
+ case MSM_CAMERA_I2C_UNSET_WORD_MASK:
+ rc = msm_camera_qup_i2c_set_mask(client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ MSM_CAMERA_I2C_WORD_DATA, 0);
+ break;
+ case MSM_CAMERA_I2C_SET_BYTE_WRITE_MASK_DATA:
+ rc = msm_camera_qup_i2c_set_write_mask_data(
+ client,
+ reg_conf_tbl->reg_addr,
+ reg_conf_tbl->reg_data,
+ reg_conf_tbl->mask,
+ MSM_CAMERA_I2C_BYTE_DATA);
+ break;
+ default:
+				pr_err("%s: Unsupported data type: %d\n",
+ __func__, dt);
+ break;
+ }
+ }
+ if (rc < 0)
+ break;
+ reg_conf_tbl++;
+ }
+ return rc;
+}
+
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_spi.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_spi.c
new file mode 100644
index 000000000000..df52ad097db4
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_spi.c
@@ -0,0 +1,836 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <soc/qcom/camera2.h>
+#include "msm_camera_spi.h"
+
+#undef SPIDBG
+#ifdef CONFIG_MSMB_CAMERA_DEBUG
+#define SPIDBG(fmt, args...) pr_debug(fmt, ##args)
+#define S_I2C_DBG(fmt, args...) pr_debug(fmt, ##args)
+#else
+#define SPIDBG(fmt, args...) do { } while (0)
+#define S_I2C_DBG(fmt, args...) do { } while (0)
+#endif
+
+static int msm_camera_spi_txfr(struct spi_device *spi, char *txbuf,
+ char *rxbuf, int num_byte)
+{
+ struct spi_transfer t;
+ struct spi_message m;
+
+ memset(&t, 0, sizeof(t));
+ t.tx_buf = txbuf;
+ t.rx_buf = rxbuf;
+ t.len = num_byte;
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int msm_camera_spi_txfr_read(struct spi_device *spi, char *txbuf,
+ char *rxbuf, int txlen, int rxlen)
+{
+ struct spi_transfer tx;
+ struct spi_transfer rx;
+ struct spi_message m;
+ memset(&tx, 0, sizeof(tx));
+ memset(&rx, 0, sizeof(rx));
+ tx.tx_buf = txbuf;
+ rx.rx_buf = rxbuf;
+ tx.len = txlen;
+ rx.len = rxlen;
+ spi_message_init(&m);
+ spi_message_add_tail(&tx, &m);
+ spi_message_add_tail(&rx, &m);
+ return spi_sync(spi, &m);
+}
+
+
+/**
+ * msm_camera_set_addr() - helper function to set transfer address
+ * @addr: device address
+ * @addr_len: the addr field length of an instruction
+ * @type: type (i.e. byte-length) of @addr
+ * @str: shifted address output, must be zeroed when passed in
+ *
+ * This helper function sets @str based on the addr field length of an
+ * instruction and the data length.
+ */
+static void msm_camera_set_addr(uint32_t addr, uint8_t addr_len,
+ enum msm_camera_i2c_reg_addr_type type,
+ char *str)
+{
+ int i, len;
+ if (!addr_len)
+ return;
+
+ if (addr_len < type)
+ SPIDBG("%s: omitting higher bits in address\n", __func__);
+
+ /* only support transfer MSB first for now */
+ len = addr_len - type;
+ for (i = len; i < addr_len; i++) {
+ if (i >= 0)
+ str[i] = (addr >> (BITS_PER_BYTE * (addr_len - i - 1)))
+ & 0xFF;
+ }
+
+}
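+
+/*
+ * Worked example: with a 3-byte instruction address field (addr_len = 3)
+ * and a word (2-byte) client addr_type, addr = 0xBEEF is packed
+ * right-aligned into str[] as { 0x00, 0xBE, 0xEF }.
+ */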
+
+/**
+ * msm_camera_spi_tx_helper() - wrapper for SPI transaction
+ * @client: io client
+ * @inst: inst of this transaction
+ * @addr: device addr following the inst
+ * @data: output byte array (could be NULL)
+ * @num_byte: size of @data
+ * @tx, rx: optional transfer buffer. It must be at least header
+ * + @num_byte long.
+ *
+ * This is the core function for SPI transaction, except for writes. It first
+ * checks address type, then allocates required memory for tx/rx buffers.
+ * It sends out <opcode><addr>, and optionally receives @num_byte of response,
+ * if @data is not NULL. This function does not check for wait conditions,
+ * and will return immediately once bus transaction finishes.
+ *
+ * This function will allocate buffers of header + @num_byte long. For
+ * large transfers, the allocation could fail. External buffer @tx, @rx
+ * should be passed in to bypass allocation. The size of buffer should be
+ * at least header + num_byte long. Since buffer is managed externally,
+ * @data will be ignored, and read results will be in @rx.
+ * @tx, @rx also can be used for repeated transfers to improve performance.
+ */
+int32_t msm_camera_spi_tx_helper(struct msm_camera_i2c_client *client,
+ struct msm_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx)
+{
+ int32_t rc = -EINVAL;
+ struct spi_device *spi = client->spi_client->spi_master;
+ char *ctx = NULL, *crx = NULL;
+ uint32_t len, hlen;
+ uint8_t retries = client->spi_client->retries;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_3B_ADDR))
+ return rc;
+
+ hlen = msm_camera_spi_get_hlen(inst);
+ len = hlen + num_byte;
+
+ if (tx)
+ ctx = tx;
+ else
+ ctx = kzalloc(len, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (num_byte) {
+ if (rx)
+ crx = rx;
+ else
+ crx = kzalloc(len, GFP_KERNEL);
+ if (!crx) {
+ if (!tx)
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ } else {
+ crx = NULL;
+ }
+
+ ctx[0] = inst->opcode;
+ msm_camera_set_addr(addr, inst->addr_len, client->addr_type, ctx + 1);
+ while ((rc = msm_camera_spi_txfr(spi, ctx, crx, len)) && retries) {
+ retries--;
+ msleep(client->spi_client->retry_delay);
+ }
+ if (rc < 0) {
+ SPIDBG("%s: failed %d\n", __func__, rc);
+ goto out;
+ }
+ if (data && num_byte && !rx)
+ memcpy(data, crx + hlen, num_byte);
+
+out:
+ if (!tx)
+ kfree(ctx);
+ if (!rx)
+ kfree(crx);
+ return rc;
+}
+
+int32_t msm_camera_spi_tx_read(struct msm_camera_i2c_client *client,
+ struct msm_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+ uint32_t num_byte, char *tx, char *rx)
+{
+ int32_t rc = -EINVAL;
+ struct spi_device *spi = client->spi_client->spi_master;
+ char *ctx = NULL, *crx = NULL;
+ uint32_t hlen;
+ uint8_t retries = client->spi_client->retries;
+
+ if ((client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_3B_ADDR))
+ return rc;
+
+ hlen = msm_camera_spi_get_hlen(inst);
+ if (tx)
+ ctx = tx;
+ else
+ ctx = kzalloc(hlen, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ if (num_byte) {
+ if (rx)
+ crx = rx;
+ else
+ crx = kzalloc(num_byte, GFP_KERNEL);
+ if (!crx) {
+ if (!tx)
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ } else {
+ crx = NULL;
+ }
+
+ ctx[0] = inst->opcode;
+ if (client->addr_type == MSM_CAMERA_I2C_3B_ADDR) {
+ msm_camera_set_addr(addr, inst->addr_len, client->addr_type,
+ ctx + 1);
+ } else {
+ ctx[1] = (addr >> BITS_PER_BYTE) & 0xFF;
+ ctx[2] = (addr & 0xFF);
+ ctx[3] = 0;
+ }
+ SPIDBG("%s: tx(%u): %02x %02x %02x %02x\n", __func__,
+ hlen, ctx[0], ctx[1], ctx[2], ctx[3]);
+ while ((rc = msm_camera_spi_txfr_read(spi, ctx, crx, hlen, num_byte))
+ && retries) {
+ retries--;
+ msleep(client->spi_client->retry_delay);
+ }
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, rc);
+ goto out;
+ }
+ if (data && num_byte && !rx)
+ memcpy(data, crx, num_byte);
+out:
+ if (!tx)
+ kfree(ctx);
+ if (!rx)
+ kfree(crx);
+ return rc;
+}
+
+int32_t msm_camera_spi_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EINVAL;
+ uint8_t temp[2];
+
+ if ((data_type != MSM_CAMERA_I2C_BYTE_DATA)
+ && (data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ rc = msm_camera_spi_tx_read(client,
+ &client->spi_client->cmd_tbl.read, addr, &temp[0],
+ data_type, NULL, NULL);
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, rc);
+ return rc;
+ }
+
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA)
+ *data = temp[0];
+ else
+ *data = (temp[0] << BITS_PER_BYTE) | temp[1];
+
+ SPIDBG("%s: addr 0x%x, data %u\n", __func__, addr, *data);
+ return rc;
+}
+
+int32_t msm_camera_spi_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ return msm_camera_spi_tx_helper(client,
+ &client->spi_client->cmd_tbl.read_seq, addr, data, num_byte,
+ NULL, NULL);
+}
+
+/**
+ * msm_camera_spi_read_seq_l()- function for large SPI reads
+ * @client: io client
+ * @addr: device address to read
+ * @num_byte: read length
+ * @tx,rx: pre-allocated SPI buffer. Its size must be at least
+ * header + num_byte
+ *
+ * This function is used for large transactions. Instead of allocating SPI
+ * buffer each time, caller is responsible for pre-allocating memory buffers.
+ * Memory buffer must be at least header + num_byte. Header length can be
+ * obtained by msm_camera_spi_get_hlen().
+ */
+int32_t msm_camera_spi_read_seq_l(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint32_t num_byte, char *tx, char *rx)
+{
+ return msm_camera_spi_tx_helper(client,
+ &client->spi_client->cmd_tbl.read_seq, addr, NULL, num_byte,
+ tx, rx);
+}
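+
+/*
+ * Illustrative sketch only (not part of the driver API): how a caller might
+ * pre-allocate the buffers for msm_camera_spi_read_seq_l(). The variable
+ * names and error handling are assumptions for the example.
+ *
+ *	uint16_t hlen = msm_camera_spi_get_hlen(
+ *		&client->spi_client->cmd_tbl.read_seq);
+ *	char *tx = kzalloc(hlen + num_byte, GFP_KERNEL);
+ *	char *rx = kzalloc(hlen + num_byte, GFP_KERNEL);
+ *
+ *	if (tx && rx)
+ *		rc = msm_camera_spi_read_seq_l(client, addr, num_byte, tx, rx);
+ *	// on success the payload starts at rx + hlen
+ *	kfree(tx);
+ *	kfree(rx);
+ */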
+
+int32_t msm_camera_spi_query_id(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ return msm_camera_spi_tx_helper(client,
+ &client->spi_client->cmd_tbl.query_id, addr, data, num_byte,
+ NULL, NULL);
+}
+
+static int32_t msm_camera_spi_read_status_reg(
+ struct msm_camera_i2c_client *client, uint8_t *status)
+{
+ struct msm_camera_spi_inst *rs =
+ &client->spi_client->cmd_tbl.read_status;
+ if (rs->addr_len != 0) {
+ pr_err("%s: not implemented yet\n", __func__);
+ return -EINVAL;
+ }
+ return msm_camera_spi_tx_helper(client, rs, 0, status, 1, NULL, NULL);
+}
+
+static int32_t msm_camera_spi_device_busy(struct msm_camera_i2c_client *client,
+ uint8_t *busy)
+{
+ int rc;
+ uint8_t st = 0;
+ rc = msm_camera_spi_read_status_reg(client, &st);
+ if (rc < 0) {
+ pr_err("%s: failed to read status reg\n", __func__);
+ return rc;
+ }
+ *busy = st & client->spi_client->busy_mask;
+ return 0;
+}
+
+static int32_t msm_camera_spi_wait(struct msm_camera_i2c_client *client,
+ struct msm_camera_spi_inst *inst)
+{
+ uint8_t busy;
+ int i, rc;
+ SPIDBG("%s: op 0x%x wait start\n", __func__, inst->opcode);
+ for (i = 0; i < inst->delay_count; i++) {
+ rc = msm_camera_spi_device_busy(client, &busy);
+ if (rc < 0)
+ return rc;
+ if (!busy)
+ break;
+ else
+ msleep(inst->delay_intv);
+ SPIDBG("%s: op 0x%x wait\n", __func__, inst->opcode);
+ }
+ /* the loop above exits with i == delay_count if the device stayed busy */
+ if (inst->delay_count && i == inst->delay_count) {
+ pr_err("%s: op %x timed out\n", __func__, inst->opcode);
+ return -ETIMEDOUT;
+ }
+ SPIDBG("%s: op %x finished\n", __func__, inst->opcode);
+ return 0;
+}
+
+static int32_t msm_camera_spi_write_enable(
+ struct msm_camera_i2c_client *client)
+{
+ struct msm_camera_spi_inst *we =
+ &client->spi_client->cmd_tbl.write_enable;
+ int rc;
+ if (0 == we->opcode)
+ return 0;
+ if (we->addr_len != 0) {
+ pr_err("%s: not implemented yet\n", __func__);
+ return -EINVAL;
+ }
+ rc = msm_camera_spi_tx_helper(client, we, 0, NULL, 0, NULL, NULL);
+ if (rc < 0)
+ pr_err("%s: write enable failed\n", __func__);
+ return rc;
+}
+
+int32_t msm_camera_spi_erase(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint32_t size)
+{
+ struct msm_camera_spi_inst *se = &client->spi_client->cmd_tbl.erase;
+ int rc = 0;
+ uint32_t cur;
+ uint32_t end = addr + size;
+ uint32_t erase_size = client->spi_client->erase_size;
+ for (cur = rounddown(addr, erase_size); cur < end; cur += erase_size) {
+ SPIDBG("%s: erasing 0x%x\n", __func__, cur);
+ rc = msm_camera_spi_write_enable(client);
+ if (rc < 0)
+ return rc;
+ rc = msm_camera_spi_tx_helper(client, se, cur, NULL, 0,
+ NULL, NULL);
+ if (rc < 0) {
+ pr_err("%s: erase failed\n", __func__);
+ return rc;
+ }
+ rc = msm_camera_spi_wait(client, se);
+ if (rc < 0) {
+ pr_err("%s: erase timedout\n", __func__);
+ return rc;
+ }
+ }
+ return rc;
+}
+
+/**
+ * msm_camera_spi_page_program() - core function to perform a write
+ * @client: needed for obtaining the SPI device
+ * @addr: address to program on the device
+ * @data: data to write
+ * @len: size of data
+ * @tx: tx buffer, size >= header + len
+ *
+ * This function performs an SPI write and has no boundary check. The write
+ * range must not cross a page boundary, or data will be corrupted. The
+ * transaction is guaranteed to be finished when the function returns. This
+ * function should never be used outside msm_camera_spi_write_seq().
+ */
+static int32_t msm_camera_spi_page_program(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint16_t len, uint8_t *tx)
+{
+ int rc;
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ struct spi_device *spi = client->spi_client->spi_master;
+ uint8_t retries = client->spi_client->retries;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ SPIDBG("%s: addr 0x%x, size 0x%x\n", __func__, addr, len);
+ rc = msm_camera_spi_write_enable(client);
+ if (rc < 0)
+ return rc;
+ memset(tx, 0, header_len);
+ tx[0] = pg->opcode;
+ msm_camera_set_addr(addr, pg->addr_len, client->addr_type, tx + 1);
+ memcpy(tx + header_len, data, len);
+ SPIDBG("%s: tx(%u): %02x %02x %02x %02x\n", __func__,
+ len, tx[0], tx[1], tx[2], tx[3]);
+ while ((rc = spi_write(spi, tx, len + header_len)) && retries) {
+ rc = msm_camera_spi_wait(client, pg);
+ msleep(client->spi_client->retry_delay);
+ retries--;
+ }
+ if (rc < 0) {
+ pr_err("%s: failed %d\n", __func__, rc);
+ return rc;
+ }
+ rc = msm_camera_spi_wait(client, pg);
+ return rc;
+}
+
+int32_t msm_camera_spi_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte)
+{
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ const uint32_t page_size = client->spi_client->page_size;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ uint16_t len;
+ uint32_t cur_len, end;
+ char *tx, *pdata = data;
+ int rc = -EINVAL;
+ if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_3B_ADDR))
+ return rc;
+ /* single page write */
+ if ((addr % page_size) + num_byte <= page_size) {
+ len = header_len + num_byte;
+ tx = kmalloc(len, GFP_KERNEL);
+ if (!tx)
+ goto NOMEM;
+ rc = msm_camera_spi_page_program(client, addr, data,
+ num_byte, tx);
+ if (rc < 0)
+ goto ERROR;
+ goto OUT;
+ }
+ /* multi page write */
+ len = header_len + page_size;
+ tx = kmalloc(len, GFP_KERNEL);
+ if (!tx)
+ goto NOMEM;
+ while (num_byte) {
+ end = min(page_size, (addr % page_size) + num_byte);
+ cur_len = end - (addr % page_size);
+ rc = msm_camera_spi_page_program(client, addr, pdata,
+ cur_len, tx);
+ if (rc < 0)
+ goto ERROR;
+ addr += cur_len;
+ pdata += cur_len;
+ num_byte -= cur_len;
+ }
+ goto OUT;
+NOMEM:
+ pr_err("%s: memory allocation failed\n", __func__);
+ return -ENOMEM;
+ERROR:
+ pr_err("%s: error write\n", __func__);
+OUT:
+ kfree(tx);
+ return rc;
+}
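+
+/*
+ * Worked example of the multi-page split above (illustrative numbers only):
+ * with page_size = 256 (0x100), addr = 0x1F0 and num_byte = 0x40, the first
+ * pass programs 0x100 - 0xF0 = 0x10 bytes to finish the current page, and
+ * the second pass programs the remaining 0x30 bytes starting at the
+ * page-aligned address 0x200.
+ */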
+
+int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data, enum msm_camera_i2c_data_type data_type)
+{
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ uint16_t len = 0;
+ char buf[2]; /* large enough for byte or word data */
+ char *tx;
+ int rc = -EINVAL;
+ if (((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && (client->addr_type != MSM_CAMERA_I2C_3B_ADDR))
+ || (data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+ S_I2C_DBG("Data: 0x%x\n", data);
+ len = header_len + (uint8_t)data_type;
+ tx = kmalloc(len, GFP_KERNEL);
+ if (!tx)
+ goto NOMEM;
+ if (data_type == MSM_CAMERA_I2C_BYTE_DATA) {
+ buf[0] = data;
+ SPIDBG("Byte %d: 0x%x\n", len, buf[0]);
+ } else if (data_type == MSM_CAMERA_I2C_WORD_DATA) {
+ buf[0] = (data >> BITS_PER_BYTE) & 0x00FF;
+ buf[1] = (data & 0x00FF);
+ }
+ rc = msm_camera_spi_page_program(client, addr, buf,
+ (uint16_t)data_type, tx);
+ if (rc < 0)
+ goto ERROR;
+ goto OUT;
+NOMEM:
+ pr_err("%s: memory allocation failed\n", __func__);
+ return -ENOMEM;
+ERROR:
+ pr_err("%s: error write\n", __func__);
+OUT:
+ kfree(tx);
+ return rc;
+}
+
+int32_t msm_camera_spi_write_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting)
+{
+ int i;
+ int32_t rc = -EFAULT;
+ struct msm_camera_i2c_reg_array *reg_setting;
+ uint16_t client_addr_type;
+ if (!client || !write_setting)
+ return rc;
+ if ((write_setting->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
+ && write_setting->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (write_setting->data_type != MSM_CAMERA_I2C_BYTE_DATA
+ && write_setting->data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+ reg_setting = write_setting->reg_setting;
+ client_addr_type = client->addr_type;
+ client->addr_type = write_setting->addr_type;
+ for (i = 0; i < write_setting->size; i++) {
+ SPIDBG("%s addr %x data %x\n", __func__,
+ reg_setting->reg_addr, reg_setting->reg_data);
+ rc = msm_camera_spi_write(client, reg_setting->reg_addr,
+ reg_setting->reg_data, write_setting->data_type);
+ if (rc < 0)
+ break;
+ reg_setting++;
+ }
+ if (write_setting->delay > 20)
+ msleep(write_setting->delay);
+ else if (write_setting->delay)
+ usleep_range(write_setting->delay * 1000,
+ (write_setting->delay
+ * 1000) + 1000);
+ client->addr_type = client_addr_type;
+ return rc;
+}
+
+uint32_t msm_get_burst_size(struct msm_camera_i2c_reg_array *reg_setting,
+ uint32_t reg_size, uint32_t index, uint16_t burst_addr)
+{
+ uint32_t i;
+ uint32_t cnt = 0;
+ for (i = index; i < reg_size; i++) {
+ if (reg_setting[i].reg_addr == burst_addr)
+ cnt++;
+ else
+ break;
+ }
+ return cnt;
+}
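+
+/*
+ * A "burst" is a run of consecutive reg_setting entries that all target the
+ * same register address (typically a data-port register). For example, with
+ * hypothetical addresses and values
+ *
+ *	{ {0x6028, 0x2000}, {0x6F12, 0x1234}, {0x6F12, 0x5678}, {0x602A, 0x0} }
+ *
+ * msm_get_burst_size(reg_setting, 4, 1, 0x6F12) returns 2.
+ */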
+
+#ifdef SPI_DYNAMIC_ALLOC
+int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ struct msm_camera_burst_info *info,
+ enum msm_camera_i2c_data_type data_type)
+{
+ uint32_t i, j, k;
+ int32_t rc = 0;
+ uint32_t chunk_num, residue;
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ uint8_t *ctx, *data;
+ uint32_t len;
+ if (info->burst_len == 0 || info->chunk_size == 0) {
+ pr_err("%s:%d Invalid argument\n", __func__, __LINE__);
+ return rc;
+ }
+ if (info->burst_start + info->burst_len > reg_size) {
+ pr_err("%s too big burst size, index=%d, size=%d\n", __func__,
+ info->burst_start, info->burst_len);
+ return rc;
+ }
+ chunk_num = info->burst_len / info->chunk_size;
+ residue = info->burst_len % info->chunk_size;
+ SPIDBG("%s header_len=%d, chunk nb=%d, residue=%d\n",
+ __func__, header_len, chunk_num, residue);
+ len = info->chunk_size * data_type + header_len;
+ SPIDBG("buffer allocation size = %d\n", len);
+ ctx = kzalloc(len, GFP_KERNEL);
+ if (!ctx) {
+ pr_err("%s %d memory alloc fail!\n", __func__, __LINE__);
+ return rc;
+ }
+ ctx[0] = pg->opcode;
+ ctx[1] = (info->burst_addr >> 8) & 0xff;
+ ctx[2] = info->burst_addr & 0xff;
+ k = info->burst_start;
+ for (i = 0; i < chunk_num; i++) {
+ data = ctx + header_len;
+ for (j = 0; j < info->chunk_size; j++) {
+ *data++ = (reg_setting[k+j].reg_data >> 8) & 0xff;
+ *data++ = reg_setting[k+j].reg_data & 0xff;
+ }
+ rc = msm_camera_spi_txfr(client->spi_client->spi_master,
+ (void *) ctx, NULL,
+ info->chunk_size * data_type + header_len);
+ if (rc < 0) {
+ pr_err("%s %d spi sending error = %d!!\n",
+ __func__, __LINE__, rc);
+ goto fail;
+ }
+ k += info->chunk_size;
+ }
+ SPIDBG("%s burst chunk start=%d, residue=%d\n",
+ __func__, k, residue);
+ if (residue) {
+ data = ctx + header_len;
+ for (j = 0; j < residue; j++) {
+ *data++ = (reg_setting[k+j].reg_data >> 8) & 0xff;
+ *data++ = reg_setting[k+j].reg_data & 0xff;
+ }
+ rc = msm_camera_spi_txfr(client->spi_client->spi_master,
+ (void *)ctx, NULL,
+ residue*data_type+header_len);
+ if (rc < 0) {
+ pr_err("%s %d spi sending error = %d!!\n", __func__,
+ __LINE__, rc);
+ goto fail;
+ }
+ }
+fail:
+ kfree(ctx);
+ return rc;
+}
+#else /* SPI_DYNAMIC_ALLOC */
+int32_t msm_camera_spi_send_burst(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ struct msm_camera_burst_info *info,
+ enum msm_camera_i2c_data_type data_type)
+{
+ uint32_t i, j, k;
+ int32_t rc = 0;
+ uint32_t chunk_num, residue;
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.page_program;
+ uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+ struct msm_spi_write_burst_packet tx_buf;
+ if (info->burst_len == 0 || info->chunk_size == 0
+ || info->chunk_size > MAX_SPI_SIZE) {
+ pr_err("%s %d Invalid argument\n", __func__, __LINE__);
+ return rc;
+ }
+ if (info->burst_start + info->burst_len > reg_size) {
+ pr_err("%s too big burst size, index=%d, size=%d\n", __func__,
+ info->burst_start, info->burst_len);
+ return rc;
+ }
+ chunk_num = info->burst_len / info->chunk_size;
+ residue = info->burst_len % info->chunk_size;
+ SPIDBG("%s header_len=%d, chunk nb=%d, residue=%d\n",
+ __func__, header_len, chunk_num, residue);
+ tx_buf.cmd = pg->opcode;
+ tx_buf.addr_msb = (info->burst_addr >> 8) & 0xff;
+ tx_buf.addr_lsb = info->burst_addr & 0xff;
+ SPIDBG("%s cmd=%d, addr_msb=0x%x, addr_lsb=0x%x\n", __func__,
+ tx_buf.cmd, tx_buf.addr_msb, tx_buf.addr_lsb);
+ k = info->burst_start;
+ for (i = 0; i < chunk_num; i++) {
+ SPIDBG("%s burst chunk start=%d, chunk_size=%d, chunk_num=%d\n",
+ __func__,
+ k, info->chunk_size, i);
+ for (j = 0; j < info->chunk_size; j++) {
+ tx_buf.data_arr[j].data_msb =
+ (reg_setting[k+j].reg_data >> 8) & 0xff;
+ tx_buf.data_arr[j].data_lsb =
+ reg_setting[k+j].reg_data & 0xff;
+ }
+ rc = msm_camera_spi_txfr(client->spi_client->spi_master,
+ (void *)&tx_buf, NULL,
+ info->chunk_size * data_type+header_len);
+ if (rc < 0) {
+ pr_err("%s %d spi sending error = %d!!\n", __func__,
+ __LINE__, rc);
+ goto fail;
+ }
+ k += info->chunk_size;
+ }
+ SPIDBG("%s burst chunk start=%d, residue=%d\n", __func__, k, residue);
+ if (residue) {
+ for (j = 0; j < residue; j++) {
+ tx_buf.data_arr[j].data_msb = (reg_setting[k+j].reg_data
+ >> 8) & 0xff;
+ tx_buf.data_arr[j].data_lsb = reg_setting[k+j].reg_data
+ & 0xff;
+ }
+ rc = msm_camera_spi_txfr(client->spi_client->spi_master,
+ (void *)&tx_buf, NULL,
+ residue * data_type+header_len);
+ if (rc < 0) {
+ pr_err("%s %d spi sending error = %d!!\n", __func__,
+ __LINE__, rc);
+ goto fail;
+ }
+ }
+fail:
+ return rc;
+}
+#endif /* SPI_DYNAMIC_ALLOC */
+
+int32_t msm_camera_spi_write_burst(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ uint32_t buf_len, uint32_t burst_addr,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int k = 0;
+ int32_t rc = -EFAULT;
+ struct msm_camera_burst_info burst_info;
+ SPIDBG(" %s: start\n", __func__);
+ if (buf_len == 0) {
+ pr_err("%s Invalid parameter, buf_len = %u\n",
+ __func__, buf_len);
+ return rc;
+ }
+ if (reg_size == 0 || reg_setting == NULL) {
+ pr_err("%s Invalid parameter, array_size = %u\n",
+ __func__, reg_size);
+ return rc;
+ }
+
+ if ((client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+
+ SPIDBG(" %s: buf_len=%d, reg_size=%d\n", __func__, buf_len, reg_size);
+ while (k < reg_size) {
+ if (reg_setting[k].reg_addr == burst_addr) {
+ memset(&burst_info, 0x00,
+ sizeof(struct msm_camera_burst_info));
+ burst_info.burst_addr = burst_addr;
+ burst_info.burst_start = k;
+ burst_info.chunk_size = buf_len;
+ burst_info.burst_len =
+ msm_get_burst_size(reg_setting, reg_size, k,
+ burst_addr);
+ SPIDBG("%s burst start = %d, length = %d\n", __func__,
+ k, burst_info.burst_len);
+ rc = msm_camera_spi_send_burst(client, reg_setting,
+ reg_size, &burst_info, data_type);
+ if (rc < 0) {
+ pr_err("[%s::%d][spi_sync Error::%d]\n",
+ __func__, __LINE__, rc);
+ return rc;
+ }
+ k += burst_info.burst_len;
+ } else {
+ SPIDBG("%s word write, start = %d\n", __func__, k);
+ msm_camera_spi_write(client, reg_setting[k].reg_addr,
+ reg_setting[k].reg_data, data_type);
+ k++;
+ }
+ }
+ SPIDBG("%s: end\n", __func__);
+ return rc;
+}
+
+int32_t msm_camera_spi_read_burst(struct msm_camera_i2c_client *client,
+ uint32_t read_byte, uint8_t *buffer, uint32_t burst_addr,
+ enum msm_camera_i2c_data_type data_type)
+{
+ int32_t rc = -EFAULT;
+ struct msm_camera_spi_inst *pg =
+ &client->spi_client->cmd_tbl.read;
+ uint32_t len = msm_camera_spi_get_hlen(pg);
+ uint8_t *tx_buf = NULL;
+ uint8_t *r = buffer;
+ SPIDBG("%s: start\n", __func__);
+
+ if (buffer == NULL || read_byte == 0 || len == 0) {
+ pr_err("%s %d Invalid parameters!!\n", __func__, __LINE__);
+ return rc;
+ }
+
+ if ((client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ || (data_type != MSM_CAMERA_I2C_WORD_DATA))
+ return rc;
+ tx_buf = kzalloc(len, GFP_KERNEL);
+ if (!tx_buf)
+ return -ENOMEM;
+
+ tx_buf[0] = pg->opcode;
+ tx_buf[1] = (burst_addr >> 8) & 0xff;
+ tx_buf[2] = burst_addr & 0xff;
+ tx_buf[3] = 0; /* dummy */
+ rc = msm_camera_spi_txfr_read(client->spi_client->spi_master,
+ &tx_buf[0], r, len, read_byte);
+ if (rc < 0)
+ pr_err("[%s::%d][spi_sync Error::%d]\n", __func__,
+ __LINE__, rc);
+
+ kfree(tx_buf);
+
+ SPIDBG("%s: end\n", __func__);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_spi.h b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_spi.h
new file mode 100644
index 000000000000..4b389fbf3766
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_spi.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_CAMERA_SPI_H
+#define __MSM_CAMERA_SPI_H
+
+#include <linux/spi/spi.h>
+#include <media/msm_cam_sensor.h>
+#include "msm_camera_i2c.h"
+
+#define MAX_SPI_SIZE 110
+#define SPI_DYNAMIC_ALLOC
+
+/**
+ * Common SPI communication scheme
+ * tx: <opcode>[addr][wait][write buffer]
+ * rx: [read buffer]
+ * Some instructions require polling the busy register until the
+ * operation completes.
+ */
+struct msm_camera_spi_inst {
+ uint8_t opcode; /* one-byte opcode */
+ uint8_t addr_len; /* addr len in bytes */
+ uint8_t dummy_len; /* setup cycles */
+ uint8_t delay_intv; /* delay intv for this inst (ms) */
+ uint8_t delay_count; /* total delay count for this inst */
+};
+
+struct msm_spi_write_burst_data {
+ u8 data_msb;
+ u8 data_lsb;
+};
+
+struct msm_spi_write_burst_packet {
+ u8 cmd;
+ u8 addr_msb;
+ u8 addr_lsb;
+ struct msm_spi_write_burst_data data_arr[MAX_SPI_SIZE];
+};
+
+struct msm_camera_burst_info {
+ uint32_t burst_addr;
+ uint32_t burst_start;
+ uint32_t burst_len;
+ uint32_t chunk_size;
+};
+
+struct msm_camera_spi_inst_tbl {
+ struct msm_camera_spi_inst read;
+ struct msm_camera_spi_inst read_seq;
+ struct msm_camera_spi_inst query_id;
+ struct msm_camera_spi_inst page_program;
+ struct msm_camera_spi_inst write_enable;
+ struct msm_camera_spi_inst read_status;
+ struct msm_camera_spi_inst erase;
+};
+
+struct msm_camera_spi_client {
+ struct spi_device *spi_master;
+ struct msm_camera_spi_inst_tbl cmd_tbl;
+ uint8_t device_id0;
+ uint8_t device_id1;
+ uint8_t mfr_id0;
+ uint8_t mfr_id1;
+ uint8_t retry_delay; /* ms */
+ uint8_t retries; /* retry times upon failure */
+ uint8_t busy_mask; /* busy bit in status reg */
+ uint16_t page_size; /* page size for page program */
+ uint32_t erase_size; /* minimal erase size */
+};
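+
+/*
+ * Illustrative sketch only: one way a sensor/EEPROM driver might fill in the
+ * instruction table during probe. The opcodes below follow the common JEDEC
+ * SPI-NOR command set and are example values, not taken from any particular
+ * device.
+ *
+ *	static const struct msm_camera_spi_inst_tbl example_tbl = {
+ *		.read         = { .opcode = 0x03, .addr_len = 3 },
+ *		.read_seq     = { .opcode = 0x03, .addr_len = 3 },
+ *		.query_id     = { .opcode = 0x9f },
+ *		.page_program = { .opcode = 0x02, .addr_len = 3 },
+ *		.write_enable = { .opcode = 0x06 },
+ *		.read_status  = { .opcode = 0x05 },
+ *		.erase        = { .opcode = 0x20, .addr_len = 3,
+ *				  .delay_intv = 10, .delay_count = 100 },
+ *	};
+ *
+ * together with, e.g., page_size = 256, erase_size = 4096, busy_mask = 0x01,
+ * retries = 3 and retry_delay = 1 in struct msm_camera_spi_client.
+ */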
+
+static __always_inline
+uint16_t msm_camera_spi_get_hlen(struct msm_camera_spi_inst *inst)
+{
+ return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len;
+}
+
+int32_t msm_camera_spi_read(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t *data,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_spi_read_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_spi_read_seq_l(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint32_t num_byte, char *tx, char *rx);
+
+int32_t msm_camera_spi_query_id(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_spi_write_seq(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint8_t *data, uint32_t num_byte);
+
+int32_t msm_camera_spi_erase(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint32_t size);
+
+int32_t msm_camera_spi_write(struct msm_camera_i2c_client *client,
+ uint32_t addr, uint16_t data, enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_spi_write_table(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_setting *write_setting);
+
+int32_t msm_camera_spi_write_burst(struct msm_camera_i2c_client *client,
+ struct msm_camera_i2c_reg_array *reg_setting, uint32_t reg_size,
+ uint32_t buf_len, uint32_t addr,
+ enum msm_camera_i2c_data_type data_type);
+
+int32_t msm_camera_spi_read_burst(struct msm_camera_i2c_client *client,
+ uint32_t read_byte, uint8_t *buffer, uint32_t addr,
+ enum msm_camera_i2c_data_type data_type);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
new file mode 100644
index 000000000000..ea15d2f9aa50
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.c
@@ -0,0 +1,1392 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include "msm_sensor.h"
+#include "msm_sd.h"
+#include "camera.h"
+#include "msm_cci.h"
+#include "msm_camera_io_util.h"
+#include "msm_camera_i2c_mux.h"
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/regulator/consumer.h>
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static void msm_sensor_adjust_mclk(struct msm_camera_power_ctrl_t *ctrl)
+{
+ int idx;
+ struct msm_sensor_power_setting *power_setting;
+ for (idx = 0; idx < ctrl->power_setting_size; idx++) {
+ power_setting = &ctrl->power_setting[idx];
+ if (power_setting->seq_type == SENSOR_CLK &&
+ power_setting->seq_val == SENSOR_CAM_MCLK) {
+ if (power_setting->config_val == 24000000) {
+ power_setting->config_val = 23880000;
+ CDBG("%s MCLK request adjusted to 23.88MHz\n"
+ , __func__);
+ }
+ break;
+ }
+ }
+
+ return;
+}
+
+static void msm_sensor_misc_regulator(
+ struct msm_sensor_ctrl_t *sctrl, uint32_t enable)
+{
+ int32_t rc = 0;
+ if (enable) {
+ sctrl->misc_regulator = (void *)rpm_regulator_get(
+ &sctrl->pdev->dev, sctrl->sensordata->misc_regulator);
+ if (sctrl->misc_regulator) {
+ rc = rpm_regulator_set_mode(sctrl->misc_regulator,
+ RPM_REGULATOR_MODE_HPM);
+ if (rc < 0) {
+ pr_err("%s: Failed to set for rpm regulator on %s: %d\n",
+ __func__,
+ sctrl->sensordata->misc_regulator, rc);
+ rpm_regulator_put(sctrl->misc_regulator);
+ }
+ } else {
+ pr_err("%s: Failed to vote for rpm regulator on %s: %d\n",
+ __func__,
+ sctrl->sensordata->misc_regulator, rc);
+ }
+ } else {
+ if (sctrl->misc_regulator) {
+ rc = rpm_regulator_set_mode(
+ (struct rpm_regulator *)sctrl->misc_regulator,
+ RPM_REGULATOR_MODE_AUTO);
+ if (rc < 0)
+ pr_err("%s: Failed to set for rpm regulator on %s: %d\n",
+ __func__,
+ sctrl->sensordata->misc_regulator, rc);
+ rpm_regulator_put(sctrl->misc_regulator);
+ }
+ }
+}
+
+int32_t msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ if (!s_ctrl->pdev && !s_ctrl->sensor_i2c_client->client)
+ return 0;
+ kfree(s_ctrl->sensordata->slave_info);
+ kfree(s_ctrl->sensordata->cam_slave_info);
+ kfree(s_ctrl->sensordata->actuator_info);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf);
+ kfree(s_ctrl->sensordata->power_info.cam_vreg);
+ kfree(s_ctrl->sensordata->power_info.power_setting);
+ kfree(s_ctrl->sensordata->power_info.power_down_setting);
+ kfree(s_ctrl->sensordata->csi_lane_params);
+ kfree(s_ctrl->sensordata->sensor_info);
+ kfree(s_ctrl->sensordata->power_info.clk_info);
+ kfree(s_ctrl->sensordata);
+ return 0;
+}
+
+static struct msm_cam_clk_info cam_8974_clk_info[] = {
+ [SENSOR_CAM_MCLK] = {"cam_src_clk", 24000000},
+ [SENSOR_CAM_CLK] = {"cam_clk", 0},
+};
+
+int msm_sensor_power_down(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ struct msm_camera_power_ctrl_t *power_info;
+ enum msm_camera_device_type_t sensor_device_type;
+ struct msm_camera_i2c_client *sensor_i2c_client;
+
+ if (!s_ctrl) {
+ pr_err("%s:%d failed: s_ctrl %p\n",
+ __func__, __LINE__, s_ctrl);
+ return -EINVAL;
+ }
+
+ if (s_ctrl->is_csid_tg_mode)
+ return 0;
+
+ power_info = &s_ctrl->sensordata->power_info;
+ sensor_device_type = s_ctrl->sensor_device_type;
+ sensor_i2c_client = s_ctrl->sensor_i2c_client;
+
+ if (!power_info || !sensor_i2c_client) {
+ pr_err("%s:%d failed: power_info %p sensor_i2c_client %p\n",
+ __func__, __LINE__, power_info, sensor_i2c_client);
+ return -EINVAL;
+ }
+ return msm_camera_power_down(power_info, sensor_device_type,
+ sensor_i2c_client);
+}
+
+int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int rc;
+ struct msm_camera_power_ctrl_t *power_info;
+ struct msm_camera_i2c_client *sensor_i2c_client;
+ struct msm_camera_slave_info *slave_info;
+ const char *sensor_name;
+ uint32_t retry = 0;
+
+ if (!s_ctrl) {
+ pr_err("%s:%d failed: %p\n",
+ __func__, __LINE__, s_ctrl);
+ return -EINVAL;
+ }
+
+ if (s_ctrl->is_csid_tg_mode)
+ return 0;
+
+ power_info = &s_ctrl->sensordata->power_info;
+ sensor_i2c_client = s_ctrl->sensor_i2c_client;
+ slave_info = s_ctrl->sensordata->slave_info;
+ sensor_name = s_ctrl->sensordata->sensor_name;
+
+ if (!power_info || !sensor_i2c_client || !slave_info ||
+ !sensor_name) {
+ pr_err("%s:%d failed: %p %p %p %p\n",
+ __func__, __LINE__, power_info,
+ sensor_i2c_client, slave_info, sensor_name);
+ return -EINVAL;
+ }
+
+ if (s_ctrl->set_mclk_23880000)
+ msm_sensor_adjust_mclk(power_info);
+
+ for (retry = 0; retry < 3; retry++) {
+ rc = msm_camera_power_up(power_info, s_ctrl->sensor_device_type,
+ sensor_i2c_client);
+ if (rc < 0)
+ return rc;
+ rc = msm_sensor_check_id(s_ctrl);
+ if (rc < 0) {
+ msm_camera_power_down(power_info,
+ s_ctrl->sensor_device_type, sensor_i2c_client);
+ msleep(20);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ return rc;
+}
+
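+/*
+ * Extract the sensor ID bits selected by sensor_id_mask and shift them down
+ * so they can be compared directly against slave_info->sensor_id. Worked
+ * example (illustrative values): with chipid = 0x1234 and a mask of 0x0FF0,
+ * the masked value 0x0230 is shifted right by the mask's four trailing zero
+ * bits, giving a sensor ID of 0x23. A mask of 0 means all bits are compared.
+ */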
+static uint16_t msm_sensor_id_by_mask(struct msm_sensor_ctrl_t *s_ctrl,
+ uint16_t chipid)
+{
+ uint16_t sensor_id = chipid;
+ int16_t sensor_id_mask = s_ctrl->sensordata->slave_info->sensor_id_mask;
+
+ if (!sensor_id_mask)
+ sensor_id_mask = ~sensor_id_mask;
+
+ sensor_id &= sensor_id_mask;
+ sensor_id_mask &= -sensor_id_mask;
+ sensor_id_mask -= 1;
+ while (sensor_id_mask) {
+ sensor_id_mask >>= 1;
+ sensor_id >>= 1;
+ }
+ return sensor_id;
+}
+
+int msm_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int rc = 0;
+ uint16_t chipid = 0;
+ struct msm_camera_i2c_client *sensor_i2c_client;
+ struct msm_camera_slave_info *slave_info;
+ const char *sensor_name;
+
+ if (!s_ctrl) {
+ pr_err("%s:%d failed: %p\n",
+ __func__, __LINE__, s_ctrl);
+ return -EINVAL;
+ }
+ sensor_i2c_client = s_ctrl->sensor_i2c_client;
+ slave_info = s_ctrl->sensordata->slave_info;
+ sensor_name = s_ctrl->sensordata->sensor_name;
+
+ if (!sensor_i2c_client || !slave_info || !sensor_name) {
+ pr_err("%s:%d failed: %p %p %p\n",
+ __func__, __LINE__, sensor_i2c_client, slave_info,
+ sensor_name);
+ return -EINVAL;
+ }
+
+ rc = sensor_i2c_client->i2c_func_tbl->i2c_read(
+ sensor_i2c_client, slave_info->sensor_id_reg_addr,
+ &chipid, MSM_CAMERA_I2C_WORD_DATA);
+ if (rc < 0) {
+ pr_err("%s: %s: read id failed\n", __func__, sensor_name);
+ return rc;
+ }
+
+ CDBG("%s: read id: 0x%x expected id 0x%x:\n", __func__, chipid,
+ slave_info->sensor_id);
+ if (msm_sensor_id_by_mask(s_ctrl, chipid) != slave_info->sensor_id) {
+ pr_err("msm_sensor_match_id chip id doesnot match\n");
+ return -ENODEV;
+ }
+ return rc;
+}
+
+static struct msm_sensor_ctrl_t *get_sctrl(struct v4l2_subdev *sd)
+{
+ return container_of(container_of(sd, struct msm_sd_subdev, sd),
+ struct msm_sensor_ctrl_t, msm_sd);
+}
+
+static void msm_sensor_stop_stream(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+
+ mutex_lock(s_ctrl->msm_sensor_mutex);
+ if (s_ctrl->sensor_state == MSM_SENSOR_POWER_UP) {
+ s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write_table(
+ s_ctrl->sensor_i2c_client, &s_ctrl->stop_setting);
+ kfree(s_ctrl->stop_setting.reg_setting);
+ s_ctrl->stop_setting.reg_setting = NULL;
+
+ if (s_ctrl->func_tbl->sensor_power_down) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 0);
+
+ rc = s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_DOWN;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ pr_err("s_ctrl->func_tbl NULL\n");
+ }
+ }
+ mutex_unlock(s_ctrl->msm_sensor_mutex);
+ return;
+}
+
+static int msm_sensor_get_af_status(struct msm_sensor_ctrl_t *s_ctrl,
+ void __user *argp)
+{
+ /*
+ * TODO: Set the AF status register address and the expected value.
+ * The AF status should be read from the sensor register and the
+ * result set in the status variable accordingly.
+ */
+ return 0;
+}
+
+static long msm_sensor_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(sd);
+ void __user *argp = (void __user *)arg;
+ if (!s_ctrl) {
+ pr_err("%s s_ctrl NULL\n", __func__);
+ return -EBADF;
+ }
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_CFG:
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ rc = s_ctrl->func_tbl->sensor_config32(s_ctrl, argp);
+ else
+#endif
+ rc = s_ctrl->func_tbl->sensor_config(s_ctrl, argp);
+ return rc;
+ case VIDIOC_MSM_SENSOR_GET_AF_STATUS:
+ return msm_sensor_get_af_status(s_ctrl, argp);
+ case VIDIOC_MSM_SENSOR_RELEASE:
+ case MSM_SD_SHUTDOWN:
+ msm_sensor_stop_stream(s_ctrl);
+ return 0;
+ case MSM_SD_NOTIFY_FREEZE:
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_sensor_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_CFG32:
+ cmd = VIDIOC_MSM_SENSOR_CFG;
+ /* fall through */
+ default:
+ return msm_sensor_subdev_ioctl(sd, cmd, arg);
+ }
+}
+
+long msm_sensor_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_sensor_subdev_do_ioctl);
+}
+
+static int msm_sensor_config32(struct msm_sensor_ctrl_t *s_ctrl,
+ void __user *argp)
+{
+ struct sensorb_cfg_data32 *cdata = (struct sensorb_cfg_data32 *)argp;
+ int32_t rc = 0;
+ int32_t i = 0;
+ mutex_lock(s_ctrl->msm_sensor_mutex);
+ CDBG("%s:%d %s cfgtype = %d\n", __func__, __LINE__,
+ s_ctrl->sensordata->sensor_name, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CFG_GET_SENSOR_INFO:
+ memcpy(cdata->cfg.sensor_info.sensor_name,
+ s_ctrl->sensordata->sensor_name,
+ sizeof(cdata->cfg.sensor_info.sensor_name));
+ cdata->cfg.sensor_info.session_id =
+ s_ctrl->sensordata->sensor_info->session_id;
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ cdata->cfg.sensor_info.subdev_id[i] =
+ s_ctrl->sensordata->sensor_info->subdev_id[i];
+ cdata->cfg.sensor_info.subdev_intf[i] =
+ s_ctrl->sensordata->sensor_info->subdev_intf[i];
+ }
+ cdata->cfg.sensor_info.is_mount_angle_valid =
+ s_ctrl->sensordata->sensor_info->is_mount_angle_valid;
+ cdata->cfg.sensor_info.sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ cdata->cfg.sensor_info.position =
+ s_ctrl->sensordata->sensor_info->position;
+ cdata->cfg.sensor_info.modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ CDBG("%s:%d sensor name %s\n", __func__, __LINE__,
+ cdata->cfg.sensor_info.sensor_name);
+ CDBG("%s:%d session id %d\n", __func__, __LINE__,
+ cdata->cfg.sensor_info.session_id);
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ CDBG("%s:%d subdev_id[%d] %d\n", __func__, __LINE__, i,
+ cdata->cfg.sensor_info.subdev_id[i]);
+ CDBG("%s:%d subdev_intf[%d] %d\n", __func__, __LINE__,
+ i, cdata->cfg.sensor_info.subdev_intf[i]);
+ }
+ CDBG("%s:%d mount angle valid %d value %d\n", __func__,
+ __LINE__, cdata->cfg.sensor_info.is_mount_angle_valid,
+ cdata->cfg.sensor_info.sensor_mount_angle);
+
+ break;
+ case CFG_GET_SENSOR_INIT_PARAMS:
+ cdata->cfg.sensor_init_params.modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ cdata->cfg.sensor_init_params.position =
+ s_ctrl->sensordata->sensor_info->position;
+ cdata->cfg.sensor_init_params.sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ CDBG("%s:%d init params mode %d pos %d mount %d\n", __func__,
+ __LINE__,
+ cdata->cfg.sensor_init_params.modes_supported,
+ cdata->cfg.sensor_init_params.position,
+ cdata->cfg.sensor_init_params.sensor_mount_angle);
+ break;
+ case CFG_WRITE_I2C_ARRAY:
+ case CFG_WRITE_I2C_ARRAY_SYNC:
+ case CFG_WRITE_I2C_ARRAY_SYNC_BLOCK:
+ case CFG_WRITE_I2C_ARRAY_ASYNC: {
+ struct msm_camera_i2c_reg_setting32 conf_array32;
+ struct msm_camera_i2c_reg_setting conf_array;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&conf_array32,
+ (void *)compat_ptr(cdata->cfg.setting),
+ sizeof(struct msm_camera_i2c_reg_setting32))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.addr_type = conf_array32.addr_type;
+ conf_array.data_type = conf_array32.data_type;
+ conf_array.delay = conf_array32.delay;
+ conf_array.size = conf_array32.size;
+ conf_array.reg_setting = compat_ptr(conf_array32.reg_setting);
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting,
+ (void *)(conf_array.reg_setting),
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+
+ if (CFG_WRITE_I2C_ARRAY == cdata->cfgtype)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else if (CFG_WRITE_I2C_ARRAY_ASYNC == cdata->cfgtype)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_async(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else if (CFG_WRITE_I2C_ARRAY_SYNC_BLOCK == cdata->cfgtype)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_sync_block(
+ s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_sync(s_ctrl->sensor_i2c_client,
+ &conf_array);
+
+ kfree(reg_setting);
+ break;
+ }
+ case CFG_SLAVE_READ_I2C: {
+ struct msm_camera_i2c_read_config read_config;
+ struct msm_camera_i2c_read_config *read_config_ptr = NULL;
+ uint16_t local_data = 0;
+ uint16_t orig_slave_addr = 0, read_slave_addr = 0;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ read_config_ptr =
+ (struct msm_camera_i2c_read_config *)
+ compat_ptr(cdata->cfg.setting);
+
+ if (copy_from_user(&read_config, read_config_ptr,
+ sizeof(struct msm_camera_i2c_read_config))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ read_slave_addr = read_config.slave_addr;
+ CDBG("%s:CFG_SLAVE_READ_I2C:", __func__);
+ CDBG("%s:slave_addr=0x%x reg_addr=0x%x, data_type=%d\n",
+ __func__, read_config.slave_addr,
+ read_config.reg_addr, read_config.data_type);
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->cci_client->sid;
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ read_slave_addr >> 1;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->client->addr;
+ s_ctrl->sensor_i2c_client->client->addr =
+ read_slave_addr >> 1;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.", __func__);
+ rc = -EFAULT;
+ break;
+ }
+ CDBG("%s:orig_slave_addr=0x%x, new_slave_addr=0x%x",
+ __func__, orig_slave_addr,
+ read_slave_addr >> 1);
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_read(
+ s_ctrl->sensor_i2c_client,
+ read_config.reg_addr,
+ &local_data, read_config.data_type);
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ orig_slave_addr;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ s_ctrl->sensor_i2c_client->client->addr =
+ orig_slave_addr;
+ }
+ if (rc < 0) {
+ pr_err("%s:%d: i2c_read failed\n", __func__, __LINE__);
+ break;
+ }
+ if (put_user(local_data, &read_config_ptr->data))
+ rc = -EFAULT;
+ break;
+ }
+ case CFG_WRITE_I2C_SEQ_ARRAY: {
+ struct msm_camera_i2c_seq_reg_setting32 conf_array32;
+ struct msm_camera_i2c_seq_reg_setting conf_array;
+ struct msm_camera_i2c_seq_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&conf_array32,
+ (void *)compat_ptr(cdata->cfg.setting),
+ sizeof(struct msm_camera_i2c_seq_reg_setting32))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.addr_type = conf_array32.addr_type;
+ conf_array.delay = conf_array32.delay;
+ conf_array.size = conf_array32.size;
+ conf_array.reg_setting = compat_ptr(conf_array32.reg_setting);
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_seq_reg_array)),
+ GFP_KERNEL);
+ if (!reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_seq_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_seq_table(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ kfree(reg_setting);
+ break;
+ }
+
+ case CFG_POWER_UP:
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_DOWN) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+ if (s_ctrl->func_tbl->sensor_power_up) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 1);
+
+ rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ break;
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_UP;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ rc = -EFAULT;
+ }
+ break;
+ case CFG_POWER_DOWN:
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ kfree(s_ctrl->stop_setting.reg_setting);
+ s_ctrl->stop_setting.reg_setting = NULL;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+ if (s_ctrl->func_tbl->sensor_power_down) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 0);
+
+ rc = s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ break;
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_DOWN;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ rc = -EFAULT;
+ }
+ break;
+ case CFG_SET_STOP_STREAM_SETTING: {
+ struct msm_camera_i2c_reg_setting32 stop_setting32;
+ struct msm_camera_i2c_reg_setting *stop_setting =
+ &s_ctrl->stop_setting;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (copy_from_user(&stop_setting32,
+ (void *)compat_ptr((cdata->cfg.setting)),
+ sizeof(struct msm_camera_i2c_reg_setting32))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ stop_setting->addr_type = stop_setting32.addr_type;
+ stop_setting->data_type = stop_setting32.data_type;
+ stop_setting->delay = stop_setting32.delay;
+ stop_setting->size = stop_setting32.size;
+
+ reg_setting = compat_ptr(stop_setting32.reg_setting);
+
+ if (!stop_setting->size) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ stop_setting->reg_setting = kzalloc(stop_setting->size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!stop_setting->reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(stop_setting->reg_setting,
+ (void *)reg_setting,
+ stop_setting->size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(stop_setting->reg_setting);
+ stop_setting->reg_setting = NULL;
+ stop_setting->size = 0;
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case CFG_SET_I2C_SYNC_PARAM: {
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ s_ctrl->sensor_i2c_client->cci_client->cid =
+ cdata->cfg.sensor_i2c_sync_params.cid;
+ s_ctrl->sensor_i2c_client->cci_client->id_map =
+ cdata->cfg.sensor_i2c_sync_params.csid;
+
+ CDBG("I2C_SYNC_PARAM CID:%d, line:%d delay:%d, cdid:%d\n",
+ s_ctrl->sensor_i2c_client->cci_client->cid,
+ cdata->cfg.sensor_i2c_sync_params.line,
+ cdata->cfg.sensor_i2c_sync_params.delay,
+ cdata->cfg.sensor_i2c_sync_params.csid);
+
+ cci_ctrl.cmd = MSM_CCI_SET_SYNC_CID;
+ cci_ctrl.cfg.cci_wait_sync_cfg.line =
+ cdata->cfg.sensor_i2c_sync_params.line;
+ cci_ctrl.cfg.cci_wait_sync_cfg.delay =
+ cdata->cfg.sensor_i2c_sync_params.delay;
+ cci_ctrl.cfg.cci_wait_sync_cfg.cid =
+ cdata->cfg.sensor_i2c_sync_params.cid;
+ cci_ctrl.cfg.cci_wait_sync_cfg.csid =
+ cdata->cfg.sensor_i2c_sync_params.csid;
+ rc = v4l2_subdev_call(s_ctrl->sensor_i2c_client->
+ cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+DONE:
+ mutex_unlock(s_ctrl->msm_sensor_mutex);
+
+ return rc;
+}
+#endif
+
+int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp)
+{
+ struct sensorb_cfg_data *cdata = (struct sensorb_cfg_data *)argp;
+ int32_t rc = 0;
+ int32_t i = 0;
+ mutex_lock(s_ctrl->msm_sensor_mutex);
+ CDBG("%s:%d %s cfgtype = %d\n", __func__, __LINE__,
+ s_ctrl->sensordata->sensor_name, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CFG_GET_SENSOR_INFO:
+ memcpy(cdata->cfg.sensor_info.sensor_name,
+ s_ctrl->sensordata->sensor_name,
+ sizeof(cdata->cfg.sensor_info.sensor_name));
+ cdata->cfg.sensor_info.session_id =
+ s_ctrl->sensordata->sensor_info->session_id;
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ cdata->cfg.sensor_info.subdev_id[i] =
+ s_ctrl->sensordata->sensor_info->subdev_id[i];
+ cdata->cfg.sensor_info.subdev_intf[i] =
+ s_ctrl->sensordata->sensor_info->subdev_intf[i];
+ }
+ cdata->cfg.sensor_info.is_mount_angle_valid =
+ s_ctrl->sensordata->sensor_info->is_mount_angle_valid;
+ cdata->cfg.sensor_info.sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ cdata->cfg.sensor_info.position =
+ s_ctrl->sensordata->sensor_info->position;
+ cdata->cfg.sensor_info.modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ CDBG("%s:%d sensor name %s\n", __func__, __LINE__,
+ cdata->cfg.sensor_info.sensor_name);
+ CDBG("%s:%d session id %d\n", __func__, __LINE__,
+ cdata->cfg.sensor_info.session_id);
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ CDBG("%s:%d subdev_id[%d] %d\n", __func__, __LINE__, i,
+ cdata->cfg.sensor_info.subdev_id[i]);
+ CDBG("%s:%d subdev_intf[%d] %d\n", __func__, __LINE__,
+ i, cdata->cfg.sensor_info.subdev_intf[i]);
+ }
+ CDBG("%s:%d mount angle valid %d value %d\n", __func__,
+ __LINE__, cdata->cfg.sensor_info.is_mount_angle_valid,
+ cdata->cfg.sensor_info.sensor_mount_angle);
+
+ break;
+ case CFG_GET_SENSOR_INIT_PARAMS:
+ cdata->cfg.sensor_init_params.modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ cdata->cfg.sensor_init_params.position =
+ s_ctrl->sensordata->sensor_info->position;
+ cdata->cfg.sensor_init_params.sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ CDBG("%s:%d init params mode %d pos %d mount %d\n", __func__,
+ __LINE__,
+ cdata->cfg.sensor_init_params.modes_supported,
+ cdata->cfg.sensor_init_params.position,
+ cdata->cfg.sensor_init_params.sensor_mount_angle);
+ break;
+
+ case CFG_WRITE_I2C_ARRAY:
+ case CFG_WRITE_I2C_ARRAY_SYNC:
+ case CFG_WRITE_I2C_ARRAY_SYNC_BLOCK:
+ case CFG_WRITE_I2C_ARRAY_ASYNC: {
+ struct msm_camera_i2c_reg_setting conf_array;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&conf_array,
+ (void *)cdata->cfg.setting,
+ sizeof(struct msm_camera_i2c_reg_setting))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+ if (cdata->cfgtype == CFG_WRITE_I2C_ARRAY)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else if (CFG_WRITE_I2C_ARRAY_ASYNC == cdata->cfgtype)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_async(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else if (CFG_WRITE_I2C_ARRAY_SYNC_BLOCK == cdata->cfgtype)
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_sync_block(
+ s_ctrl->sensor_i2c_client,
+ &conf_array);
+ else
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_table_sync(s_ctrl->sensor_i2c_client,
+ &conf_array);
+
+ kfree(reg_setting);
+ break;
+ }
+ case CFG_SLAVE_READ_I2C: {
+ struct msm_camera_i2c_read_config read_config;
+ struct msm_camera_i2c_read_config *read_config_ptr = NULL;
+ uint16_t local_data = 0;
+ uint16_t orig_slave_addr = 0, read_slave_addr = 0;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ read_config_ptr =
+ (struct msm_camera_i2c_read_config *)cdata->cfg.setting;
+ if (copy_from_user(&read_config, read_config_ptr,
+ sizeof(struct msm_camera_i2c_read_config))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ read_slave_addr = read_config.slave_addr;
+ CDBG("%s:CFG_SLAVE_READ_I2C:", __func__);
+ CDBG("%s:slave_addr=0x%x reg_addr=0x%x, data_type=%d\n",
+ __func__, read_config.slave_addr,
+ read_config.reg_addr, read_config.data_type);
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->cci_client->sid;
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ read_slave_addr >> 1;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->client->addr;
+ s_ctrl->sensor_i2c_client->client->addr =
+ read_slave_addr >> 1;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.", __func__);
+ rc = -EFAULT;
+ break;
+ }
+ CDBG("%s:orig_slave_addr=0x%x, new_slave_addr=0x%x",
+ __func__, orig_slave_addr,
+ read_slave_addr >> 1);
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_read(
+ s_ctrl->sensor_i2c_client,
+ read_config.reg_addr,
+ &local_data, read_config.data_type);
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ orig_slave_addr;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ s_ctrl->sensor_i2c_client->client->addr =
+ orig_slave_addr;
+ }
+ if (rc < 0) {
+ pr_err("%s:%d: i2c_read failed\n", __func__, __LINE__);
+ break;
+ }
+ if (put_user(local_data, &read_config_ptr->data))
+ rc = -EFAULT;
+ break;
+ }
+ case CFG_SLAVE_WRITE_I2C_ARRAY: {
+ struct msm_camera_i2c_array_write_config write_config;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+ uint16_t write_slave_addr = 0;
+ uint16_t orig_slave_addr = 0;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (copy_from_user(&write_config,
+ (void *)cdata->cfg.setting,
+ sizeof(struct msm_camera_i2c_array_write_config))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ CDBG("%s:CFG_SLAVE_WRITE_I2C_ARRAY:", __func__);
+ CDBG("%s:slave_addr=0x%x, array_size=%d\n", __func__,
+ write_config.slave_addr,
+ write_config.conf_array.size);
+
+ if (!write_config.conf_array.size ||
+ write_config.conf_array.size > I2C_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(write_config.conf_array.size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting,
+ (void *)(write_config.conf_array.reg_setting),
+ write_config.conf_array.size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+ write_config.conf_array.reg_setting = reg_setting;
+ write_slave_addr = write_config.slave_addr;
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->cci_client->sid;
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ write_slave_addr >> 1;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ orig_slave_addr =
+ s_ctrl->sensor_i2c_client->client->addr;
+ s_ctrl->sensor_i2c_client->client->addr =
+ write_slave_addr >> 1;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.", __func__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+ CDBG("%s:orig_slave_addr=0x%x, new_slave_addr=0x%x",
+ __func__, orig_slave_addr,
+ write_slave_addr >> 1);
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->i2c_write_table(
+ s_ctrl->sensor_i2c_client, &(write_config.conf_array));
+ if (s_ctrl->sensor_i2c_client->cci_client) {
+ s_ctrl->sensor_i2c_client->cci_client->sid =
+ orig_slave_addr;
+ } else if (s_ctrl->sensor_i2c_client->client) {
+ s_ctrl->sensor_i2c_client->client->addr =
+ orig_slave_addr;
+ } else {
+ pr_err("%s: error: no i2c/cci client found.", __func__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+ kfree(reg_setting);
+ break;
+ }
+ case CFG_WRITE_I2C_SEQ_ARRAY: {
+ struct msm_camera_i2c_seq_reg_setting conf_array;
+ struct msm_camera_i2c_seq_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (copy_from_user(&conf_array,
+ (void *)cdata->cfg.setting,
+ sizeof(struct msm_camera_i2c_seq_reg_setting))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (!conf_array.size ||
+ conf_array.size > I2C_SEQ_REG_DATA_MAX) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_seq_reg_array)),
+ GFP_KERNEL);
+ if (!reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_seq_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+ rc = s_ctrl->sensor_i2c_client->i2c_func_tbl->
+ i2c_write_seq_table(s_ctrl->sensor_i2c_client,
+ &conf_array);
+ kfree(reg_setting);
+ break;
+ }
+
+ case CFG_POWER_UP:
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_DOWN) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+ if (s_ctrl->func_tbl->sensor_power_up) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 1);
+
+ rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ break;
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_UP;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ rc = -EFAULT;
+ }
+ break;
+
+ case CFG_POWER_DOWN:
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ kfree(s_ctrl->stop_setting.reg_setting);
+ s_ctrl->stop_setting.reg_setting = NULL;
+ if (s_ctrl->sensor_state != MSM_SENSOR_POWER_UP) {
+ pr_err("%s:%d failed: invalid state %d\n", __func__,
+ __LINE__, s_ctrl->sensor_state);
+ rc = -EFAULT;
+ break;
+ }
+ if (s_ctrl->func_tbl->sensor_power_down) {
+ if (s_ctrl->sensordata->misc_regulator)
+ msm_sensor_misc_regulator(s_ctrl, 0);
+
+ rc = s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d failed rc %d\n", __func__,
+ __LINE__, rc);
+ break;
+ }
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_DOWN;
+ CDBG("%s:%d sensor state %d\n", __func__, __LINE__,
+ s_ctrl->sensor_state);
+ } else {
+ rc = -EFAULT;
+ }
+ break;
+
+ case CFG_SET_STOP_STREAM_SETTING: {
+ struct msm_camera_i2c_reg_setting *stop_setting =
+ &s_ctrl->stop_setting;
+ struct msm_camera_i2c_reg_array *reg_setting = NULL;
+
+ if (s_ctrl->is_csid_tg_mode)
+ goto DONE;
+
+ if (copy_from_user(stop_setting,
+ (void *)cdata->cfg.setting,
+ sizeof(struct msm_camera_i2c_reg_setting))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ reg_setting = stop_setting->reg_setting;
+
+ if (!stop_setting->size) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ stop_setting->reg_setting = kzalloc(stop_setting->size *
+ (sizeof(struct msm_camera_i2c_reg_array)), GFP_KERNEL);
+ if (!stop_setting->reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(stop_setting->reg_setting,
+ (void *)reg_setting,
+ stop_setting->size *
+ sizeof(struct msm_camera_i2c_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(stop_setting->reg_setting);
+ stop_setting->reg_setting = NULL;
+ stop_setting->size = 0;
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ case CFG_SET_I2C_SYNC_PARAM: {
+ struct msm_camera_cci_ctrl cci_ctrl;
+
+ s_ctrl->sensor_i2c_client->cci_client->cid =
+ cdata->cfg.sensor_i2c_sync_params.cid;
+ s_ctrl->sensor_i2c_client->cci_client->id_map =
+ cdata->cfg.sensor_i2c_sync_params.csid;
+
+ CDBG("I2C_SYNC_PARAM CID:%d, line:%d delay:%d, cdid:%d\n",
+ s_ctrl->sensor_i2c_client->cci_client->cid,
+ cdata->cfg.sensor_i2c_sync_params.line,
+ cdata->cfg.sensor_i2c_sync_params.delay,
+ cdata->cfg.sensor_i2c_sync_params.csid);
+
+ cci_ctrl.cmd = MSM_CCI_SET_SYNC_CID;
+ cci_ctrl.cfg.cci_wait_sync_cfg.line =
+ cdata->cfg.sensor_i2c_sync_params.line;
+ cci_ctrl.cfg.cci_wait_sync_cfg.delay =
+ cdata->cfg.sensor_i2c_sync_params.delay;
+ cci_ctrl.cfg.cci_wait_sync_cfg.cid =
+ cdata->cfg.sensor_i2c_sync_params.cid;
+ cci_ctrl.cfg.cci_wait_sync_cfg.csid =
+ cdata->cfg.sensor_i2c_sync_params.csid;
+ rc = v4l2_subdev_call(s_ctrl->sensor_i2c_client->
+ cci_client->cci_subdev,
+ core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+ if (rc < 0) {
+ pr_err("%s: line %d rc = %d\n", __func__, __LINE__, rc);
+ rc = -EFAULT;
+ break;
+ }
+ break;
+ }
+
+ default:
+ rc = -EFAULT;
+ break;
+ }
+
+DONE:
+ mutex_unlock(s_ctrl->msm_sensor_mutex);
+
+ return rc;
+}
+
+int msm_sensor_check_id(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int rc;
+
+ if (s_ctrl->func_tbl->sensor_match_id)
+ rc = s_ctrl->func_tbl->sensor_match_id(s_ctrl);
+ else
+ rc = msm_sensor_match_id(s_ctrl);
+ if (rc < 0)
+ pr_err("%s:%d match id failed rc %d\n", __func__, __LINE__, rc);
+ return rc;
+}
+
+static int msm_sensor_power(struct v4l2_subdev *sd, int on)
+{
+ int rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(sd);
+ mutex_lock(s_ctrl->msm_sensor_mutex);
+ if (!on && s_ctrl->sensor_state == MSM_SENSOR_POWER_UP) {
+ s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+ s_ctrl->sensor_state = MSM_SENSOR_POWER_DOWN;
+ }
+ mutex_unlock(s_ctrl->msm_sensor_mutex);
+ return rc;
+}
+
+static int msm_sensor_v4l2_enum_fmt(struct v4l2_subdev *sd,
+ unsigned int index, enum v4l2_mbus_pixelcode *code)
+{
+ struct msm_sensor_ctrl_t *s_ctrl = get_sctrl(sd);
+
+ if ((unsigned int)index >= s_ctrl->sensor_v4l2_subdev_info_size)
+ return -EINVAL;
+
+ *code = s_ctrl->sensor_v4l2_subdev_info[index].code;
+ return 0;
+}
+
+static struct v4l2_subdev_core_ops msm_sensor_subdev_core_ops = {
+ .ioctl = msm_sensor_subdev_ioctl,
+ .s_power = msm_sensor_power,
+};
+
+static struct v4l2_subdev_video_ops msm_sensor_subdev_video_ops = {
+ .enum_mbus_fmt = msm_sensor_v4l2_enum_fmt,
+};
+
+static struct v4l2_subdev_ops msm_sensor_subdev_ops = {
+ .core = &msm_sensor_subdev_core_ops,
+ .video = &msm_sensor_subdev_video_ops,
+};
+
+static struct msm_sensor_fn_t msm_sensor_func_tbl = {
+ .sensor_config = msm_sensor_config,
+#ifdef CONFIG_COMPAT
+ .sensor_config32 = msm_sensor_config32,
+#endif
+ .sensor_power_up = msm_sensor_power_up,
+ .sensor_power_down = msm_sensor_power_down,
+ .sensor_match_id = msm_sensor_match_id,
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_write_conf_tbl = msm_camera_cci_i2c_write_conf_tbl,
+ .i2c_write_table_async = msm_camera_cci_i2c_write_table_async,
+ .i2c_write_table_sync = msm_camera_cci_i2c_write_table_sync,
+ .i2c_write_table_sync_block = msm_camera_cci_i2c_write_table_sync_block,
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_qup_func_tbl = {
+ .i2c_read = msm_camera_qup_i2c_read,
+ .i2c_read_seq = msm_camera_qup_i2c_read_seq,
+ .i2c_write = msm_camera_qup_i2c_write,
+ .i2c_write_table = msm_camera_qup_i2c_write_table,
+ .i2c_write_seq_table = msm_camera_qup_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_qup_i2c_write_table_w_microdelay,
+ .i2c_write_conf_tbl = msm_camera_qup_i2c_write_conf_tbl,
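+	/* QUP has no CCI-style async/sync queues, so these variants fall
+	 * back to the plain table write.
+	 */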
+ .i2c_write_table_async = msm_camera_qup_i2c_write_table,
+ .i2c_write_table_sync = msm_camera_qup_i2c_write_table,
+ .i2c_write_table_sync_block = msm_camera_qup_i2c_write_table,
+};
+
+int32_t msm_sensor_init_default_params(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = -ENOMEM;
+ struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_cam_clk_info *clk_info = NULL;
+ unsigned long mount_pos = 0;
+
+ /* Validate input parameters */
+ if (!s_ctrl) {
+ pr_err("%s:%d failed: invalid params s_ctrl %p\n", __func__,
+ __LINE__, s_ctrl);
+ return -EINVAL;
+ }
+
+ if (!s_ctrl->sensor_i2c_client) {
+ pr_err("%s:%d failed: invalid params sensor_i2c_client %p\n",
+ __func__, __LINE__, s_ctrl->sensor_i2c_client);
+ return -EINVAL;
+ }
+
+ /* Initialize cci_client */
+ s_ctrl->sensor_i2c_client->cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+ if (!s_ctrl->sensor_i2c_client->cci_client) {
+ pr_err("%s:%d failed: no memory cci_client %p\n", __func__,
+ __LINE__, s_ctrl->sensor_i2c_client->cci_client);
+ return -ENOMEM;
+ }
+
+ if (s_ctrl->sensor_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ cci_client = s_ctrl->sensor_i2c_client->cci_client;
+
+ /* Get CCI subdev */
+ cci_client->cci_subdev = msm_cci_get_subdev();
+
+ /* Update CCI / I2C function table */
+ if (!s_ctrl->sensor_i2c_client->i2c_func_tbl)
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_cci_func_tbl;
+ } else {
+ if (!s_ctrl->sensor_i2c_client->i2c_func_tbl) {
+ CDBG("%s:%d\n", __func__, __LINE__);
+ s_ctrl->sensor_i2c_client->i2c_func_tbl =
+ &msm_sensor_qup_func_tbl;
+ }
+ }
+
+ /* Update function table driven by ioctl */
+ if (!s_ctrl->func_tbl)
+ s_ctrl->func_tbl = &msm_sensor_func_tbl;
+
+ /* Update v4l2 subdev ops table */
+ if (!s_ctrl->sensor_v4l2_subdev_ops)
+ s_ctrl->sensor_v4l2_subdev_ops = &msm_sensor_subdev_ops;
+
+ /* Initialize clock info */
+ clk_info = kzalloc(sizeof(cam_8974_clk_info), GFP_KERNEL);
+ if (!clk_info) {
+ pr_err("%s:%d failed no memory clk_info %p\n", __func__,
+ __LINE__, clk_info);
+ rc = -ENOMEM;
+ goto FREE_CCI_CLIENT;
+ }
+ memcpy(clk_info, cam_8974_clk_info, sizeof(cam_8974_clk_info));
+ s_ctrl->sensordata->power_info.clk_info = clk_info;
+ s_ctrl->sensordata->power_info.clk_info_size =
+ ARRAY_SIZE(cam_8974_clk_info);
+
+ /* Update sensor mount angle and position in media entity flag */
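+	/* The position is packed at bit 16 and (mount angle / 90) at bit 8
+	 * of the media entity flags.
+	 */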
+ mount_pos = s_ctrl->sensordata->sensor_info->position << 16;
+ mount_pos = mount_pos | ((s_ctrl->sensordata->sensor_info->
+ sensor_mount_angle / 90) << 8);
+ s_ctrl->msm_sd.sd.entity.flags = mount_pos | MEDIA_ENT_FL_DEFAULT;
+
+ return 0;
+
+FREE_CCI_CLIENT:
+ kfree(cci_client);
+ return rc;
+}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h
new file mode 100644
index 000000000000..bd12588eada9
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor.h
@@ -0,0 +1,125 @@
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SENSOR_H
+#define MSM_SENSOR_H
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <soc/qcom/camera2.h>
+#include <media/msm_cam_sensor.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_sd.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+enum msm_sensor_sensor_slave_info_type {
+ MSM_SENSOR_SLAVEADDR_DATA,
+ MSM_SENSOR_IDREGADDR_DATA,
+ MSM_SENSOR_SENSOR_ID_DATA,
+ MSM_SENSOR_SENIDMASK_DATA,
+ MSM_SENSOR_NUM_ID_INFO_DATA,
+};
+
+struct msm_sensor_ctrl_t;
+
+enum msm_sensor_state_t {
+ MSM_SENSOR_POWER_DOWN,
+ MSM_SENSOR_POWER_UP,
+};
+
+struct msm_sensor_fn_t {
+ int (*sensor_config)(struct msm_sensor_ctrl_t *, void __user *);
+#ifdef CONFIG_COMPAT
+ int (*sensor_config32)(struct msm_sensor_ctrl_t *, void __user *);
+#endif
+ int (*sensor_power_down)(struct msm_sensor_ctrl_t *);
+ int (*sensor_power_up)(struct msm_sensor_ctrl_t *);
+ int (*sensor_match_id)(struct msm_sensor_ctrl_t *);
+};
+
+struct msm_sensor_ctrl_t {
+ struct platform_device *pdev;
+ struct mutex *msm_sensor_mutex;
+
+ enum msm_camera_device_type_t sensor_device_type;
+ struct msm_camera_sensor_board_info *sensordata;
+ struct msm_sensor_power_setting_array power_setting_array;
+ struct msm_sensor_packed_cfg_t *cfg_override;
+ struct msm_sd_subdev msm_sd;
+ enum cci_i2c_master_t cci_i2c_master;
+
+ struct msm_camera_i2c_client *sensor_i2c_client;
+ struct v4l2_subdev_info *sensor_v4l2_subdev_info;
+ uint8_t sensor_v4l2_subdev_info_size;
+ struct v4l2_subdev_ops *sensor_v4l2_subdev_ops;
+ struct msm_sensor_fn_t *func_tbl;
+ struct msm_camera_i2c_reg_setting stop_setting;
+ void *misc_regulator;
+ enum msm_sensor_state_t sensor_state;
+ uint8_t is_probe_succeed;
+ uint32_t id;
+ struct device_node *of_node;
+ enum msm_camera_stream_type_t camera_stream_type;
+ uint32_t set_mclk_23880000;
+ uint8_t is_csid_tg_mode;
+};
+
+int msm_sensor_config(struct msm_sensor_ctrl_t *s_ctrl, void __user *argp);
+
+int msm_sensor_power_up(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_power_down(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_check_id(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_match_id(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_update_cfg(struct msm_sensor_ctrl_t *s_ctrl);
+
+int msm_sensor_free_sensor_data(struct msm_sensor_ctrl_t *s_ctrl);
+
+int32_t msm_sensor_init_default_params(struct msm_sensor_ctrl_t *s_ctrl);
+
+int32_t msm_sensor_get_dt_gpio_req_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+
+int32_t msm_sensor_get_dt_gpio_set_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+
+int32_t msm_sensor_init_gpio_pin_tbl(struct device_node *of_node,
+ struct msm_camera_gpio_conf *gconf, uint16_t *gpio_array,
+ uint16_t gpio_array_size);
+#ifdef CONFIG_COMPAT
+long msm_sensor_subdev_fops_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg);
+#endif
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
new file mode 100644
index 000000000000..24f10c40f655
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.c
@@ -0,0 +1,1376 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define SENSOR_DRIVER_I2C "i2c_camera"
+/* Header file declaration */
+#include "msm_sensor.h"
+#include "msm_sd.h"
+#include "camera.h"
+#include "msm_cci.h"
+#include "msm_camera_dt_util.h"
+
+/* Logging macro */
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define SENSOR_MAX_MOUNTANGLE (360)
+
+static struct v4l2_file_operations msm_sensor_v4l2_subdev_fops;
+static int32_t msm_sensor_driver_platform_probe(struct platform_device *pdev);
+
+/* Static declaration */
+static struct msm_sensor_ctrl_t *g_sctrl[MAX_CAMERAS];
+
+static int msm_sensor_platform_remove(struct platform_device *pdev)
+{
+ struct msm_sensor_ctrl_t *s_ctrl;
+
+ pr_err("%s: sensor FREE\n", __func__);
+
+ s_ctrl = g_sctrl[pdev->id];
+ if (!s_ctrl) {
+ pr_err("%s: sensor device is NULL\n", __func__);
+ return 0;
+ }
+
+ msm_sensor_free_sensor_data(s_ctrl);
+ kfree(s_ctrl->msm_sensor_mutex);
+ kfree(s_ctrl->sensor_i2c_client);
+ kfree(s_ctrl);
+ g_sctrl[pdev->id] = NULL;
+
+ return 0;
+}
+
+
+static const struct of_device_id msm_sensor_driver_dt_match[] = {
+ {.compatible = "qcom,camera"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_sensor_driver_dt_match);
+
+static struct platform_driver msm_sensor_platform_driver = {
+ .probe = msm_sensor_driver_platform_probe,
+ .driver = {
+ .name = "qcom,camera",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_sensor_driver_dt_match,
+ },
+ .remove = msm_sensor_platform_remove,
+};
+
+static struct v4l2_subdev_info msm_sensor_driver_subdev_info[] = {
+ {
+ .code = V4L2_MBUS_FMT_SBGGR10_1X10,
+ .colorspace = V4L2_COLORSPACE_JPEG,
+ .fmt = 1,
+ .order = 0,
+ },
+};
+
+static int32_t msm_sensor_driver_create_i2c_v4l_subdev
+ (struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ uint32_t session_id = 0;
+ struct i2c_client *client = s_ctrl->sensor_i2c_client->client;
+
+ CDBG("%s %s I2c probe succeeded\n", __func__, client->name);
+ rc = camera_init_v4l2(&client->dev, &session_id);
+ if (rc < 0) {
+ pr_err("failed: camera_init_i2c_v4l2 rc %d", rc);
+ return rc;
+ }
+ CDBG("%s rc %d session_id %d\n", __func__, rc, session_id);
+ snprintf(s_ctrl->msm_sd.sd.name,
+ sizeof(s_ctrl->msm_sd.sd.name), "%s",
+ s_ctrl->sensordata->sensor_name);
+ v4l2_i2c_subdev_init(&s_ctrl->msm_sd.sd, client,
+ s_ctrl->sensor_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&s_ctrl->msm_sd.sd, client);
+ s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&s_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ s_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
+ s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name;
+ s_ctrl->sensordata->sensor_info->session_id = session_id;
+ s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3;
+ msm_sd_register(&s_ctrl->msm_sd);
+ msm_sensor_v4l2_subdev_fops = v4l2_subdev_fops;
+#ifdef CONFIG_COMPAT
+ msm_sensor_v4l2_subdev_fops.compat_ioctl32 =
+ msm_sensor_subdev_fops_ioctl;
+#endif
+ s_ctrl->msm_sd.sd.devnode->fops =
+ &msm_sensor_v4l2_subdev_fops;
+ CDBG("%s:%d\n", __func__, __LINE__);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_create_v4l_subdev
+ (struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ uint32_t session_id = 0;
+
+ rc = camera_init_v4l2(&s_ctrl->pdev->dev, &session_id);
+ if (rc < 0) {
+ pr_err("failed: camera_init_v4l2 rc %d", rc);
+ return rc;
+ }
+ CDBG("rc %d session_id %d", rc, session_id);
+ s_ctrl->sensordata->sensor_info->session_id = session_id;
+
+ /* Create /dev/v4l-subdevX device */
+ v4l2_subdev_init(&s_ctrl->msm_sd.sd, s_ctrl->sensor_v4l2_subdev_ops);
+ snprintf(s_ctrl->msm_sd.sd.name, sizeof(s_ctrl->msm_sd.sd.name), "%s",
+ s_ctrl->sensordata->sensor_name);
+ v4l2_set_subdevdata(&s_ctrl->msm_sd.sd, s_ctrl->pdev);
+ s_ctrl->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&s_ctrl->msm_sd.sd.entity, 0, NULL, 0);
+ s_ctrl->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ s_ctrl->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR;
+ s_ctrl->msm_sd.sd.entity.name = s_ctrl->msm_sd.sd.name;
+ s_ctrl->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x3;
+ msm_sd_register(&s_ctrl->msm_sd);
+ msm_cam_copy_v4l2_subdev_fops(&msm_sensor_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_sensor_v4l2_subdev_fops.compat_ioctl32 =
+ msm_sensor_subdev_fops_ioctl;
+#endif
+ s_ctrl->msm_sd.sd.devnode->fops =
+ &msm_sensor_v4l2_subdev_fops;
+
+ return rc;
+}
+
+static int32_t msm_sensor_fill_eeprom_subdevid_by_name(
+ struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ const char *eeprom_name;
+ struct device_node *src_node = NULL;
+ uint32_t val = 0, eeprom_name_len;
+ int32_t *eeprom_subdev_id, i, userspace_probe = 0;
+ int32_t count = 0;
+ struct msm_sensor_info_t *sensor_info;
+ struct device_node *of_node = s_ctrl->of_node;
+ const void *p;
+
+ if (!s_ctrl->sensordata->eeprom_name || !of_node)
+ return -EINVAL;
+
+ eeprom_name_len = strlen(s_ctrl->sensordata->eeprom_name);
+ if (eeprom_name_len >= MAX_SENSOR_NAME)
+ return -EINVAL;
+
+ sensor_info = s_ctrl->sensordata->sensor_info;
+ eeprom_subdev_id = &sensor_info->subdev_id[SUB_MODULE_EEPROM];
+	/*
+	 * The eeprom name string is valid; set the subdev id to -1
+	 * and try to find a new id.
+	 */
+ *eeprom_subdev_id = -1;
+
+ if (0 == eeprom_name_len)
+ return 0;
+
+ p = of_get_property(of_node, "qcom,eeprom-src", &count);
+ if (!p || !count)
+ return 0;
+
+ count /= sizeof(uint32_t);
+ for (i = 0; i < count; i++) {
+ userspace_probe = 0;
+ eeprom_name = NULL;
+ src_node = of_parse_phandle(of_node, "qcom,eeprom-src", i);
+ if (!src_node) {
+ pr_err("eeprom src node NULL\n");
+ continue;
+ }
+		/*
+		 * When the eeprom is probed from the kernel, the eeprom
+		 * name must be present; otherwise fall back to a
+		 * userspace probe.
+		 */
+ rc = of_property_read_string(src_node, "qcom,eeprom-name",
+ &eeprom_name);
+ if (rc < 0) {
+ pr_err("%s:%d Eeprom userspace probe for %s\n",
+ __func__, __LINE__,
+ s_ctrl->sensordata->eeprom_name);
+ of_node_put(src_node);
+ userspace_probe = 1;
+ if (count > 1)
+ return -EINVAL;
+ }
+ if (!userspace_probe &&
+ strcmp(eeprom_name, s_ctrl->sensordata->eeprom_name))
+ continue;
+
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ if (rc < 0) {
+ pr_err("%s qcom,eeprom cell index %d, rc %d\n",
+ __func__, val, rc);
+ of_node_put(src_node);
+ if (userspace_probe)
+ return -EINVAL;
+ continue;
+ }
+
+ *eeprom_subdev_id = val;
+ CDBG("%s:%d Eeprom subdevice id is %d\n",
+ __func__, __LINE__, val);
+ of_node_put(src_node);
+ src_node = NULL;
+ break;
+ }
+
+ return rc;
+}
+
+static int32_t msm_sensor_fill_actuator_subdevid_by_name(
+ struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct device_node *src_node = NULL;
+ uint32_t val = 0, actuator_name_len;
+ int32_t *actuator_subdev_id;
+ struct msm_sensor_info_t *sensor_info;
+ struct device_node *of_node = s_ctrl->of_node;
+
+ if (!s_ctrl->sensordata->actuator_name || !of_node)
+ return -EINVAL;
+
+ actuator_name_len = strlen(s_ctrl->sensordata->actuator_name);
+ if (actuator_name_len >= MAX_SENSOR_NAME)
+ return -EINVAL;
+
+ sensor_info = s_ctrl->sensordata->sensor_info;
+ actuator_subdev_id = &sensor_info->subdev_id[SUB_MODULE_ACTUATOR];
+	/*
+	 * The actuator name string is valid; set the subdev id to -1
+	 * and try to find a new id.
+	 */
+ *actuator_subdev_id = -1;
+
+ if (0 == actuator_name_len)
+ return 0;
+
+ src_node = of_parse_phandle(of_node, "qcom,actuator-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,actuator cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *actuator_subdev_id = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ return rc;
+}
+
+static int32_t msm_sensor_fill_ois_subdevid_by_name(
+ struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct device_node *src_node = NULL;
+ uint32_t val = 0, ois_name_len;
+ int32_t *ois_subdev_id;
+ struct msm_sensor_info_t *sensor_info;
+ struct device_node *of_node = s_ctrl->of_node;
+
+ if (!s_ctrl->sensordata->ois_name || !of_node)
+ return -EINVAL;
+
+ ois_name_len = strlen(s_ctrl->sensordata->ois_name);
+ if (ois_name_len >= MAX_SENSOR_NAME)
+ return -EINVAL;
+
+ sensor_info = s_ctrl->sensordata->sensor_info;
+ ois_subdev_id = &sensor_info->subdev_id[SUB_MODULE_OIS];
+	/*
+	 * The ois name string is valid; set the subdev id to -1
+	 * and try to find a new id.
+	 */
+ *ois_subdev_id = -1;
+
+ if (0 == ois_name_len)
+ return 0;
+
+ src_node = of_parse_phandle(of_node, "qcom,ois-src", 0);
+ if (!src_node) {
+ CDBG("%s:%d src_node NULL\n", __func__, __LINE__);
+ } else {
+ rc = of_property_read_u32(src_node, "cell-index", &val);
+ CDBG("%s qcom,ois cell index %d, rc %d\n", __func__,
+ val, rc);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ *ois_subdev_id = val;
+ of_node_put(src_node);
+ src_node = NULL;
+ }
+
+ return rc;
+}
+
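+/* Copy the user-supplied init params (position, mount angle, supported
+ * modes) into sensor_info, skipping fields left at their invalid
+ * sentinel values.
+ */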
+static int32_t msm_sensor_fill_slave_info_init_params(
+ struct msm_camera_sensor_slave_info *slave_info,
+ struct msm_sensor_info_t *sensor_info)
+{
+ struct msm_sensor_init_params *sensor_init_params;
+ if (!slave_info || !sensor_info)
+ return -EINVAL;
+
+ if (!slave_info->is_init_params_valid)
+ return 0;
+
+ sensor_init_params = &slave_info->sensor_init_params;
+ if (INVALID_CAMERA_B != sensor_init_params->position)
+ sensor_info->position =
+ sensor_init_params->position;
+
+ if (SENSOR_MAX_MOUNTANGLE > sensor_init_params->sensor_mount_angle) {
+ sensor_info->sensor_mount_angle =
+ sensor_init_params->sensor_mount_angle;
+ sensor_info->is_mount_angle_valid = 1;
+ }
+
+ if (CAMERA_MODE_INVALID != sensor_init_params->modes_supported)
+ sensor_info->modes_supported =
+ sensor_init_params->modes_supported;
+
+ return 0;
+}
+
+
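+/* Fall back to default position, supported modes and mount angle when the
+ * slave info carries invalid values.
+ */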
+static int32_t msm_sensor_validate_slave_info(
+ struct msm_sensor_info_t *sensor_info)
+{
+ if (INVALID_CAMERA_B == sensor_info->position) {
+ sensor_info->position = BACK_CAMERA_B;
+ CDBG("%s:%d Set default sensor position\n",
+ __func__, __LINE__);
+ }
+ if (CAMERA_MODE_INVALID == sensor_info->modes_supported) {
+ sensor_info->modes_supported = CAMERA_MODE_2D_B;
+ CDBG("%s:%d Set default sensor modes_supported\n",
+ __func__, __LINE__);
+ }
+ if (SENSOR_MAX_MOUNTANGLE <= sensor_info->sensor_mount_angle) {
+ sensor_info->sensor_mount_angle = 0;
+ CDBG("%s:%d Set default sensor mount angle\n",
+ __func__, __LINE__);
+ sensor_info->is_mount_angle_valid = 1;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static int32_t msm_sensor_get_pw_settings_compat(
+ struct msm_sensor_power_setting *ps,
+ struct msm_sensor_power_setting *us_ps, uint32_t size)
+{
+ int32_t rc = 0, i = 0;
+ struct msm_sensor_power_setting32 *ps32 =
+ kzalloc(sizeof(*ps32) * size, GFP_KERNEL);
+
+ if (!ps32) {
+ pr_err("failed: no memory ps32");
+ return -ENOMEM;
+ }
+ if (copy_from_user(ps32, (void *)us_ps, sizeof(*ps32) * size)) {
+ pr_err("failed: copy_from_user");
+ kfree(ps32);
+ return -EFAULT;
+ }
+ for (i = 0; i < size; i++) {
+ ps[i].config_val = ps32[i].config_val;
+ ps[i].delay = ps32[i].delay;
+ ps[i].seq_type = ps32[i].seq_type;
+ ps[i].seq_val = ps32[i].seq_val;
+ }
+ kfree(ps32);
+ return rc;
+}
+#endif
+
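+/* Build the power-down sequence by copying the user power-up sequence and
+ * reversing its order.
+ */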
+static int32_t msm_sensor_create_pd_settings(void *setting,
+ struct msm_sensor_power_setting *pd, uint32_t size_down,
+ struct msm_sensor_power_setting *pu)
+{
+ int32_t rc = 0;
+ int c, end;
+ struct msm_sensor_power_setting pd_tmp;
+
+ pr_err("Generating power_down_setting");
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ int i = 0;
+ struct msm_sensor_power_setting32 *power_setting_iter =
+ (struct msm_sensor_power_setting32 *)compat_ptr((
+ (struct msm_camera_sensor_slave_info32 *)setting)->
+ power_setting_array.power_setting);
+
+ for (i = 0; i < size_down; i++) {
+ pd[i].config_val = power_setting_iter[i].config_val;
+ pd[i].delay = power_setting_iter[i].delay;
+ pd[i].seq_type = power_setting_iter[i].seq_type;
+ pd[i].seq_val = power_setting_iter[i].seq_val;
+ }
+ } else
+#endif
+ {
+ if (copy_from_user(pd, (void *)pu, sizeof(*pd) * size_down)) {
+ pr_err("failed: copy_from_user");
+ return -EFAULT;
+ }
+ }
+	/* Reverse the power-up order to obtain the power-down sequence */
+ end = size_down - 1;
+ for (c = 0; c < size_down/2; c++) {
+ pd_tmp = pd[c];
+ pd[c] = pd[end];
+ pd[end] = pd_tmp;
+ end--;
+ }
+ return rc;
+}
+
+static int32_t msm_sensor_get_power_down_settings(void *setting,
+ struct msm_camera_sensor_slave_info *slave_info,
+ struct msm_camera_power_ctrl_t *power_info)
+{
+ int32_t rc = 0;
+ uint16_t size_down = 0;
+ uint16_t i = 0;
+ struct msm_sensor_power_setting *pd = NULL;
+
+ /* DOWN */
+ size_down = slave_info->power_setting_array.size_down;
+ if (!size_down || size_down > MAX_POWER_CONFIG)
+ size_down = slave_info->power_setting_array.size;
+ /* Validate size_down */
+ if (size_down > MAX_POWER_CONFIG) {
+ pr_err("failed: invalid size_down %d", size_down);
+ return -EINVAL;
+ }
+ /* Allocate memory for power down setting */
+ pd = kzalloc(sizeof(*pd) * size_down, GFP_KERNEL);
+ if (!pd) {
+ pr_err("failed: no memory power_setting %p", pd);
+		return -ENOMEM;
+ }
+
+ if (slave_info->power_setting_array.power_down_setting) {
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ rc = msm_sensor_get_pw_settings_compat(
+ pd, slave_info->power_setting_array.
+ power_down_setting, size_down);
+ if (rc < 0) {
+ pr_err("failed");
+ kfree(pd);
+ return -EFAULT;
+ }
+ } else
+#endif
+ if (copy_from_user(pd, (void *)slave_info->power_setting_array.
+ power_down_setting, sizeof(*pd) * size_down)) {
+ pr_err("failed: copy_from_user");
+ kfree(pd);
+ return -EFAULT;
+ }
+ } else {
+
+ rc = msm_sensor_create_pd_settings(setting, pd, size_down,
+ slave_info->power_setting_array.power_setting);
+ if (rc < 0) {
+ pr_err("failed");
+ kfree(pd);
+ return -EFAULT;
+ }
+ }
+
+ /* Fill power down setting and power down setting size */
+ power_info->power_down_setting = pd;
+ power_info->power_down_setting_size = size_down;
+
+ /* Print power setting */
+ for (i = 0; i < size_down; i++) {
+ CDBG("DOWN seq_type %d seq_val %d config_val %ld delay %d",
+ pd[i].seq_type, pd[i].seq_val,
+ pd[i].config_val, pd[i].delay);
+ }
+ return rc;
+}
+
+static int32_t msm_sensor_get_power_up_settings(void *setting,
+ struct msm_camera_sensor_slave_info *slave_info,
+ struct msm_camera_power_ctrl_t *power_info)
+{
+ int32_t rc = 0;
+ uint16_t size = 0;
+ uint16_t i = 0;
+ struct msm_sensor_power_setting *pu = NULL;
+
+ size = slave_info->power_setting_array.size;
+
+ /* Validate size */
+ if ((size == 0) || (size > MAX_POWER_CONFIG)) {
+ pr_err("failed: invalid power_setting size_up = %d\n", size);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for power up setting */
+ pu = kzalloc(sizeof(*pu) * size, GFP_KERNEL);
+ if (!pu) {
+ pr_err("failed: no memory power_setting %p", pu);
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ rc = msm_sensor_get_pw_settings_compat(pu,
+ slave_info->power_setting_array.
+ power_setting, size);
+ if (rc < 0) {
+ pr_err("failed");
+ kfree(pu);
+ return -EFAULT;
+ }
+ } else
+#endif
+ {
+ if (copy_from_user(pu,
+ (void *)slave_info->power_setting_array.power_setting,
+ sizeof(*pu) * size)) {
+ pr_err("failed: copy_from_user");
+ kfree(pu);
+ return -EFAULT;
+ }
+ }
+
+ /* Print power setting */
+ for (i = 0; i < size; i++) {
+ CDBG("UP seq_type %d seq_val %d config_val %ld delay %d",
+ pu[i].seq_type, pu[i].seq_val,
+ pu[i].config_val, pu[i].delay);
+ }
+
+
+ /* Fill power up setting and power up setting size */
+ power_info->power_setting = pu;
+ power_info->power_setting_size = size;
+
+ return rc;
+}
+
+static int32_t msm_sensor_get_power_settings(void *setting,
+ struct msm_camera_sensor_slave_info *slave_info,
+ struct msm_camera_power_ctrl_t *power_info)
+{
+ int32_t rc = 0;
+
+ rc = msm_sensor_get_power_up_settings(setting, slave_info, power_info);
+ if (rc < 0) {
+ pr_err("failed");
+ return -EINVAL;
+ }
+
+ rc = msm_sensor_get_power_down_settings(setting, slave_info,
+ power_info);
+ if (rc < 0) {
+ pr_err("failed");
+ return -EINVAL;
+ }
+ return rc;
+}
+
+static void msm_sensor_fill_sensor_info(struct msm_sensor_ctrl_t *s_ctrl,
+ struct msm_sensor_info_t *sensor_info, char *entity_name)
+{
+ uint32_t i;
+
+ if (!s_ctrl || !sensor_info) {
+ pr_err("%s:failed\n", __func__);
+ return;
+ }
+
+ strlcpy(sensor_info->sensor_name, s_ctrl->sensordata->sensor_name,
+ MAX_SENSOR_NAME);
+
+ sensor_info->session_id = s_ctrl->sensordata->sensor_info->session_id;
+
+ s_ctrl->sensordata->sensor_info->subdev_id[SUB_MODULE_SENSOR] =
+ s_ctrl->sensordata->sensor_info->session_id;
+ for (i = 0; i < SUB_MODULE_MAX; i++) {
+ sensor_info->subdev_id[i] =
+ s_ctrl->sensordata->sensor_info->subdev_id[i];
+ sensor_info->subdev_intf[i] =
+ s_ctrl->sensordata->sensor_info->subdev_intf[i];
+ }
+
+ sensor_info->is_mount_angle_valid =
+ s_ctrl->sensordata->sensor_info->is_mount_angle_valid;
+ sensor_info->sensor_mount_angle =
+ s_ctrl->sensordata->sensor_info->sensor_mount_angle;
+ sensor_info->modes_supported =
+ s_ctrl->sensordata->sensor_info->modes_supported;
+ sensor_info->position =
+ s_ctrl->sensordata->sensor_info->position;
+
+ strlcpy(entity_name, s_ctrl->msm_sd.sd.entity.name, MAX_SENSOR_NAME);
+}
+
+/* static function definition */
+int32_t msm_sensor_driver_probe(void *setting,
+ struct msm_sensor_info_t *probed_info, char *entity_name)
+{
+ int32_t rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl = NULL;
+ struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_camera_sensor_slave_info *slave_info = NULL;
+ struct msm_camera_slave_info *camera_info = NULL;
+
+ unsigned long mount_pos = 0;
+ uint32_t is_yuv;
+
+ /* Validate input parameters */
+ if (!setting) {
+ pr_err("failed: slave_info %p", setting);
+ return -EINVAL;
+ }
+
+ /* Allocate memory for slave info */
+ slave_info = kzalloc(sizeof(*slave_info), GFP_KERNEL);
+ if (!slave_info) {
+ pr_err("failed: no memory slave_info %p", slave_info);
+ return -ENOMEM;
+ }
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ struct msm_camera_sensor_slave_info32 *slave_info32 =
+ kzalloc(sizeof(*slave_info32), GFP_KERNEL);
+ if (!slave_info32) {
+ pr_err("failed: no memory for slave_info32 %p\n",
+ slave_info32);
+ rc = -ENOMEM;
+ goto free_slave_info;
+ }
+ if (copy_from_user((void *)slave_info32, setting,
+ sizeof(*slave_info32))) {
+ pr_err("failed: copy_from_user");
+ rc = -EFAULT;
+ kfree(slave_info32);
+ goto free_slave_info;
+ }
+
+ strlcpy(slave_info->actuator_name, slave_info32->actuator_name,
+ sizeof(slave_info->actuator_name));
+
+ strlcpy(slave_info->eeprom_name, slave_info32->eeprom_name,
+ sizeof(slave_info->eeprom_name));
+
+ strlcpy(slave_info->sensor_name, slave_info32->sensor_name,
+ sizeof(slave_info->sensor_name));
+
+ strlcpy(slave_info->ois_name, slave_info32->ois_name,
+ sizeof(slave_info->ois_name));
+
+ strlcpy(slave_info->flash_name, slave_info32->flash_name,
+ sizeof(slave_info->flash_name));
+
+ slave_info->addr_type = slave_info32->addr_type;
+ slave_info->camera_id = slave_info32->camera_id;
+
+ slave_info->i2c_freq_mode = slave_info32->i2c_freq_mode;
+ slave_info->sensor_id_info = slave_info32->sensor_id_info;
+
+ slave_info->slave_addr = slave_info32->slave_addr;
+ slave_info->power_setting_array.size =
+ slave_info32->power_setting_array.size;
+ slave_info->power_setting_array.size_down =
+ slave_info32->power_setting_array.size_down;
+ slave_info->power_setting_array.power_setting =
+ compat_ptr(slave_info32->
+ power_setting_array.power_setting);
+ slave_info->power_setting_array.power_down_setting =
+ compat_ptr(slave_info32->
+ power_setting_array.power_down_setting);
+ slave_info->is_init_params_valid =
+ slave_info32->is_init_params_valid;
+ slave_info->sensor_init_params =
+ slave_info32->sensor_init_params;
+ slave_info->output_format =
+ slave_info32->output_format;
+ kfree(slave_info32);
+ } else
+#endif
+ {
+ if (copy_from_user(slave_info,
+ (void *)setting, sizeof(*slave_info))) {
+ pr_err("failed: copy_from_user");
+ rc = -EFAULT;
+ goto free_slave_info;
+ }
+ }
+
+ /* Print slave info */
+ CDBG("camera id %d Slave addr 0x%X addr_type %d\n",
+ slave_info->camera_id, slave_info->slave_addr,
+ slave_info->addr_type);
+ CDBG("sensor_id_reg_addr 0x%X sensor_id 0x%X sensor id mask %d",
+ slave_info->sensor_id_info.sensor_id_reg_addr,
+ slave_info->sensor_id_info.sensor_id,
+ slave_info->sensor_id_info.sensor_id_mask);
+ CDBG("power up size %d power down size %d\n",
+ slave_info->power_setting_array.size,
+ slave_info->power_setting_array.size_down);
+
+ if (slave_info->is_init_params_valid) {
+ CDBG("position %d",
+ slave_info->sensor_init_params.position);
+ CDBG("mount %d",
+ slave_info->sensor_init_params.sensor_mount_angle);
+ }
+
+ /* Validate camera id */
+ if (slave_info->camera_id >= MAX_CAMERAS) {
+ pr_err("failed: invalid camera id %d max %d",
+ slave_info->camera_id, MAX_CAMERAS);
+ rc = -EINVAL;
+ goto free_slave_info;
+ }
+
+ /* Extract s_ctrl from camera id */
+ s_ctrl = g_sctrl[slave_info->camera_id];
+ if (!s_ctrl) {
+ pr_err("failed: s_ctrl %p for camera_id %d", s_ctrl,
+ slave_info->camera_id);
+ rc = -EINVAL;
+ goto free_slave_info;
+ }
+
+ CDBG("s_ctrl[%d] %p", slave_info->camera_id, s_ctrl);
+
+ if (s_ctrl->is_probe_succeed == 1) {
+		/*
+		 * A sensor has already been probed successfully on this
+		 * camera slot. Ignore this probe request.
+		 */
+ if (slave_info->sensor_id_info.sensor_id ==
+ s_ctrl->sensordata->cam_slave_info->
+ sensor_id_info.sensor_id) {
+ pr_err("slot%d: sensor id%d already probed\n",
+ slave_info->camera_id,
+ s_ctrl->sensordata->cam_slave_info->
+ sensor_id_info.sensor_id);
+ msm_sensor_fill_sensor_info(s_ctrl,
+ probed_info, entity_name);
+ } else
+ pr_err("slot %d has some other sensor\n",
+ slave_info->camera_id);
+
+ rc = 0;
+ goto free_slave_info;
+ }
+
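+	/* An empty power sequence together with a zero slave address selects
+	 * CSID test-generator mode, so skip power settings and CCI setup.
+	 */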
+ if (slave_info->power_setting_array.size == 0 &&
+ slave_info->slave_addr == 0) {
+ s_ctrl->is_csid_tg_mode = 1;
+ goto CSID_TG;
+ }
+
+ rc = msm_sensor_get_power_settings(setting, slave_info,
+ &s_ctrl->sensordata->power_info);
+ if (rc < 0) {
+ pr_err("failed");
+ goto free_slave_info;
+ }
+
+
+ camera_info = kzalloc(sizeof(struct msm_camera_slave_info), GFP_KERNEL);
+ if (!camera_info) {
+		pr_err("failed: no memory camera_info %p", camera_info);
+		rc = -ENOMEM;
+		goto free_slave_info;
+	}
+
+ s_ctrl->sensordata->slave_info = camera_info;
+
+ /* Fill sensor slave info */
+ camera_info->sensor_slave_addr = slave_info->slave_addr;
+ camera_info->sensor_id_reg_addr =
+ slave_info->sensor_id_info.sensor_id_reg_addr;
+ camera_info->sensor_id = slave_info->sensor_id_info.sensor_id;
+ camera_info->sensor_id_mask = slave_info->sensor_id_info.sensor_id_mask;
+
+ /* Fill CCI master, slave address and CCI default params */
+ if (!s_ctrl->sensor_i2c_client) {
+ pr_err("failed: sensor_i2c_client %p",
+ s_ctrl->sensor_i2c_client);
+ rc = -EINVAL;
+ goto free_camera_info;
+ }
+ /* Fill sensor address type */
+ s_ctrl->sensor_i2c_client->addr_type = slave_info->addr_type;
+ if (s_ctrl->sensor_i2c_client->client)
+ s_ctrl->sensor_i2c_client->client->addr =
+ camera_info->sensor_slave_addr;
+
+ cci_client = s_ctrl->sensor_i2c_client->cci_client;
+ if (!cci_client) {
+		pr_err("failed: cci_client %p", cci_client);
+		rc = -EINVAL;
+		goto free_camera_info;
+ }
+ cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
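+	/* CCI expects the 7-bit I2C address, so drop the R/W bit */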
+ cci_client->sid = slave_info->slave_addr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->i2c_freq_mode = slave_info->i2c_freq_mode;
+
+ /* Parse and fill vreg params for powerup settings */
+ rc = msm_camera_fill_vreg_params(
+ s_ctrl->sensordata->power_info.cam_vreg,
+ s_ctrl->sensordata->power_info.num_vreg,
+ s_ctrl->sensordata->power_info.power_setting,
+ s_ctrl->sensordata->power_info.power_setting_size);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_get_dt_power_setting_data rc %d",
+ rc);
+ goto free_camera_info;
+ }
+
+ /* Parse and fill vreg params for powerdown settings*/
+ rc = msm_camera_fill_vreg_params(
+ s_ctrl->sensordata->power_info.cam_vreg,
+ s_ctrl->sensordata->power_info.num_vreg,
+ s_ctrl->sensordata->power_info.power_down_setting,
+ s_ctrl->sensordata->power_info.power_down_setting_size);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_fill_vreg_params for PDOWN rc %d",
+ rc);
+ goto free_camera_info;
+ }
+
+CSID_TG:
+	/*
+	 * Update the sensor, eeprom, actuator and ois names in the
+	 * sensor control structure.
+	 */
+ s_ctrl->sensordata->sensor_name = slave_info->sensor_name;
+ s_ctrl->sensordata->eeprom_name = slave_info->eeprom_name;
+ s_ctrl->sensordata->actuator_name = slave_info->actuator_name;
+ s_ctrl->sensordata->ois_name = slave_info->ois_name;
+	/*
+	 * Update the eeprom subdevice id based on the input eeprom name.
+	 */
+ rc = msm_sensor_fill_eeprom_subdevid_by_name(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_camera_info;
+ }
+	/*
+	 * Update the actuator subdevice id based on the input actuator name.
+	 */
+ rc = msm_sensor_fill_actuator_subdevid_by_name(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_camera_info;
+ }
+
+ rc = msm_sensor_fill_ois_subdevid_by_name(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ goto free_camera_info;
+ }
+
+ /* Power up and probe sensor */
+ rc = s_ctrl->func_tbl->sensor_power_up(s_ctrl);
+ if (rc < 0) {
+ pr_err("%s power up failed", slave_info->sensor_name);
+ goto free_camera_info;
+ }
+
+ pr_err("%s probe succeeded", slave_info->sensor_name);
+
+	/*
+	 * Set the probe-succeeded flag to 1 so that no other camera is
+	 * probed on this slot.
+	 */
+ s_ctrl->is_probe_succeed = 1;
+
+ /*
+ * Update the subdevice id of flash-src based on availability in kernel.
+ */
+ if (strlen(slave_info->flash_name) == 0) {
+ s_ctrl->sensordata->sensor_info->
+ subdev_id[SUB_MODULE_LED_FLASH] = -1;
+ }
+
+	/*
+	 * Creation of the /dev/videoX node is deferred for now, until a
+	 * dummy /dev/videoX node is created and used by the HAL.
+	 */
+
+ if (s_ctrl->sensor_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ rc = msm_sensor_driver_create_v4l_subdev(s_ctrl);
+ else
+ rc = msm_sensor_driver_create_i2c_v4l_subdev(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: camera creat v4l2 rc %d", rc);
+ goto camera_power_down;
+ }
+
+ /* Power down */
+ s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+
+ rc = msm_sensor_fill_slave_info_init_params(
+ slave_info,
+ s_ctrl->sensordata->sensor_info);
+ if (rc < 0) {
+ pr_err("%s Fill slave info failed", slave_info->sensor_name);
+ goto free_camera_info;
+ }
+ rc = msm_sensor_validate_slave_info(s_ctrl->sensordata->sensor_info);
+ if (rc < 0) {
+ pr_err("%s Validate slave info failed",
+ slave_info->sensor_name);
+ goto free_camera_info;
+ }
+ /* Update sensor mount angle and position in media entity flag */
+ is_yuv = (slave_info->output_format == MSM_SENSOR_YCBCR) ? 1 : 0;
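+	/* Bit 25 flags a YUV sensor; position is packed at bit 16 and
+	 * (mount angle / 90) at bit 8 of the media entity flags.
+	 */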
+ mount_pos = is_yuv << 25 |
+ (s_ctrl->sensordata->sensor_info->position << 16) |
+ ((s_ctrl->sensordata->
+ sensor_info->sensor_mount_angle / 90) << 8);
+
+ s_ctrl->msm_sd.sd.entity.flags = mount_pos | MEDIA_ENT_FL_DEFAULT;
+
+ /*Save sensor info*/
+ s_ctrl->sensordata->cam_slave_info = slave_info;
+
+ msm_sensor_fill_sensor_info(s_ctrl, probed_info, entity_name);
+
+ return rc;
+
+camera_power_down:
+ s_ctrl->func_tbl->sensor_power_down(s_ctrl);
+free_camera_info:
+ kfree(camera_info);
+free_slave_info:
+ kfree(slave_info);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_get_gpio_data(
+ struct msm_camera_sensor_board_info *sensordata,
+ struct device_node *of_node)
+{
+ int32_t rc = 0, i = 0;
+ struct msm_camera_gpio_conf *gconf = NULL;
+ uint16_t *gpio_array = NULL;
+ uint16_t gpio_array_size = 0;
+
+	/* Validate input parameters */
+ if (!sensordata || !of_node) {
+ pr_err("failed: invalid params sensordata %p of_node %p",
+ sensordata, of_node);
+ return -EINVAL;
+ }
+
+ sensordata->power_info.gpio_conf = kzalloc(
+ sizeof(struct msm_camera_gpio_conf), GFP_KERNEL);
+ if (!sensordata->power_info.gpio_conf) {
+ pr_err("failed");
+ return -ENOMEM;
+ }
+ gconf = sensordata->power_info.gpio_conf;
+
+ gpio_array_size = of_gpio_count(of_node);
+ CDBG("gpio count %d", gpio_array_size);
+ if (!gpio_array_size)
+ return 0;
+
+ gpio_array = kzalloc(sizeof(uint16_t) * gpio_array_size, GFP_KERNEL);
+ if (!gpio_array) {
+		pr_err("failed");
+		rc = -ENOMEM;
+		goto FREE_GPIO_CONF;
+ }
+ for (i = 0; i < gpio_array_size; i++) {
+ gpio_array[i] = of_get_gpio(of_node, i);
+ CDBG("gpio_array[%d] = %d", i, gpio_array[i]);
+ }
+
+ rc = msm_camera_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
+ gpio_array_size);
+ if (rc < 0) {
+ pr_err("failed");
+ goto FREE_GPIO_CONF;
+ }
+
+ rc = msm_camera_init_gpio_pin_tbl(of_node, gconf, gpio_array,
+ gpio_array_size);
+ if (rc < 0) {
+ pr_err("failed");
+ goto FREE_GPIO_REQ_TBL;
+ }
+
+ kfree(gpio_array);
+ return rc;
+
+FREE_GPIO_REQ_TBL:
+ kfree(sensordata->power_info.gpio_conf->cam_gpio_req_tbl);
+FREE_GPIO_CONF:
+ kfree(sensordata->power_info.gpio_conf);
+ kfree(gpio_array);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_get_dt_data(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+ struct msm_camera_sensor_board_info *sensordata = NULL;
+ struct device_node *of_node = s_ctrl->of_node;
+ uint32_t cell_id;
+
+ s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
+ if (!s_ctrl->sensordata) {
+ pr_err("failed: no memory");
+ return -ENOMEM;
+ }
+
+ sensordata = s_ctrl->sensordata;
+
+ /*
+ * Read cell index - this cell index will be the camera slot where
+ * this camera will be mounted
+ */
+ rc = of_property_read_u32(of_node, "cell-index", &cell_id);
+ if (rc < 0) {
+ pr_err("failed: cell-index rc %d", rc);
+ goto FREE_SENSOR_DATA;
+ }
+ s_ctrl->id = cell_id;
+
+ /* Validate cell_id */
+ if (cell_id >= MAX_CAMERAS) {
+ pr_err("failed: invalid cell_id %d", cell_id);
+ rc = -EINVAL;
+ goto FREE_SENSOR_DATA;
+ }
+
+ /* Check whether g_sctrl is already filled for this cell_id */
+ if (g_sctrl[cell_id]) {
+ pr_err("failed: sctrl already filled for cell_id %d", cell_id);
+ rc = -EINVAL;
+ goto FREE_SENSOR_DATA;
+ }
+
+ /* Read subdev info */
+ rc = msm_sensor_get_sub_module_index(of_node, &sensordata->sensor_info);
+ if (rc < 0) {
+ pr_err("failed");
+ goto FREE_SENSOR_DATA;
+ }
+
+ /* Read vreg information */
+ rc = msm_camera_get_dt_vreg_data(of_node,
+ &sensordata->power_info.cam_vreg,
+ &sensordata->power_info.num_vreg);
+ if (rc < 0) {
+ pr_err("failed: msm_camera_get_dt_vreg_data rc %d", rc);
+ goto FREE_SUB_MODULE_DATA;
+ }
+
+ /* Read gpio information */
+ rc = msm_sensor_driver_get_gpio_data(sensordata, of_node);
+ if (rc < 0) {
+ pr_err("failed: msm_sensor_driver_get_gpio_data rc %d", rc);
+ goto FREE_VREG_DATA;
+ }
+
+ /* Get CCI master */
+ rc = of_property_read_u32(of_node, "qcom,cci-master",
+ &s_ctrl->cci_i2c_master);
+ CDBG("qcom,cci-master %d, rc %d", s_ctrl->cci_i2c_master, rc);
+ if (rc < 0) {
+ /* Set default master 0 */
+ s_ctrl->cci_i2c_master = MASTER_0;
+ rc = 0;
+ }
+
+ /* Get mount angle */
+ if (0 > of_property_read_u32(of_node, "qcom,mount-angle",
+ &sensordata->sensor_info->sensor_mount_angle)) {
+ /* Invalidate mount angle flag */
+ sensordata->sensor_info->is_mount_angle_valid = 0;
+ sensordata->sensor_info->sensor_mount_angle = 0;
+ } else {
+ sensordata->sensor_info->is_mount_angle_valid = 1;
+ }
+ CDBG("%s qcom,mount-angle %d\n", __func__,
+ sensordata->sensor_info->sensor_mount_angle);
+ if (0 > of_property_read_u32(of_node, "qcom,sensor-position",
+ &sensordata->sensor_info->position)) {
+ CDBG("%s:%d Invalid sensor position\n", __func__, __LINE__);
+ sensordata->sensor_info->position = INVALID_CAMERA_B;
+ }
+ if (0 > of_property_read_u32(of_node, "qcom,sensor-mode",
+ &sensordata->sensor_info->modes_supported)) {
+ CDBG("%s:%d Invalid sensor mode supported\n",
+ __func__, __LINE__);
+ sensordata->sensor_info->modes_supported = CAMERA_MODE_INVALID;
+ }
+	/* Get the vdd-cx regulator (optional property; no error if absent) */
+ of_property_read_string(of_node, "qcom,vdd-cx-name",
+ &sensordata->misc_regulator);
+ CDBG("qcom,misc_regulator %s", sensordata->misc_regulator);
+
+ s_ctrl->set_mclk_23880000 = of_property_read_bool(of_node,
+ "qcom,mclk-23880000");
+
+ CDBG("%s qcom,mclk-23880000 = %d\n", __func__,
+ s_ctrl->set_mclk_23880000);
+
+ return rc;
+
+FREE_VREG_DATA:
+ kfree(sensordata->power_info.cam_vreg);
+FREE_SUB_MODULE_DATA:
+ kfree(sensordata->sensor_info);
+FREE_SENSOR_DATA:
+ kfree(sensordata);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_parse(struct msm_sensor_ctrl_t *s_ctrl)
+{
+ int32_t rc = 0;
+
+ CDBG("Enter");
+	/* Validate input parameters */
+	if (!s_ctrl) {
+		pr_err("failed: invalid params s_ctrl %p", s_ctrl);
+		return -EINVAL;
+	}
+
+ /* Allocate memory for sensor_i2c_client */
+ s_ctrl->sensor_i2c_client = kzalloc(sizeof(*s_ctrl->sensor_i2c_client),
+ GFP_KERNEL);
+ if (!s_ctrl->sensor_i2c_client) {
+ pr_err("failed: no memory sensor_i2c_client %p",
+ s_ctrl->sensor_i2c_client);
+ return -ENOMEM;
+ }
+
+ /* Allocate memory for mutex */
+ s_ctrl->msm_sensor_mutex = kzalloc(sizeof(*s_ctrl->msm_sensor_mutex),
+ GFP_KERNEL);
+ if (!s_ctrl->msm_sensor_mutex) {
+ pr_err("failed: no memory msm_sensor_mutex %p",
+ s_ctrl->msm_sensor_mutex);
+		rc = -ENOMEM;
+		goto FREE_SENSOR_I2C_CLIENT;
+ }
+
+ /* Parse dt information and store in sensor control structure */
+ rc = msm_sensor_driver_get_dt_data(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: rc %d", rc);
+ goto FREE_MUTEX;
+ }
+
+ /* Initialize mutex */
+ mutex_init(s_ctrl->msm_sensor_mutex);
+
+	/* Initialize v4l2 subdev info */
+ s_ctrl->sensor_v4l2_subdev_info = msm_sensor_driver_subdev_info;
+ s_ctrl->sensor_v4l2_subdev_info_size =
+ ARRAY_SIZE(msm_sensor_driver_subdev_info);
+
+ /* Initialize default parameters */
+ rc = msm_sensor_init_default_params(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: msm_sensor_init_default_params rc %d", rc);
+ goto FREE_DT_DATA;
+ }
+
+ /* Store sensor control structure in static database */
+ g_sctrl[s_ctrl->id] = s_ctrl;
+ CDBG("g_sctrl[%d] %p", s_ctrl->id, g_sctrl[s_ctrl->id]);
+
+ return rc;
+
+FREE_DT_DATA:
+ kfree(s_ctrl->sensordata->power_info.gpio_conf->gpio_num_info);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf->cam_gpio_req_tbl);
+ kfree(s_ctrl->sensordata->power_info.gpio_conf);
+ kfree(s_ctrl->sensordata->power_info.cam_vreg);
+ kfree(s_ctrl->sensordata);
+FREE_MUTEX:
+ kfree(s_ctrl->msm_sensor_mutex);
+FREE_SENSOR_I2C_CLIENT:
+ kfree(s_ctrl->sensor_i2c_client);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl = NULL;
+
+ /* Create sensor control structure */
+ s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
+ if (!s_ctrl) {
+ pr_err("failed: no memory s_ctrl %p", s_ctrl);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, s_ctrl);
+
+ /* Initialize sensor device type */
+ s_ctrl->sensor_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ s_ctrl->of_node = pdev->dev.of_node;
+
+ rc = msm_sensor_driver_parse(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: msm_sensor_driver_parse rc %d", rc);
+ goto FREE_S_CTRL;
+ }
+
+ /* Fill platform device */
+ pdev->id = s_ctrl->id;
+ s_ctrl->pdev = pdev;
+
+ /* Fill device in power info */
+ s_ctrl->sensordata->power_info.dev = &pdev->dev;
+ return rc;
+FREE_S_CTRL:
+ kfree(s_ctrl);
+ return rc;
+}
+
+static int32_t msm_sensor_driver_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int32_t rc = 0;
+ struct msm_sensor_ctrl_t *s_ctrl;
+
+ CDBG("\n\nEnter: msm_sensor_driver_i2c_probe");
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("%s %s i2c_check_functionality failed\n",
+ __func__, client->name);
+ rc = -EFAULT;
+ return rc;
+ }
+
+ /* Create sensor control structure */
+ s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
+ if (!s_ctrl) {
+ pr_err("failed: no memory s_ctrl %p", s_ctrl);
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(client, s_ctrl);
+
+ /* Initialize sensor device type */
+ s_ctrl->sensor_device_type = MSM_CAMERA_I2C_DEVICE;
+ s_ctrl->of_node = client->dev.of_node;
+
+ rc = msm_sensor_driver_parse(s_ctrl);
+ if (rc < 0) {
+ pr_err("failed: msm_sensor_driver_parse rc %d", rc);
+ goto FREE_S_CTRL;
+ }
+
+ if (s_ctrl->sensor_i2c_client != NULL) {
+ s_ctrl->sensor_i2c_client->client = client;
+ s_ctrl->sensordata->power_info.dev = &client->dev;
+
+ }
+
+ return rc;
+FREE_S_CTRL:
+ kfree(s_ctrl);
+ return rc;
+}
+
+static int msm_sensor_driver_i2c_remove(struct i2c_client *client)
+{
+ struct msm_sensor_ctrl_t *s_ctrl = i2c_get_clientdata(client);
+
+ pr_err("%s: sensor FREE\n", __func__);
+
+ if (!s_ctrl) {
+ pr_err("%s: sensor device is NULL\n", __func__);
+ return 0;
+ }
+
+ g_sctrl[s_ctrl->id] = NULL;
+ msm_sensor_free_sensor_data(s_ctrl);
+ kfree(s_ctrl->msm_sensor_mutex);
+ kfree(s_ctrl->sensor_i2c_client);
+ kfree(s_ctrl);
+
+ return 0;
+}
+
+static const struct i2c_device_id i2c_id[] = {
+ {SENSOR_DRIVER_I2C, (kernel_ulong_t)NULL},
+ { }
+};
+
+static struct i2c_driver msm_sensor_driver_i2c = {
+ .id_table = i2c_id,
+ .probe = msm_sensor_driver_i2c_probe,
+ .remove = msm_sensor_driver_i2c_remove,
+ .driver = {
+ .name = SENSOR_DRIVER_I2C,
+ },
+};
+
+static int __init msm_sensor_driver_init(void)
+{
+ int32_t rc = 0;
+
+ CDBG("%s Enter\n", __func__);
+ rc = platform_driver_register(&msm_sensor_platform_driver);
+ if (rc)
+ pr_err("%s platform_driver_register failed rc = %d",
+ __func__, rc);
+ rc = i2c_add_driver(&msm_sensor_driver_i2c);
+ if (rc)
+ pr_err("%s i2c_add_driver failed rc = %d", __func__, rc);
+
+ return rc;
+}
+
+static void __exit msm_sensor_driver_exit(void)
+{
+ CDBG("Enter");
+ platform_driver_unregister(&msm_sensor_platform_driver);
+ i2c_del_driver(&msm_sensor_driver_i2c);
+ return;
+}
+
+module_init(msm_sensor_driver_init);
+module_exit(msm_sensor_driver_exit);
+MODULE_DESCRIPTION("msm_sensor_driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.h b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.h
new file mode 100644
index 000000000000..1ab58245dc40
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_driver.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SENSOR_DRIVER_H
+#define MSM_SENSOR_DRIVER_H
+
+#include "msm_sensor.h"
+
+int32_t msm_sensor_driver_probe(void *setting,
+ struct msm_sensor_info_t *probed_info, char *entity_name);
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c
new file mode 100644
index 000000000000..8b6e3d3e1f78
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.c
@@ -0,0 +1,225 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "MSM-SENSOR-INIT %s:%d " fmt "\n", __func__, __LINE__
+
+/* Header files */
+#include "msm_sensor_init.h"
+#include "msm_sensor_driver.h"
+#include "msm_sensor.h"
+#include "msm_sd.h"
+
+/* Logging macro */
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+static struct msm_sensor_init_t *s_init;
+static struct v4l2_file_operations msm_sensor_init_v4l2_subdev_fops;
+/* Static function declaration */
+static long msm_sensor_init_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg);
+
+/* Static structure declaration */
+static struct v4l2_subdev_core_ops msm_sensor_init_subdev_core_ops = {
+ .ioctl = msm_sensor_init_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_sensor_init_subdev_ops = {
+ .core = &msm_sensor_init_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops msm_sensor_init_internal_ops;
+
+static int msm_sensor_wait_for_probe_done(struct msm_sensor_init_t *s_init)
+{
+ int rc;
+ int tm = 10000;
+ if (s_init->module_init_status == 1) {
+ CDBG("msm_cam_get_module_init_status -2\n");
+ return 0;
+ }
+ rc = wait_event_timeout(s_init->state_wait,
+ (s_init->module_init_status == 1), msecs_to_jiffies(tm));
+ if (rc == 0)
+ pr_err("%s:%d wait timeout\n", __func__, __LINE__);
+
+ return rc;
+}
+
+/* Static function definition */
+static int32_t msm_sensor_driver_cmd(struct msm_sensor_init_t *s_init,
+ void *arg)
+{
+ int32_t rc = 0;
+ struct sensor_init_cfg_data *cfg = (struct sensor_init_cfg_data *)arg;
+
+ /* Validate input parameters */
+ if (!s_init || !cfg) {
+ pr_err("failed: s_init %p cfg %p", s_init, cfg);
+ return -EINVAL;
+ }
+
+ switch (cfg->cfgtype) {
+ case CFG_SINIT_PROBE:
+ mutex_lock(&s_init->imutex);
+ s_init->module_init_status = 0;
+ rc = msm_sensor_driver_probe(cfg->cfg.setting,
+ &cfg->probed_info,
+ cfg->entity_name);
+ mutex_unlock(&s_init->imutex);
+ if (rc < 0)
+ pr_err("%s failed (non-fatal) rc %d", __func__, rc);
+ break;
+
+ case CFG_SINIT_PROBE_DONE:
+ s_init->module_init_status = 1;
+ wake_up(&s_init->state_wait);
+ break;
+
+ case CFG_SINIT_PROBE_WAIT_DONE:
+ msm_sensor_wait_for_probe_done(s_init);
+ break;
+
+ default:
+ pr_err("default");
+ break;
+ }
+
+ return rc;
+}
+
+static long msm_sensor_init_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ long rc = 0;
+ struct msm_sensor_init_t *s_init = v4l2_get_subdevdata(sd);
+ CDBG("Enter");
+
+ /* Validate input parameters */
+ if (!s_init) {
+ pr_err("failed: s_init %p", s_init);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_INIT_CFG:
+ rc = msm_sensor_driver_cmd(s_init, arg);
+ break;
+
+ default:
+ pr_err_ratelimited("default\n");
+ break;
+ }
+
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_sensor_init_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ int32_t rc = 0;
+ struct video_device *vdev = video_devdata(file);
+ struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+ struct sensor_init_cfg_data32 *u32 =
+ (struct sensor_init_cfg_data32 *)arg;
+ struct sensor_init_cfg_data sensor_init_data;
+
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_INIT_CFG32:
+ memset(&sensor_init_data, 0, sizeof(sensor_init_data));
+ sensor_init_data.cfgtype = u32->cfgtype;
+ sensor_init_data.cfg.setting = compat_ptr(u32->cfg.setting);
+ cmd = VIDIOC_MSM_SENSOR_INIT_CFG;
+ rc = msm_sensor_init_subdev_ioctl(sd, cmd, &sensor_init_data);
+ if (rc < 0) {
+ pr_err("%s:%d VIDIOC_MSM_SENSOR_INIT_CFG failed (non-fatal)",
+ __func__, __LINE__);
+ return rc;
+ }
+ u32->probed_info = sensor_init_data.probed_info;
+ strlcpy(u32->entity_name, sensor_init_data.entity_name,
+ sizeof(sensor_init_data.entity_name));
+ return 0;
+ default:
+ return msm_sensor_init_subdev_ioctl(sd, cmd, arg);
+ }
+}
+
+static long msm_sensor_init_subdev_fops_ioctl(
+ struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_sensor_init_subdev_do_ioctl);
+}
+#endif
+
+static int __init msm_sensor_init_module(void)
+{
+ int ret = 0;
+ /* Allocate memory for msm_sensor_init control structure */
+ s_init = kzalloc(sizeof(struct msm_sensor_init_t), GFP_KERNEL);
+ if (!s_init) {
+ pr_err("failed: no memory s_init %p", NULL);
+ return -ENOMEM;
+ }
+
+ CDBG("MSM_SENSOR_INIT_MODULE %p", NULL);
+
+ /* Initialize mutex */
+ mutex_init(&s_init->imutex);
+
+ /* Create /dev/v4l-subdevX for msm_sensor_init */
+ v4l2_subdev_init(&s_init->msm_sd.sd, &msm_sensor_init_subdev_ops);
+ snprintf(s_init->msm_sd.sd.name, sizeof(s_init->msm_sd.sd.name), "%s",
+ "msm_sensor_init");
+ v4l2_set_subdevdata(&s_init->msm_sd.sd, s_init);
+ s_init->msm_sd.sd.internal_ops = &msm_sensor_init_internal_ops;
+ s_init->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&s_init->msm_sd.sd.entity, 0, NULL, 0);
+ s_init->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ s_init->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_SENSOR_INIT;
+ s_init->msm_sd.sd.entity.name = s_init->msm_sd.sd.name;
+ s_init->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x6;
+ ret = msm_sd_register(&s_init->msm_sd);
+ if (ret) {
+ CDBG("%s: msm_sd_register error = %d\n", __func__, ret);
+ goto error;
+ }
+ msm_cam_copy_v4l2_subdev_fops(&msm_sensor_init_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_sensor_init_v4l2_subdev_fops.compat_ioctl32 =
+ msm_sensor_init_subdev_fops_ioctl;
+#endif
+ s_init->msm_sd.sd.devnode->fops =
+ &msm_sensor_init_v4l2_subdev_fops;
+
+ init_waitqueue_head(&s_init->state_wait);
+
+ return 0;
+error:
+ mutex_destroy(&s_init->imutex);
+ kfree(s_init);
+ return ret;
+}
+
+static void __exit msm_sensor_exit_module(void)
+{
+ msm_sd_unregister(&s_init->msm_sd);
+ mutex_destroy(&s_init->imutex);
+ kfree(s_init);
+ return;
+}
+
+module_init(msm_sensor_init_module);
+module_exit(msm_sensor_exit_module);
+MODULE_DESCRIPTION("msm_sensor_init");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.h b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.h
new file mode 100644
index 000000000000..256b0a1fbcd5
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/msm_sensor_init.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SENSOR_INIT_H
+#define MSM_SENSOR_INIT_H
+
+#include "msm_sensor.h"
+
+struct msm_sensor_init_t {
+ struct mutex imutex;
+ struct msm_sd_subdev msm_sd;
+ int module_init_status;
+ wait_queue_head_t state_wait;
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ois/Makefile b/drivers/media/platform/msm/camera_v2/sensor/ois/Makefile
new file mode 100644
index 000000000000..f09c92acb57f
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/ois/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Idrivers/media/platform/msm/camera_v2
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/common
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/io
+ccflags-y += -Idrivers/media/platform/msm/camera_v2/sensor/cci
+obj-$(CONFIG_MSMB_CAMERA) += msm_ois.o
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c
new file mode 100644
index 000000000000..664517a8e959
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.c
@@ -0,0 +1,774 @@
+/* Copyright (c) 2014 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include "msm_sd.h"
+#include "msm_ois.h"
+#include "msm_cci.h"
+
+DEFINE_MSM_MUTEX(msm_ois_mutex);
+/*#define MSM_OIS_DEBUG*/
+#undef CDBG
+#ifdef MSM_OIS_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define MAX_POLL_COUNT 100
+
+static struct v4l2_file_operations msm_ois_v4l2_subdev_fops;
+static int32_t msm_ois_power_up(struct msm_ois_ctrl_t *o_ctrl);
+static int32_t msm_ois_power_down(struct msm_ois_ctrl_t *o_ctrl);
+
+static struct i2c_driver msm_ois_i2c_driver;
+
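+/*
+ * Apply a caller-supplied array of OIS register settings. BYTE/WORD data
+ * uses i2c_write(); DWORD data is split into a 4-byte sequential write.
+ * POLL entries retry i2c_poll() up to MAX_POLL_COUNT times, and the
+ * per-entry delay (if any) is applied after each operation.
+ */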
+static int32_t msm_ois_write_settings(struct msm_ois_ctrl_t *o_ctrl,
+ uint16_t size, struct reg_settings_ois_t *settings)
+{
+ int32_t rc = -EFAULT;
+ int32_t i = 0;
+ struct msm_camera_i2c_seq_reg_array *reg_setting;
+ CDBG("Enter\n");
+
+ for (i = 0; i < size; i++) {
+ switch (settings[i].i2c_operation) {
+ case MSM_OIS_WRITE: {
+ switch (settings[i].data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ rc = o_ctrl->i2c_client.i2c_func_tbl->i2c_write(
+ &o_ctrl->i2c_client,
+ settings[i].reg_addr,
+ settings[i].reg_data,
+ settings[i].data_type);
+ break;
+ case MSM_CAMERA_I2C_DWORD_DATA:
+ reg_setting =
+ kzalloc(sizeof(struct msm_camera_i2c_seq_reg_array),
+ GFP_KERNEL);
+ if (!reg_setting)
+ return -ENOMEM;
+
+ reg_setting->reg_addr = settings[i].reg_addr;
+ reg_setting->reg_data[0] = (uint8_t)
+ ((settings[i].reg_data &
+ 0xFF000000) >> 24);
+ reg_setting->reg_data[1] = (uint8_t)
+ ((settings[i].reg_data &
+ 0x00FF0000) >> 16);
+ reg_setting->reg_data[2] = (uint8_t)
+ ((settings[i].reg_data &
+ 0x0000FF00) >> 8);
+ reg_setting->reg_data[3] = (uint8_t)
+ (settings[i].reg_data & 0x000000FF);
+ reg_setting->reg_data_size = 4;
+ rc = o_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_seq(&o_ctrl->i2c_client,
+ reg_setting->reg_addr,
+ reg_setting->reg_data,
+ reg_setting->reg_data_size);
+ kfree(reg_setting);
+ reg_setting = NULL;
+ if (rc < 0)
+ return rc;
+ break;
+
+ default:
+ pr_err("Unsupported data type: %d\n",
+ settings[i].data_type);
+ break;
+ }
+ }
+ break;
+
+ case MSM_OIS_POLL: {
+ int32_t poll_count = 0;
+ switch (settings[i].data_type) {
+ case MSM_CAMERA_I2C_BYTE_DATA:
+ case MSM_CAMERA_I2C_WORD_DATA:
+ do {
+ rc = o_ctrl->i2c_client.i2c_func_tbl
+ ->i2c_poll(&o_ctrl->i2c_client,
+ settings[i].reg_addr,
+ settings[i].reg_data,
+ settings[i].data_type);
+
+ if (poll_count++ > MAX_POLL_COUNT) {
+ pr_err("MSM_OIS_POLL failed");
+ break;
+ }
+ } while (rc != 0);
+ break;
+
+ default:
+ pr_err("Unsupported data type: %d\n",
+ settings[i].data_type);
+ break;
+ }
+ }
+ }
+
+ if (settings[i].delay > 20)
+ msleep(settings[i].delay);
+ else if (0 != settings[i].delay)
+ usleep_range(settings[i].delay * 1000,
+ (settings[i].delay * 1000) + 1000);
+
+ if (rc < 0)
+ break;
+ }
+
+ CDBG("Exit\n");
+ return rc;
+}
+
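+/* Enable (config = 1) or disable (config = 0) every regulator in vreg_cfg. */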
+static int32_t msm_ois_vreg_control(struct msm_ois_ctrl_t *o_ctrl,
+ int config)
+{
+ int rc = 0, i, cnt;
+ struct msm_ois_vreg *vreg_cfg;
+
+ vreg_cfg = &o_ctrl->vreg_cfg;
+ cnt = vreg_cfg->num_vreg;
+ if (!cnt)
+ return 0;
+
+ if (cnt >= MSM_OIS_MAX_VREGS) {
+ pr_err("%s failed %d cnt %d\n", __func__, __LINE__, cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ rc = msm_camera_config_single_vreg(&(o_ctrl->pdev->dev),
+ &vreg_cfg->cam_vreg[i],
+ (struct regulator **)&vreg_cfg->data[i],
+ config);
+ }
+ return rc;
+}
+
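+/* Drop the OIS regulators (unless already disabled) and mark the device inactive. */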
+static int32_t msm_ois_power_down(struct msm_ois_ctrl_t *o_ctrl)
+{
+ int32_t rc = 0;
+ CDBG("Enter\n");
+ if (o_ctrl->ois_state != OIS_DISABLE_STATE) {
+ rc = msm_ois_vreg_control(o_ctrl, 0);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
+ o_ctrl->i2c_tbl_index = 0;
+ o_ctrl->ois_state = OIS_OPS_INACTIVE;
+ }
+ CDBG("Exit\n");
+ return rc;
+}
+
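+/* Bring up the CCI interface for platform devices and mark OIS active. */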
+static int msm_ois_init(struct msm_ois_ctrl_t *o_ctrl)
+{
+ int rc = 0;
+ CDBG("Enter\n");
+
+ if (!o_ctrl) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+
+ if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ rc = o_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &o_ctrl->i2c_client, MSM_CCI_INIT);
+ if (rc < 0)
+ pr_err("cci_init failed\n");
+ }
+ o_ctrl->ois_state = OIS_OPS_ACTIVE;
+ CDBG("Exit\n");
+ return rc;
+}
+
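+/*
+ * CFG_OIS_CONTROL: program the CCI/I2C client with the slave address and
+ * frequency mode requested by userspace, then copy in and apply the
+ * supplied register settings through msm_ois_write_settings().
+ */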
+static int32_t msm_ois_control(struct msm_ois_ctrl_t *o_ctrl,
+ struct msm_ois_set_info_t *set_info)
+{
+ struct reg_settings_ois_t *settings = NULL;
+ int32_t rc = 0;
+ struct msm_camera_cci_client *cci_client = NULL;
+ CDBG("Enter\n");
+
+ if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+ cci_client = o_ctrl->i2c_client.cci_client;
+ cci_client->sid =
+ set_info->ois_params.i2c_addr >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+ cci_client->cci_i2c_master = o_ctrl->cci_master;
+ cci_client->i2c_freq_mode = set_info->ois_params.i2c_freq_mode;
+ } else {
+ o_ctrl->i2c_client.client->addr =
+ set_info->ois_params.i2c_addr;
+ }
+ o_ctrl->i2c_client.addr_type = MSM_CAMERA_I2C_WORD_ADDR;
+
+ if (set_info->ois_params.setting_size > 0 &&
+ set_info->ois_params.setting_size
+ < MAX_OIS_REG_SETTINGS) {
+ settings = kmalloc(
+ sizeof(struct reg_settings_ois_t) *
+ (set_info->ois_params.setting_size),
+ GFP_KERNEL);
+ if (settings == NULL) {
+ pr_err("Error allocating memory\n");
+ return -ENOMEM;
+ }
+ if (copy_from_user(settings,
+ (void *)set_info->ois_params.settings,
+ set_info->ois_params.setting_size *
+ sizeof(struct reg_settings_ois_t))) {
+ kfree(settings);
+ pr_err("Error copying\n");
+ return -EFAULT;
+ }
+
+ rc = msm_ois_write_settings(o_ctrl,
+ set_info->ois_params.setting_size,
+ settings);
+ kfree(settings);
+ if (rc < 0) {
+ pr_err("Error writing ois settings: %d\n", rc);
+ return rc;
+ }
+ }
+
+ CDBG("Exit\n");
+
+ return rc;
+}
+
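+/* Dispatch VIDIOC_MSM_OIS_CFG sub-commands under the OIS mutex. */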
+static int32_t msm_ois_config(struct msm_ois_ctrl_t *o_ctrl,
+ void __user *argp)
+{
+ struct msm_ois_cfg_data *cdata =
+ (struct msm_ois_cfg_data *)argp;
+ int32_t rc = 0;
+ mutex_lock(o_ctrl->ois_mutex);
+ CDBG("Enter\n");
+ CDBG("%s type %d\n", __func__, cdata->cfgtype);
+ switch (cdata->cfgtype) {
+ case CFG_OIS_INIT:
+ rc = msm_ois_init(o_ctrl);
+ if (rc < 0)
+ pr_err("msm_ois_init failed %d\n", rc);
+ break;
+ case CFG_OIS_POWERDOWN:
+ rc = msm_ois_power_down(o_ctrl);
+ if (rc < 0)
+ pr_err("msm_ois_power_down failed %d\n", rc);
+ break;
+ case CFG_OIS_POWERUP:
+ rc = msm_ois_power_up(o_ctrl);
+ if (rc < 0)
+ pr_err("Failed ois power up %d\n", rc);
+ break;
+ case CFG_OIS_CONTROL:
+ rc = msm_ois_control(o_ctrl, &cdata->cfg.set_info);
+ if (rc < 0)
+ pr_err("Failed ois control %d\n", rc);
+ break;
+ case CFG_OIS_I2C_WRITE_SEQ_TABLE: {
+ struct msm_camera_i2c_seq_reg_setting conf_array;
+ struct msm_camera_i2c_seq_reg_array *reg_setting = NULL;
+
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ memcpy(&conf_array,
+ (void *)cdata->cfg.settings,
+ sizeof(struct msm_camera_i2c_seq_reg_setting));
+ } else
+#endif
+ if (copy_from_user(&conf_array,
+ (void *)cdata->cfg.settings,
+ sizeof(struct msm_camera_i2c_seq_reg_setting))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+
+ if (!conf_array.size) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -EFAULT;
+ break;
+ }
+ reg_setting = kzalloc(conf_array.size *
+ (sizeof(struct msm_camera_i2c_seq_reg_array)),
+ GFP_KERNEL);
+ if (!reg_setting) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ rc = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(reg_setting, (void *)conf_array.reg_setting,
+ conf_array.size *
+ sizeof(struct msm_camera_i2c_seq_reg_array))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ kfree(reg_setting);
+ rc = -EFAULT;
+ break;
+ }
+
+ conf_array.reg_setting = reg_setting;
+ rc = o_ctrl->i2c_client.i2c_func_tbl->
+ i2c_write_seq_table(&o_ctrl->i2c_client,
+ &conf_array);
+ kfree(reg_setting);
+ break;
+ }
+ default:
+ break;
+ }
+ mutex_unlock(o_ctrl->ois_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
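+/* Return the platform device id for CCI devices, otherwise subdev_id. */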
+static int32_t msm_ois_get_subdev_id(struct msm_ois_ctrl_t *o_ctrl,
+ void *arg)
+{
+ uint32_t *subdev_id = (uint32_t *)arg;
+ CDBG("Enter\n");
+ if (!subdev_id) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE)
+ *subdev_id = o_ctrl->pdev->id;
+ else
+ *subdev_id = o_ctrl->subdev_id;
+
+ CDBG("subdev_id %d\n", *subdev_id);
+ CDBG("Exit\n");
+ return 0;
+}
+
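+/*
+ * I2C function tables: CCI-backed ops for platform (CCI-attached) devices,
+ * QUP-backed ops (below) for OIS parts sitting directly on an I2C bus.
+ */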
+static struct msm_camera_i2c_fn_t msm_sensor_cci_func_tbl = {
+ .i2c_read = msm_camera_cci_i2c_read,
+ .i2c_read_seq = msm_camera_cci_i2c_read_seq,
+ .i2c_write = msm_camera_cci_i2c_write,
+ .i2c_write_table = msm_camera_cci_i2c_write_table,
+ .i2c_write_seq = msm_camera_cci_i2c_write_seq,
+ .i2c_write_seq_table = msm_camera_cci_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_cci_i2c_write_table_w_microdelay,
+ .i2c_util = msm_sensor_cci_i2c_util,
+ .i2c_poll = msm_camera_cci_i2c_poll,
+};
+
+static struct msm_camera_i2c_fn_t msm_sensor_qup_func_tbl = {
+ .i2c_read = msm_camera_qup_i2c_read,
+ .i2c_read_seq = msm_camera_qup_i2c_read_seq,
+ .i2c_write = msm_camera_qup_i2c_write,
+ .i2c_write_table = msm_camera_qup_i2c_write_table,
+ .i2c_write_seq = msm_camera_qup_i2c_write_seq,
+ .i2c_write_seq_table = msm_camera_qup_i2c_write_seq_table,
+ .i2c_write_table_w_microdelay =
+ msm_camera_qup_i2c_write_table_w_microdelay,
+ .i2c_poll = msm_camera_qup_i2c_poll,
+};
+
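+/* Subdev close: release the CCI master if it was claimed and disable OIS. */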
+static int msm_ois_close(struct v4l2_subdev *sd,
+ struct v4l2_subdev_fh *fh)
+{
+ int rc = 0;
+ struct msm_ois_ctrl_t *o_ctrl = v4l2_get_subdevdata(sd);
+ CDBG("Enter\n");
+ if (!o_ctrl) {
+ pr_err("failed\n");
+ return -EINVAL;
+ }
+ mutex_lock(o_ctrl->ois_mutex);
+ if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE &&
+ o_ctrl->ois_state != OIS_DISABLE_STATE) {
+ rc = o_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &o_ctrl->i2c_client, MSM_CCI_RELEASE);
+ if (rc < 0)
+ pr_err("cci_release failed\n");
+ }
+ o_ctrl->ois_state = OIS_DISABLE_STATE;
+ mutex_unlock(o_ctrl->ois_mutex);
+ CDBG("Exit\n");
+ return rc;
+}
+
+static const struct v4l2_subdev_internal_ops msm_ois_internal_ops = {
+ .close = msm_ois_close,
+};
+
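+/* Core ioctl entry point for the OIS v4l2 subdev. */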
+static long msm_ois_subdev_ioctl(struct v4l2_subdev *sd,
+ unsigned int cmd, void *arg)
+{
+ int rc;
+ struct msm_ois_ctrl_t *o_ctrl = v4l2_get_subdevdata(sd);
+ void __user *argp = (void __user *)arg;
+ CDBG("Enter\n");
+ CDBG("%s:%d o_ctrl %p argp %p\n", __func__, __LINE__, o_ctrl, argp);
+ switch (cmd) {
+ case VIDIOC_MSM_SENSOR_GET_SUBDEV_ID:
+ return msm_ois_get_subdev_id(o_ctrl, argp);
+ case VIDIOC_MSM_OIS_CFG:
+ return msm_ois_config(o_ctrl, argp);
+ case MSM_SD_SHUTDOWN:
+ if (!o_ctrl->i2c_client.i2c_func_tbl) {
+ pr_err("o_ctrl->i2c_client.i2c_func_tbl NULL\n");
+ return -EINVAL;
+ }
+ rc = msm_ois_power_down(o_ctrl);
+ if (rc < 0) {
+ pr_err("%s:%d OIS Power down failed\n",
+ __func__, __LINE__);
+ }
+ return msm_ois_close(sd, NULL);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
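+/* Enable the OIS regulators and mark the device enabled. */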
+static int32_t msm_ois_power_up(struct msm_ois_ctrl_t *o_ctrl)
+{
+ int rc = 0;
+ CDBG("%s called\n", __func__);
+
+ rc = msm_ois_vreg_control(o_ctrl, 1);
+ if (rc < 0) {
+ pr_err("%s failed %d\n", __func__, __LINE__);
+ return rc;
+ }
+
+ o_ctrl->ois_state = OIS_ENABLE_STATE;
+ CDBG("Exit\n");
+ return rc;
+}
+
+static struct v4l2_subdev_core_ops msm_ois_subdev_core_ops = {
+ .ioctl = msm_ois_subdev_ioctl,
+};
+
+static struct v4l2_subdev_ops msm_ois_subdev_ops = {
+ .core = &msm_ois_subdev_core_ops,
+};
+
+static const struct i2c_device_id msm_ois_i2c_id[] = {
+ {"qcom,ois", (kernel_ulong_t)NULL},
+ { }
+};
+
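+/*
+ * Probe path for I2C (QUP) attached OIS parts: read cell-index from DT,
+ * hook up the QUP function table and register the v4l2 subdev.
+ */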
+static int32_t msm_ois_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int rc = 0;
+ struct msm_ois_ctrl_t *ois_ctrl_t = NULL;
+ CDBG("Enter\n");
+
+ if (client == NULL) {
+ pr_err("msm_ois_i2c_probe: client is null\n");
+ return -EINVAL;
+ }
+
+ ois_ctrl_t = kzalloc(sizeof(struct msm_ois_ctrl_t),
+ GFP_KERNEL);
+ if (!ois_ctrl_t) {
+ pr_err("%s:%d failed no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ pr_err("i2c_check_functionality failed\n");
+ rc = -EINVAL;
+ goto probe_failure;
+ }
+
+ CDBG("client = 0x%p\n", client);
+
+ rc = of_property_read_u32(client->dev.of_node, "cell-index",
+ &ois_ctrl_t->subdev_id);
+ CDBG("cell-index %d, rc %d\n", ois_ctrl_t->subdev_id, rc);
+ if (rc < 0) {
+ pr_err("failed rc %d\n", rc);
+ goto probe_failure;
+ }
+
+ ois_ctrl_t->i2c_driver = &msm_ois_i2c_driver;
+ ois_ctrl_t->i2c_client.client = client;
+ /* Set device type as I2C */
+ ois_ctrl_t->ois_device_type = MSM_CAMERA_I2C_DEVICE;
+ ois_ctrl_t->i2c_client.i2c_func_tbl = &msm_sensor_qup_func_tbl;
+ ois_ctrl_t->ois_v4l2_subdev_ops = &msm_ois_subdev_ops;
+ ois_ctrl_t->ois_mutex = &msm_ois_mutex;
+
+ /* Assign name for sub device */
+ snprintf(ois_ctrl_t->msm_sd.sd.name, sizeof(ois_ctrl_t->msm_sd.sd.name),
+ "%s", ois_ctrl_t->i2c_driver->driver.name);
+
+ /* Initialize sub device */
+ v4l2_i2c_subdev_init(&ois_ctrl_t->msm_sd.sd,
+ ois_ctrl_t->i2c_client.client,
+ ois_ctrl_t->ois_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&ois_ctrl_t->msm_sd.sd, ois_ctrl_t);
+ ois_ctrl_t->msm_sd.sd.internal_ops = &msm_ois_internal_ops;
+ ois_ctrl_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ media_entity_init(&ois_ctrl_t->msm_sd.sd.entity, 0, NULL, 0);
+ ois_ctrl_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ ois_ctrl_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_OIS;
+ ois_ctrl_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2;
+ msm_sd_register(&ois_ctrl_t->msm_sd);
+ ois_ctrl_t->ois_state = OIS_DISABLE_STATE;
+ pr_info("msm_ois_i2c_probe: succeeded\n");
+ CDBG("Exit\n");
+ return rc;
+
+probe_failure:
+ kfree(ois_ctrl_t);
+ return rc;
+}
+
+#ifdef CONFIG_COMPAT
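+/*
+ * 32-bit compat shim: translate msm_ois_cfg_data32 into the native
+ * msm_ois_cfg_data layout before handing off to msm_ois_subdev_ioctl().
+ */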
+static long msm_ois_subdev_do_ioctl(
+ struct file *file, unsigned int cmd, void *arg)
+{
+ long rc = 0;
+ struct video_device *vdev;
+ struct v4l2_subdev *sd;
+ struct msm_ois_cfg_data32 *u32;
+ struct msm_ois_cfg_data ois_data;
+ void *parg;
+ struct msm_camera_i2c_seq_reg_setting settings;
+ struct msm_camera_i2c_seq_reg_setting32 settings32;
+
+ if (!file || !arg) {
+ pr_err("%s:failed NULL parameter\n", __func__);
+ return -EINVAL;
+ }
+ vdev = video_devdata(file);
+ sd = vdev_to_v4l2_subdev(vdev);
+ u32 = (struct msm_ois_cfg_data32 *)arg;
+ parg = arg;
+
+ ois_data.cfgtype = u32->cfgtype;
+
+ switch (cmd) {
+ case VIDIOC_MSM_OIS_CFG32:
+ cmd = VIDIOC_MSM_OIS_CFG;
+
+ switch (u32->cfgtype) {
+ case CFG_OIS_CONTROL:
+ ois_data.cfg.set_info.ois_params.setting_size =
+ u32->cfg.set_info.ois_params.setting_size;
+ ois_data.cfg.set_info.ois_params.i2c_addr =
+ u32->cfg.set_info.ois_params.i2c_addr;
+ ois_data.cfg.set_info.ois_params.i2c_freq_mode =
+ u32->cfg.set_info.ois_params.i2c_freq_mode;
+ ois_data.cfg.set_info.ois_params.i2c_addr_type =
+ u32->cfg.set_info.ois_params.i2c_addr_type;
+ ois_data.cfg.set_info.ois_params.i2c_data_type =
+ u32->cfg.set_info.ois_params.i2c_data_type;
+ ois_data.cfg.set_info.ois_params.settings =
+ compat_ptr(u32->cfg.set_info.ois_params.
+ settings);
+ parg = &ois_data;
+ break;
+ case CFG_OIS_I2C_WRITE_SEQ_TABLE:
+ if (copy_from_user(&settings32,
+ (void *)compat_ptr(u32->cfg.settings),
+ sizeof(
+ struct msm_camera_i2c_seq_reg_setting32))) {
+ pr_err("copy_from_user failed\n");
+ return -EFAULT;
+ }
+
+ settings.addr_type = settings32.addr_type;
+ settings.delay = settings32.delay;
+ settings.size = settings32.size;
+ settings.reg_setting =
+ compat_ptr(settings32.reg_setting);
+
+ ois_data.cfgtype = u32->cfgtype;
+ ois_data.cfg.settings = &settings;
+ parg = &ois_data;
+ break;
+ default:
+ parg = &ois_data;
+ break;
+ }
+ }
+ rc = msm_ois_subdev_ioctl(sd, cmd, parg);
+
+ return rc;
+}
+
+static long msm_ois_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return video_usercopy(file, cmd, arg, msm_ois_subdev_do_ioctl);
+}
+#endif
+
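+/*
+ * Probe path for CCI-attached OIS devices: parse cell-index, CCI master and
+ * regulator data from DT, set up the CCI client and register the v4l2 subdev.
+ */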
+static int32_t msm_ois_platform_probe(struct platform_device *pdev)
+{
+ int32_t rc = 0;
+ struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_ois_ctrl_t *msm_ois_t = NULL;
+ struct msm_ois_vreg *vreg_cfg;
+ CDBG("Enter\n");
+
+ if (!pdev->dev.of_node) {
+ pr_err("of_node NULL\n");
+ return -EINVAL;
+ }
+
+ msm_ois_t = kzalloc(sizeof(struct msm_ois_ctrl_t),
+ GFP_KERNEL);
+ if (!msm_ois_t) {
+ pr_err("%s:%d failed no memory\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ rc = of_property_read_u32((&pdev->dev)->of_node, "cell-index",
+ &pdev->id);
+ CDBG("cell-index %d, rc %d\n", pdev->id, rc);
+ if (rc < 0) {
+ kfree(msm_ois_t);
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+
+ rc = of_property_read_u32((&pdev->dev)->of_node, "qcom,cci-master",
+ &msm_ois_t->cci_master);
+ CDBG("qcom,cci-master %d, rc %d\n", msm_ois_t->cci_master, rc);
+ if (rc < 0 || msm_ois_t->cci_master >= MASTER_MAX) {
+ kfree(msm_ois_t);
+ pr_err("failed rc %d\n", rc);
+ return rc < 0 ? rc : -EINVAL;
+ }
+
+ if (of_find_property((&pdev->dev)->of_node,
+ "qcom,cam-vreg-name", NULL)) {
+ vreg_cfg = &msm_ois_t->vreg_cfg;
+ rc = msm_camera_get_dt_vreg_data((&pdev->dev)->of_node,
+ &vreg_cfg->cam_vreg, &vreg_cfg->num_vreg);
+ if (rc < 0) {
+ kfree(msm_ois_t);
+ pr_err("failed rc %d\n", rc);
+ return rc;
+ }
+ }
+
+ msm_ois_t->ois_v4l2_subdev_ops = &msm_ois_subdev_ops;
+ msm_ois_t->ois_mutex = &msm_ois_mutex;
+
+ /* Set platform device handle */
+ msm_ois_t->pdev = pdev;
+ /* Set device type as platform device */
+ msm_ois_t->ois_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+ msm_ois_t->i2c_client.i2c_func_tbl = &msm_sensor_cci_func_tbl;
+ msm_ois_t->i2c_client.cci_client = kzalloc(sizeof(
+ struct msm_camera_cci_client), GFP_KERNEL);
+ if (!msm_ois_t->i2c_client.cci_client) {
+ kfree(msm_ois_t->vreg_cfg.cam_vreg);
+ kfree(msm_ois_t);
+ pr_err("failed no memory\n");
+ return -ENOMEM;
+ }
+
+ cci_client = msm_ois_t->i2c_client.cci_client;
+ cci_client->cci_subdev = msm_cci_get_subdev();
+ cci_client->cci_i2c_master = msm_ois_t->cci_master;
+ v4l2_subdev_init(&msm_ois_t->msm_sd.sd,
+ msm_ois_t->ois_v4l2_subdev_ops);
+ v4l2_set_subdevdata(&msm_ois_t->msm_sd.sd, msm_ois_t);
+ msm_ois_t->msm_sd.sd.internal_ops = &msm_ois_internal_ops;
+ msm_ois_t->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ snprintf(msm_ois_t->msm_sd.sd.name,
+ ARRAY_SIZE(msm_ois_t->msm_sd.sd.name), "msm_ois");
+ media_entity_init(&msm_ois_t->msm_sd.sd.entity, 0, NULL, 0);
+ msm_ois_t->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
+ msm_ois_t->msm_sd.sd.entity.group_id = MSM_CAMERA_SUBDEV_OIS;
+ msm_ois_t->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x2;
+ msm_sd_register(&msm_ois_t->msm_sd);
+ msm_ois_t->ois_state = OIS_DISABLE_STATE;
+ msm_cam_copy_v4l2_subdev_fops(&msm_ois_v4l2_subdev_fops);
+#ifdef CONFIG_COMPAT
+ msm_ois_v4l2_subdev_fops.compat_ioctl32 =
+ msm_ois_subdev_fops_ioctl;
+#endif
+ msm_ois_t->msm_sd.sd.devnode->fops =
+ &msm_ois_v4l2_subdev_fops;
+
+ CDBG("Exit\n");
+ return rc;
+}
+
+static const struct of_device_id msm_ois_i2c_dt_match[] = {
+ {.compatible = "qcom,ois"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_ois_i2c_dt_match);
+
+static struct i2c_driver msm_ois_i2c_driver = {
+ .id_table = msm_ois_i2c_id,
+ .probe = msm_ois_i2c_probe,
+ .remove = __exit_p(msm_ois_i2c_remove),
+ .driver = {
+ .name = "qcom,ois",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ois_i2c_dt_match,
+ },
+};
+
+static const struct of_device_id msm_ois_dt_match[] = {
+ {.compatible = "qcom,ois", .data = NULL},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, msm_ois_dt_match);
+
+static struct platform_driver msm_ois_platform_driver = {
+ .probe = msm_ois_platform_probe,
+ .driver = {
+ .name = "qcom,ois",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_ois_dt_match,
+ },
+};
+
+static int __init msm_ois_init_module(void)
+{
+ int32_t rc = 0;
+ CDBG("Enter\n");
+ rc = platform_driver_register(&msm_ois_platform_driver);
+ if (!rc)
+ return rc;
+ CDBG("%s:%d rc %d\n", __func__, __LINE__, rc);
+ return i2c_add_driver(&msm_ois_i2c_driver);
+}
+
+static void __exit msm_ois_exit_module(void)
+{
+ platform_driver_unregister(&msm_ois_platform_driver);
+ i2c_del_driver(&msm_ois_i2c_driver);
+}
+
+module_init(msm_ois_init_module);
+module_exit(msm_ois_exit_module);
+MODULE_DESCRIPTION("MSM OIS");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.h b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.h
new file mode 100644
index 000000000000..e6db9ad4ffa0
--- /dev/null
+++ b/drivers/media/platform/msm/camera_v2/sensor/ois/msm_ois.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_OIS_H
+#define MSM_OIS_H
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <soc/qcom/camera2.h>
+#include <media/v4l2-subdev.h>
+#include <media/msmb_camera.h>
+#include "msm_camera_i2c.h"
+#include "msm_camera_dt_util.h"
+#include "msm_camera_io_util.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+ static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define MSM_OIS_MAX_VREGS (10)
+
+struct msm_ois_ctrl_t;
+
+enum msm_ois_state_t {
+ OIS_ENABLE_STATE,
+ OIS_OPS_ACTIVE,
+ OIS_OPS_INACTIVE,
+ OIS_DISABLE_STATE,
+};
+
+struct msm_ois_vreg {
+ struct camera_vreg_t *cam_vreg;
+ void *data[MSM_OIS_MAX_VREGS];
+ int num_vreg;
+};
+
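+/* Per-device OIS state shared by the platform (CCI) and I2C probe paths. */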
+struct msm_ois_ctrl_t {
+ struct i2c_driver *i2c_driver;
+ struct platform_driver *pdriver;
+ struct platform_device *pdev;
+ struct msm_camera_i2c_client i2c_client;
+ enum msm_camera_device_type_t ois_device_type;
+ struct msm_sd_subdev msm_sd;
+ struct mutex *ois_mutex;
+ enum msm_camera_i2c_data_type i2c_data_type;
+ struct v4l2_subdev sdev;
+ struct v4l2_subdev_ops *ois_v4l2_subdev_ops;
+ void *user_data;
+ uint16_t i2c_tbl_index;
+ enum cci_i2c_master_t cci_master;
+ uint32_t subdev_id;
+ enum msm_ois_state_t ois_state;
+ struct msm_ois_vreg vreg_cfg;
+};
+
+#endif